-rw-r--r--  .gitattributes | 1
-rw-r--r--  README.md | 26
-rwxr-xr-x  Wrappers/Python/ccpi/framework/BlockDataContainer.py | 305
-rwxr-xr-x  Wrappers/Python/ccpi/framework/__init__.py | 24
-rwxr-xr-x (was -rw-r--r--)  Wrappers/Python/ccpi/framework/framework.py (renamed from Wrappers/Python/ccpi/framework.py) | 216
-rw-r--r--  Wrappers/Python/ccpi/optimisation/Algorithms.py | 362
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py | 7
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py | 12
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/__init__.py | 3
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/funcs.py | 47
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py | 141
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py | 67
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py | 819
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py | 22
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/Operator.py | 23
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py | 42
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/__init__.py | 12
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/ops.py | 11
-rwxr-xr-x  Wrappers/Python/ccpi/processors.py | 6
-rw-r--r--  Wrappers/Python/conda-recipe/conda_build_config.yaml | 2
-rw-r--r--  Wrappers/Python/conda-recipe/meta.yaml | 7
-rw-r--r--  Wrappers/Python/setup.py | 6
-rwxr-xr-x  Wrappers/Python/test/test_BlockDataContainer.py | 284
-rw-r--r--  Wrappers/Python/test/test_BlockOperator.py | 290
-rwxr-xr-x  Wrappers/Python/test/test_DataContainer.py | 69
-rwxr-xr-x  Wrappers/Python/test/test_DataProcessor.py | 76
-rw-r--r--  Wrappers/Python/test/test_Operator.py | 24
-rw-r--r--  Wrappers/Python/wip/CGLS_tikhonov.py | 196
-rw-r--r--  Wrappers/Python/wip/CreatePhantom.py | 242
-rwxr-xr-x  Wrappers/Python/wip/demo_gradient_descent.py | 295
30 files changed, 2361 insertions, 1276 deletions
diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index d9bd16b..0000000
--- a/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-*.py text eol=lf
diff --git a/README.md b/README.md
index bfbdbbd..c3ba5ea 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,7 @@ In `ccpi.framework` we define a number of common classes normally used in tomogr
* `Operator`: A class specifying a (currently linear) operator
* `Function`: A class specifying mathematical functions such as a least squares data fidelity.
- * `Algorithm`: Implementation of an optimisation algorithm to solve a particular generic optimisation problem. These are currently python functions by may be changed to operators in another release.
+ * `Algorithm`: Implementation of an iterative optimisation algorithm to solve a particular generic optimisation problem. Algorithms are iterable Python objects which can be run in a `for` loop, and can be stopped and warm restarted.
#### `Operator`
@@ -87,9 +87,27 @@ In `ccpi.framework` we define a number of common classes normally used in tomogr
#### `Algorithm`
- A number of generic algorithm implementations are provided including CGLS and FISTA. An algorithm
- is designed for a particular generic optimisation problem accepts and number of `function`s and/or
- `operator`s as input to define a specific instance of the generic optimisation problem to be solved.
+ A number of generic algorithm implementations are provided, including Gradient Descent, CGLS and FISTA. An algorithm
+ is designed for a particular generic optimisation problem and accepts a number of `Function`s and/or
+ `Operator`s as input to define a specific instance of the generic optimisation problem to be solved.
+
+ They are iterable objects which can be run in a `for` loop. The user can provide a stopping criterion other than the default `max_iteration`.
+
+ New algorithms can be easily created by extending the `Algorithm` class. The user is required to implement only 4 methods: `set_up`, `__init__`, `update` and `update_objective`.
+
+ * `set_up` and `__init__` are used to configure the algorithm
+ * `update` is the actual iteration updating the solution
+ * `update_objective` defines how the objective is calculated.
+
+ For example, the `update` and `update_objective` methods of the Gradient Descent algorithm to minimise a `Function` are simply:
+ ```python
+ def update(self):
+ self.x += -self.rate * self.objective_function.gradient(self.x)
+ def update_objective(self):
+ self.loss.append(self.objective_function(self.x))
+ ```
+
+ The `Algorithm` class provides the infrastructure to continue the iteration, to access the values of the objective function at each iteration, and to record the time taken by each iteration.
#### Examples
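
> Editor's sketch (not part of the commit): to make the extension mechanism described in the README hunk above concrete, a minimal custom algorithm built on the new `Algorithm` base class might look as follows. `MyGradientDescent` is a hypothetical name; only the four methods the README lists are implemented, and the usage lines assume an objective `f` and a starting `DataContainer` `x0`.

```python
from ccpi.optimisation.algorithms import Algorithm

class MyGradientDescent(Algorithm):
    '''hypothetical algorithm: plain gradient descent written as an Algorithm subclass'''
    def __init__(self, x_init=None, objective_function=None, rate=None, **kwargs):
        super(MyGradientDescent, self).__init__()
        if x_init is not None and objective_function is not None and rate is not None:
            self.set_up(x_init, objective_function, rate)

    def set_up(self, x_init, objective_function, rate):
        # configure the algorithm: starting point, objective and step size
        self.x = x_init.copy()
        self.objective_function = objective_function
        self.rate = rate
        self.loss.append(objective_function(x_init))

    def update(self):
        # one iteration: a gradient step on the current estimate
        self.x += -self.rate * self.objective_function.gradient(self.x)

    def update_objective(self):
        # record the objective value reached at this iteration
        self.loss.append(self.objective_function(self.x))

# usage, assuming f is a Function with a gradient and x0 a DataContainer:
#   algo = MyGradientDescent(x_init=x0, objective_function=f, rate=0.1)
#   algo.max_iteration = 100
#   algo.run(100, verbose=False)
```

An instance can be driven with `run(n)` or iterated directly in a `for` loop; raising `max_iteration` later warm-restarts the iteration from where it stopped.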
diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
new file mode 100755
index 0000000..b9f5c5f
--- /dev/null
+++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 5 16:04:45 2019
+
+@author: ofn77899
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy
+from numbers import Number
+import functools
+#from ccpi.framework import AcquisitionData, ImageData
+#from ccpi.optimisation.operators import Operator, LinearOperator
+
+class BlockDataContainer(object):
+    '''Class to hold DataContainers as a column vector'''
+ __array_priority__ = 1
+ def __init__(self, *args, **kwargs):
+ ''''''
+ self.containers = args
+ self.index = 0
+ #shape = kwargs.get('shape', None)
+ #if shape is None:
+ # shape = (len(args),1)
+ shape = (len(args),1)
+ self.shape = shape
+ #print (self.shape)
+ n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
+ if len(args) != n_elements:
+ raise ValueError(
+ 'Dimension and size do not match: expected {} got {}'
+ .format(n_elements, len(args)))
+
+
+ def __iter__(self):
+ '''BlockDataContainer is Iterable'''
+ return self
+ def next(self):
+ '''python2 backwards compatibility'''
+ return self.__next__()
+ def __next__(self):
+ try:
+ out = self[self.index]
+ except IndexError as ie:
+ raise StopIteration()
+ self.index+=1
+ return out
+
+ def is_compatible(self, other):
+ '''basic check if the size of the 2 objects fit'''
+ if isinstance(other, Number):
+ return True
+ elif isinstance(other, list):
+ for ot in other:
+ if not isinstance(ot, (Number,\
+ numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\
+ numpy.float, numpy.float16, numpy.float32, numpy.float64, \
+ numpy.complex)):
+ raise ValueError('List/ numpy array can only contain numbers {}'\
+ .format(type(ot)))
+ return len(self.containers) == len(other)
+ elif isinstance(other, numpy.ndarray):
+ return self.shape == other.shape
+ return len(self.containers) == len(other.containers)
+
+ def get_item(self, row):
+ if row > self.shape[0]:
+ raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
+ return self.containers[row]
+
+ def __getitem__(self, row):
+ return self.get_item(row)
+
+ def add(self, other, *args, **kwargs):
+ assert self.is_compatible(other)
+ out = kwargs.get('out', None)
+ if isinstance(other, Number):
+ return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(
+ *[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
+ shape=self.shape)
+
+ def subtract(self, other, *args, **kwargs):
+ assert self.is_compatible(other)
+ out = kwargs.get('out', None)
+ if isinstance(other, Number):
+ return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
+ shape=self.shape)
+
+ def multiply(self, other, *args, **kwargs):
+ self.is_compatible(other)
+ out = kwargs.get('out', None)
+ if isinstance(other, Number):
+ return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ elif isinstance(other, list):
+ return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ elif isinstance(other, numpy.ndarray):
+ return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
+ shape=self.shape)
+
+ def divide(self, other, *args, **kwargs):
+ self.is_compatible(other)
+ out = kwargs.get('out', None)
+ if isinstance(other, Number):
+ return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
+ shape=self.shape)
+
+ def power(self, other, *args, **kwargs):
+ assert self.is_compatible(other)
+ out = kwargs.get('out', None)
+ if isinstance(other, Number):
+ return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
+
+ def maximum(self,other, *args, **kwargs):
+ assert self.is_compatible(other)
+ out = kwargs.get('out', None)
+ if isinstance(other, Number):
+ return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
+
+ ## unary operations
+ def abs(self, *args, **kwargs):
+ out = kwargs.get('out', None)
+ return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ def sign(self, *args, **kwargs):
+ out = kwargs.get('out', None)
+ return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ def sqrt(self, *args, **kwargs):
+ out = kwargs.get('out', None)
+ return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ def conjugate(self, out=None):
+ return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape)
+
+ ## reductions
+ def sum(self, *args, **kwargs):
+ return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers])
+ def squared_norm(self):
+ y = numpy.asarray([el.squared_norm() for el in self.containers])
+ return y.sum()
+ def norm(self):
+ return numpy.sqrt(self.squared_norm())
+ def copy(self):
+ '''alias of clone'''
+ return self.clone()
+ def clone(self):
+ return type(self)(*[el.copy() for el in self.containers], shape=self.shape)
+
+ def __add__(self, other):
+ return self.add( other )
+ # __radd__
+
+ def __sub__(self, other):
+ return self.subtract( other )
+ # __rsub__
+
+ def __mul__(self, other):
+ return self.multiply(other)
+ # __rmul__
+
+ def __div__(self, other):
+ return self.divide(other)
+ # __rdiv__
+ def __truediv__(self, other):
+ return self.divide(other)
+
+ def __pow__(self, other):
+ return self.power(other)
+ # reverse operand
+ def __radd__(self, other):
+ '''Reverse addition
+
+ to make sure that this method is called rather than the __mul__ of a numpy array
+ the class constant __array_priority__ must be set > 0
+ https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
+ '''
+ return self + other
+ # __radd__
+
+ def __rsub__(self, other):
+ '''Reverse subtraction
+
+ to make sure that this method is called rather than the __mul__ of a numpy array
+ the class constant __array_priority__ must be set > 0
+ https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
+ '''
+ return (-1 * self) + other
+ # __rsub__
+
+ def __rmul__(self, other):
+ '''Reverse multiplication
+
+ to make sure that this method is called rather than the __mul__ of a numpy array
+ the class constant __array_priority__ must be set > 0
+ https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
+ '''
+ return self * other
+ # __rmul__
+
+ def __rdiv__(self, other):
+ '''Reverse division
+
+ to make sure that this method is called rather than the __mul__ of a numpy array
+ the class constant __array_priority__ must be set > 0
+ https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
+ '''
+ return pow(self / other, -1)
+ # __rdiv__
+ def __rtruediv__(self, other):
+ '''Reverse truedivision
+
+ to make sure that this method is called rather than the __mul__ of a numpy array
+ the class constant __array_priority__ must be set > 0
+ https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
+ '''
+ return self.__rdiv__(other)
+
+ def __rpow__(self, other):
+ '''Reverse power
+
+ to make sure that this method is called rather than the __mul__ of a numpy array
+ the class constant __array_priority__ must be set > 0
+ https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
+ '''
+ return other.power(self)
+
+ def __iadd__(self, other):
+ '''Inline addition'''
+ if isinstance (other, BlockDataContainer):
+ for el,ot in zip(self.containers, other.containers):
+ el += ot
+ elif isinstance(other, Number):
+ for el in self.containers:
+ el += other
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ self.is_compatible(other)
+ for el,ot in zip(self.containers, other):
+ el += ot
+ return self
+ # __iadd__
+
+ def __isub__(self, other):
+ '''Inline subtraction'''
+ if isinstance (other, BlockDataContainer):
+ for el,ot in zip(self.containers, other.containers):
+ el -= ot
+ elif isinstance(other, Number):
+ for el in self.containers:
+ el -= other
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ assert self.is_compatible(other)
+ for el,ot in zip(self.containers, other):
+ el -= ot
+ return self
+ # __isub__
+
+ def __imul__(self, other):
+ '''Inline multiplication'''
+ if isinstance (other, BlockDataContainer):
+ for el,ot in zip(self.containers, other.containers):
+ el *= ot
+ elif isinstance(other, Number):
+ for el in self.containers:
+ el *= other
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ assert self.is_compatible(other)
+ for el,ot in zip(self.containers, other):
+ el *= ot
+ return self
+ # __imul__
+
+ def __idiv__(self, other):
+ '''Inline division'''
+ if isinstance (other, BlockDataContainer):
+ for el,ot in zip(self.containers, other.containers):
+ el /= ot
+ elif isinstance(other, Number):
+ for el in self.containers:
+ el /= other
+ elif isinstance(other, list) or isinstance(other, numpy.ndarray):
+ assert self.is_compatible(other)
+ for el,ot in zip(self.containers, other):
+ el /= ot
+ return self
+ # __rdiv__
+ def __itruediv__(self, other):
+ '''Inline truedivision'''
+ return self.__idiv__(other)
+
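
> Editor's sketch (not part of the commit): a brief usage example of the new `BlockDataContainer`, assuming only that `DataContainer` accepts a plain numpy array; it exercises the algebra defined in this file.

```python
import numpy
from ccpi.framework import DataContainer, BlockDataContainer

x = DataContainer(numpy.ones((2, 3)))
y = DataContainer(2 * numpy.ones((2, 3)))

bdc = BlockDataContainer(x, y)     # column of two containers, shape (2, 1)

s = bdc + bdc                      # element-wise algebra returns a new BlockDataContainer
t = 0.5 * bdc                      # scalars broadcast to every container

print(bdc.squared_norm())          # sum of the containers' squared norms
print(bdc.norm())                  # square root of the above

for el in t:                       # BlockDataContainer is iterable
    print(el.as_array())
```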
diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py
new file mode 100755
index 0000000..4683c21
--- /dev/null
+++ b/Wrappers/Python/ccpi/framework/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 5 16:00:18 2019
+
+@author: ofn77899
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy
+import sys
+from datetime import timedelta, datetime
+import warnings
+from functools import reduce
+
+from .framework import DataContainer
+from .framework import ImageData, AcquisitionData
+from .framework import ImageGeometry, AcquisitionGeometry
+from .framework import find_key, message
+from .framework import DataProcessor
+from .framework import AX, PixelByPixelDataProcessor, CastDataContainer
+from .BlockDataContainer import BlockDataContainer
diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework/framework.py
index dab2dd9..029a80d 100644..100755
--- a/Wrappers/Python/ccpi/framework.py
+++ b/Wrappers/Python/ccpi/framework/framework.py
@@ -54,7 +54,8 @@ class ImageGeometry(object):
center_x=0,
center_y=0,
center_z=0,
- channels=1):
+ channels=1,
+ dimension_labels=None):
self.voxel_num_x = voxel_num_x
self.voxel_num_y = voxel_num_y
@@ -66,6 +67,7 @@ class ImageGeometry(object):
self.center_y = center_y
self.center_z = center_z
self.channels = channels
+ self.dimension_labels = dimension_labels
def get_min_x(self):
return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x
@@ -111,8 +113,14 @@ class ImageGeometry(object):
repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z)
repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z)
return repres
-
-
+ def allocate(self, value=0, dimension_labels=None):
+ '''allocates an ImageData according to the size expressed in the instance'''
+ if dimension_labels is None:
+ dimension_labels = self.dimension_labels
+ out = ImageData(geometry=self, dimension_labels=dimension_labels)
+ if value != 0:
+ out += value
+ return out
class AcquisitionGeometry(object):
def __init__(self,
@@ -126,7 +134,8 @@ class AcquisitionGeometry(object):
dist_source_center=None,
dist_center_detector=None,
channels=1,
- angle_unit='degree'
+ angle_unit='degree',
+ dimension_labels=None
):
"""
General inputs for standard type projection geometries
@@ -167,6 +176,7 @@ class AcquisitionGeometry(object):
self.pixel_size_v = pixel_size_v
self.channels = channels
+ self.dimension_labels = dimension_labels
def clone(self):
'''returns a copy of the AcquisitionGeometry'''
@@ -192,9 +202,14 @@ class AcquisitionGeometry(object):
repres += "distance center-detector: {0}\n".format(self.dist_source_center)
repres += "number of channels: {0}\n".format(self.channels)
return repres
-
-
-
+ def allocate(self, value=0, dimension_labels=None):
+ '''allocates an AcquisitionData according to the size expressed in the instance'''
+ if dimension_labels is None:
+ dimension_labels = self.dimension_labels
+ out = AcquisitionData(geometry=self, dimension_labels=dimension_labels)
+ if value != 0:
+ out += value
+ return out
class DataContainer(object):
'''Generic class to hold data
@@ -375,7 +390,8 @@ class DataContainer(object):
return self.shape == other.shape
## algebra
- def __add__(self, other , out=None, *args, **kwargs):
+ def __add__(self, other, *args, **kwargs):
+ out = kwargs.get('out', None)
if issubclass(type(other), DataContainer):
if self.check_dimensions(other):
out = self.as_array() + other.as_array()
@@ -632,8 +648,8 @@ class DataContainer(object):
## binary operations
- def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs):
-
+ def pixel_wise_binary(self, pwop, x2, *args, **kwargs):
+ out = kwargs.get('out', None)
if out is None:
if isinstance(x2, (int, float, complex)):
out = pwop(self.as_array() , x2 , *args, **kwargs )
@@ -651,7 +667,8 @@ class DataContainer(object):
elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer):
if self.check_dimensions(out) and self.check_dimensions(x2):
- pwop(self.as_array(), x2.as_array(), out=out.as_array(), *args, **kwargs )
+ kwargs['out'] = out.as_array()
+ pwop(self.as_array(), x2.as_array(), *args, **kwargs )
#return type(self)(out.as_array(),
# deep_copy=False,
# dimension_labels=self.dimension_labels,
@@ -661,14 +678,15 @@ class DataContainer(object):
raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)):
if self.check_dimensions(out):
-
- pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs )
+ kwargs['out']=out.as_array()
+ pwop(self.as_array(), x2, *args, **kwargs )
return out
else:
raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
elif issubclass(type(out), numpy.ndarray):
if self.array.shape == out.shape and self.array.dtype == out.dtype:
- pwop(self.as_array(), x2 , out=out, *args, **kwargs)
+ kwargs['out'] = out
+ pwop(self.as_array(), x2, *args, **kwargs)
#return type(self)(out,
# deep_copy=False,
# dimension_labels=self.dimension_labels,
@@ -676,26 +694,27 @@ class DataContainer(object):
else:
raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
- def add(self, other , out=None, *args, **kwargs):
- return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs)
+ def add(self, other, *args, **kwargs):
+ return self.pixel_wise_binary(numpy.add, other, *args, **kwargs)
- def subtract(self, other, out=None , *args, **kwargs):
- return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs)
+ def subtract(self, other, *args, **kwargs):
+ return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs)
- def multiply(self, other , out=None, *args, **kwargs):
- return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs)
+ def multiply(self, other, *args, **kwargs):
+ return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs)
- def divide(self, other , out=None ,*args, **kwargs):
- return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs)
+ def divide(self, other, *args, **kwargs):
+ return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs)
- def power(self, other , out=None, *args, **kwargs):
- return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs)
+ def power(self, other, *args, **kwargs):
+ return self.pixel_wise_binary(numpy.power, other, *args, **kwargs)
- def maximum(self,x2, out=None, *args, **kwargs):
- return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs)
+ def maximum(self, x2, *args, **kwargs):
+ return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs)
## unary operations
- def pixel_wise_unary(self,pwop, out=None, *args, **kwargs):
+ def pixel_wise_unary(self, pwop, *args, **kwargs):
+ out = kwargs.get('out', None)
if out is None:
out = pwop(self.as_array() , *args, **kwargs )
return type(self)(out,
@@ -704,31 +723,35 @@ class DataContainer(object):
geometry=self.geometry)
elif issubclass(type(out), DataContainer):
if self.check_dimensions(out):
- pwop(self.as_array(), out=out.as_array(), *args, **kwargs )
+ kwargs['out'] = out.as_array()
+ pwop(self.as_array(), *args, **kwargs )
else:
raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
elif issubclass(type(out), numpy.ndarray):
if self.array.shape == out.shape and self.array.dtype == out.dtype:
- pwop(self.as_array(), out=out, *args, **kwargs)
+ kwargs['out'] = out
+ pwop(self.as_array(), *args, **kwargs)
else:
raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
- def abs(self, out=None, *args, **kwargs):
- return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs)
-
- def sign(self, out=None, *args, **kwargs):
- return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs)
+ def abs(self, *args, **kwargs):
+ return self.pixel_wise_unary(numpy.abs, *args, **kwargs)
- def sqrt(self, out=None, *args, **kwargs):
- return self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs)
+ def sign(self, *args, **kwargs):
+ return self.pixel_wise_unary(numpy.sign, *args, **kwargs)
+ def sqrt(self, *args, **kwargs):
+ return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs)
+
+ def conjugate(self, *args, **kwargs):
+ return self.pixel_wise_unary(numpy.conjugate, *args, **kwargs)
#def __abs__(self):
# operation = FM.OPERATION.ABS
# return self.callFieldMath(operation, None, self.mask, self.maskOnValue)
# __abs__
## reductions
- def sum(self, out=None, *args, **kwargs):
+ def sum(self, *args, **kwargs):
return self.as_array().sum(*args, **kwargs)
def squared_norm(self):
'''return the squared euclidean norm of the DataContainer viewed as a vector'''
@@ -739,6 +762,14 @@ class DataContainer(object):
def norm(self):
'''return the euclidean norm of the DataContainer viewed as a vector'''
return numpy.sqrt(self.squared_norm())
+ def dot(self, other, *args, **kwargs):
+ '''return the inner product of 2 DataContainers viewed as vectors'''
+ if self.shape == other.shape:
+ return numpy.dot(self.as_array().ravel(), other.as_array().ravel())
+ else:
+ raise ValueError('Shapes are not aligned: {} != {}'.format(self.shape, other.shape))
+
+
@@ -904,8 +935,11 @@ class AcquisitionData(DataContainer):
elif dim == 'horizontal':
shape.append(horiz)
if len(shape) != len(dimension_labels):
- raise ValueError('Missing {0} axes'.format(
- len(dimension_labels) - len(shape)))
+ raise ValueError('Missing {0} axes.\nExpected {1} got {2}'\
+ .format(
+ len(dimension_labels) - len(shape),
+ dimension_labels, shape)
+ )
shape = tuple(shape)
array = numpy.zeros( shape , dtype=numpy.float32)
@@ -992,9 +1026,9 @@ class DataProcessor(object):
'''
raise NotImplementedError('Implement basic checks for input DataContainer')
- def get_output(self):
+ def get_output(self, out=None):
for k,v in self.__dict__.items():
- if v is None:
+ if v is None and k != 'output':
raise ValueError('Key {0} is None'.format(k))
shouldRun = False
if self.runTime == -1:
@@ -1005,10 +1039,18 @@ class DataProcessor(object):
# CHECK this
if self.store_output and shouldRun:
self.runTime = datetime.now()
- self.output = self.process()
- return self.output
+ try:
+ self.output = self.process(out=out)
+ return self.output
+ except TypeError as te:
+ self.output = self.process()
+ return self.output
self.runTime = datetime.now()
- return self.process()
+ try:
+ return self.process(out=out)
+ except TypeError as te:
+ return self.process()
+
def set_input_processor(self, processor):
if issubclass(type(processor), DataProcessor):
@@ -1029,7 +1071,7 @@ class DataProcessor(object):
dsi = self.input
return dsi
- def process(self):
+ def process(self, out=None):
raise NotImplementedError('process must be implemented')
@@ -1076,17 +1118,58 @@ class AX(DataProcessor):
def check_input(self, dataset):
return True
- def process(self):
+ def process(self, out=None):
dsi = self.get_input()
a = self.scalar
+ if out is None:
+ y = DataContainer( a * dsi.as_array() , True,
+ dimension_labels=dsi.dimension_labels )
+ #self.setParameter(output_dataset=y)
+ return y
+ else:
+ out.fill(a * dsi.as_array())
+
+
+###### Example of DataProcessors
+
+class CastDataContainer(DataProcessor):
+ '''Example DataProcessor
+ Cast a DataContainer array to a different type.
+
+ y := cast(x, dtype)
+ where:
+
+ dtype is a numpy data type
+
+ x is a DataContainer.
+ '''
+
+ def __init__(self, dtype=None):
+ kwargs = {'dtype':dtype,
+ 'input':None,
+ }
- y = DataContainer( a * dsi.as_array() , True,
- dimension_labels=dsi.dimension_labels )
- #self.setParameter(output_dataset=y)
- return y
+ #DataProcessor.__init__(self, **kwargs)
+ super(CastDataContainer, self).__init__(**kwargs)
+
+ def check_input(self, dataset):
+ return True
+
+ def process(self, out=None):
+
+ dsi = self.get_input()
+ dtype = self.dtype
+ if out is None:
+ y = numpy.asarray(dsi.as_array(), dtype=dtype)
+
+ return type(dsi)(numpy.asarray(dsi.as_array(), dtype=dtype),
+ dimension_labels=dsi.dimension_labels )
+ else:
+ out.fill(numpy.asarray(dsi.as_array(), dtype=dtype))
+
class PixelByPixelDataProcessor(DataProcessor):
@@ -1109,7 +1192,7 @@ class PixelByPixelDataProcessor(DataProcessor):
def check_input(self, dataset):
return True
- def process(self):
+ def process(self, out=None):
pyfunc = self.pyfunc
dsi = self.get_input()
@@ -1168,12 +1251,22 @@ if __name__ == '__main__':
#ax.apply()
print ("ax in {0} out {1}".format(c.as_array().flatten(),
ax.get_output().as_array().flatten()))
+
+ cast = CastDataContainer(dtype=numpy.float32)
+ cast.set_input(c)
+ out = cast.get_output()
+ out *= 0
axm = AX()
axm.scalar = 0.5
- axm.set_input(c)
+ axm.set_input_processor(cast)
+ axm.get_output(out)
#axm.apply()
print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array()))
+ # check out in DataSetProcessor
+ #a = numpy.asarray([i for i in range( size )])
+
+
# create a PixelByPixelDataProcessor
#define a python function which will take only one input (the pixel value)
@@ -1255,3 +1348,22 @@ if __name__ == '__main__':
sino = AcquisitionData(geometry=sgeometry)
sino2 = sino.clone()
+ a0 = numpy.asarray([i for i in range(2*3*4)])
+ a1 = numpy.asarray([2*i for i in range(2*3*4)])
+
+
+ ds0 = DataContainer(numpy.reshape(a0,(2,3,4)))
+ ds1 = DataContainer(numpy.reshape(a1,(2,3,4)))
+
+ numpy.testing.assert_equal(ds0.dot(ds1), a0.dot(a1))
+
+ a2 = numpy.asarray([2*i for i in range(2*3*5)])
+ ds2 = DataContainer(numpy.reshape(a2,(2,3,5)))
+
+# # it should fail if the shape is wrong
+# try:
+# ds2.dot(ds0)
+# self.assertTrue(False)
+# except ValueError as ve:
+# self.assertTrue(True)
+
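
> Editor's sketch (not part of the commit): the `allocate` methods and the `dot` product introduced in this file can be exercised roughly as follows. The geometry sizes are arbitrary, and a 2D `ImageGeometry` is assumed to be accepted by `ImageData`.

```python
import numpy
from ccpi.framework import ImageGeometry, DataContainer

ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3)
zeros = ig.allocate()         # ImageData of the right shape, filled with 0
ones = ig.allocate(1)         # non-zero values are added to the freshly allocated data

a = DataContainer(numpy.arange(6, dtype=numpy.float32).reshape(2, 3))
b = DataContainer(2 * numpy.arange(6, dtype=numpy.float32).reshape(2, 3))
print(a.dot(b))               # inner product of the two containers viewed as vectors
```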
diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py
deleted file mode 100644
index 0a5cac6..0000000
--- a/Wrappers/Python/ccpi/optimisation/Algorithms.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import numpy
-import time
-from ccpi.optimisation.funcs import ZeroFun
-
-class Algorithm(object):
- '''Base class for iterative algorithms
-
- provides the minimal infrastructure.
- Algorithms are iterables so can be easily run in a for loop. They will
- stop as soon as the stop cryterion is met.
- The user is required to implement the set_up, __init__, update and
- should_stop and update_objective methods
- '''
-
- def __init__(self):
- self.iteration = 0
- self.stop_cryterion = 'max_iter'
- self.__max_iteration = 0
- self.__loss = []
- self.memopt = False
- self.timing = []
- def set_up(self, *args, **kwargs):
- raise NotImplementedError()
- def update(self):
- raise NotImplementedError()
-
- def should_stop(self):
- '''default stopping cryterion: number of iterations'''
- return self.iteration >= self.max_iteration
-
- def __iter__(self):
- return self
- def next(self):
- '''python2 backwards compatibility'''
- return self.__next__()
- def __next__(self):
- if self.should_stop():
- raise StopIteration()
- else:
- time0 = time.time()
- self.update()
- self.timing.append( time.time() - time0 )
- # TODO update every N iterations
- self.update_objective()
- self.iteration += 1
- def get_output(self):
- '''Returns the solution found'''
- return self.x
- def get_current_loss(self):
- '''Returns the current value of the loss function'''
- return self.__loss[-1]
- def get_current_objective(self):
- return self.get_current_loss()
- def update_objective(self):
- raise NotImplementedError()
- @property
- def loss(self):
- return self.__loss
- @property
- def objective(self):
- return self.__loss
- @property
- def max_iteration(self):
- return self.__max_iteration
- @max_iteration.setter
- def max_iteration(self, value):
- assert isinstance(value, int)
- self.__max_iteration = value
- def run(self, iterations, callback=None):
- '''run n iterations and update the user with the callback if specified'''
- self.max_iteration += iterations
- for _ in self:
- if callback is not None:
- callback(self.iteration, self.get_current_loss())
-
-class GradientDescent(Algorithm):
- '''Implementation of a simple Gradient Descent algorithm
- '''
-
- def __init__(self, **kwargs):
- '''initialisation can be done at creation time if all
- proper variables are passed or later with set_up'''
- super(GradientDescent, self).__init__()
- self.x = None
- self.rate = 0
- self.objective_function = None
- self.regulariser = None
- args = ['x_init', 'objective_function', 'rate']
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(x_init=kwargs['x_init'],
- objective_function=kwargs['objective_function'],
- rate=kwargs['rate'])
-
- def should_stop(self):
- '''stopping cryterion, currently only based on number of iterations'''
- return self.iteration >= self.max_iteration
-
- def set_up(self, x_init, objective_function, rate):
- '''initialisation of the algorithm'''
- self.x = x_init.copy()
- if self.memopt:
- self.x_update = x_init.copy()
- self.objective_function = objective_function
- self.rate = rate
- self.loss.append(objective_function(x_init))
-
- def update(self):
- '''Single iteration'''
- if self.memopt:
- self.objective_function.gradient(self.x, out=self.x_update)
- self.x_update *= -self.rate
- self.x += self.x_update
- else:
- self.x += -self.rate * self.objective_function.grad(self.x)
-
- def update_objective(self):
- self.loss.append(self.objective_function(self.x))
-
-
-
-class FISTA(Algorithm):
- '''Fast Iterative Shrinkage-Thresholding Algorithm
-
- Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding
- algorithm for linear inverse problems.
- SIAM journal on imaging sciences,2(1), pp.183-202.
-
- Parameters:
- x_init: initial guess
- f: data fidelity
- g: regularizer
- h:
- opt: additional algorithm
- '''
-
- def __init__(self, **kwargs):
- '''initialisation can be done at creation time if all
- proper variables are passed or later with set_up'''
- super(FISTA, self).__init__()
- self.f = None
- self.g = None
- self.invL = None
- self.t_old = 1
- args = ['x_init', 'f', 'g', 'opt']
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(x_init=kwargs['x_init'],
- f=kwargs['f'],
- g=kwargs['g'],
- opt=kwargs['opt'])
-
- def set_up(self, x_init, f=None, g=None, opt=None):
-
- # default inputs
- if f is None:
- self.f = ZeroFun()
- else:
- self.f = f
- if g is None:
- g = ZeroFun()
- else:
- self.g = g
-
- # algorithmic parameters
- if opt is None:
- opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False}
-
- self.max_iteration = opt['iter'] if 'iter' in opt.keys() else 1000
- self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
- memopt = opt['memopt'] if 'memopt' in opt.keys() else False
- self.memopt = memopt
-
- # initialization
- if memopt:
- self.y = x_init.clone()
- self.x_old = x_init.clone()
- self.x = x_init.clone()
- self.u = x_init.clone()
- else:
- self.x_old = x_init.copy()
- self.y = x_init.copy()
-
- #timing = numpy.zeros(max_iter)
- #criter = numpy.zeros(max_iter)
-
-
- self.invL = 1/f.L
-
- self.t_old = 1
-
- def update(self):
- # algorithm loop
- #for it in range(0, max_iter):
-
- if self.memopt:
- # u = y - invL*f.grad(y)
- # store the result in x_old
- self.f.gradient(self.y, out=self.u)
- self.u.__imul__( -self.invL )
- self.u.__iadd__( self.y )
- # x = g.prox(u,invL)
- self.g.proximal(self.u, self.invL, out=self.x)
-
- self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
-
- # y = x + (t_old-1)/t*(x-x_old)
- self.x.subtract(self.x_old, out=self.y)
- self.y.__imul__ ((self.t_old-1)/self.t)
- self.y.__iadd__( self.x )
-
- self.x_old.fill(self.x)
- self.t_old = self.t
-
-
- else:
- u = self.y - self.invL*self.f.grad(self.y)
-
- self.x = self.g.prox(u,self.invL)
-
- self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
-
- self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old)
-
- self.x_old = self.x.copy()
- self.t_old = self.t
-
- def update_objective(self):
- self.loss.append( self.f(self.x) + self.g(self.x) )
-
-class FBPD(Algorithm):
- '''FBPD Algorithm
-
- Parameters:
- x_init: initial guess
- f: constraint
- g: data fidelity
- h: regularizer
- opt: additional algorithm
- '''
- constraint = None
- data_fidelity = None
- regulariser = None
- def __init__(self, **kwargs):
- pass
- def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\
- regulariser=None, opt=None):
-
- # default inputs
- if constraint is None:
- self.constraint = ZeroFun()
- else:
- self.constraint = constraint
- if data_fidelity is None:
- data_fidelity = ZeroFun()
- else:
- self.data_fidelity = data_fidelity
- if regulariser is None:
- self.regulariser = ZeroFun()
- else:
- self.regulariser = regulariser
-
- # algorithmic parameters
-
-
- # step-sizes
- self.tau = 2 / (self.data_fidelity.L + 2)
- self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L
-
- self.inv_sigma = 1/self.sigma
-
- # initialization
- self.x = x_init
- self.y = operator.direct(self.x)
-
-
- def update(self):
-
- # primal forward-backward step
- x_old = self.x
- self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) )
- self.x = self.constraint.prox(self.x, self.tau);
-
- # dual forward-backward step
- self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old);
- self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma);
-
- # time and criterion
- self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x))
-
-class CGLS(Algorithm):
-
- '''Conjugate Gradient Least Squares algorithm
-
- Parameters:
- x_init: initial guess
- operator: operator for forward/backward projections
- data: data to operate on
- '''
- def __init__(self, **kwargs):
- super(CGLS, self).__init__()
- self.x = kwargs.get('x_init', None)
- self.operator = kwargs.get('operator', None)
- self.data = kwargs.get('data', None)
- if self.x is not None and self.operator is not None and \
- self.data is not None:
- print ("Calling from creator")
- return self.set_up(x_init =kwargs['x_init'],
- operator=kwargs['operator'],
- data =kwargs['data'])
-
- def set_up(self, x_init, operator , data ):
-
- self.r = data.copy()
- self.x = x_init.copy()
-
- self.operator = operator
- self.d = operator.adjoint(self.r)
-
- self.normr2 = self.d.norm()
-
- def should_stop(self):
- '''stopping cryterion, currently only based on number of iterations'''
- return self.iteration >= self.max_iteration
-
- def update(self):
-
- Ad = self.operator.direct(self.d)
- alpha = self.normr2/Ad.norm()
- self.x += alpha * self.d
- self.r -= alpha * Ad
- s = self.operator.adjoint(self.r)
-
- normr2_new = s.norm()
- beta = normr2_new/self.normr2
- self.normr2 = normr2_new
- self.d = s + beta*self.d
-
- def update_objective(self):
- self.loss.append(self.r.norm())
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index 680b268..cc99344 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -140,7 +140,7 @@ class Algorithm(object):
raise ValueError('Update objective interval must be an integer >= 1')
else:
raise ValueError('Update objective interval must be an integer >= 1')
- def run(self, iterations, verbose=False, callback=None):
+ def run(self, iterations, verbose=True, callback=None):
'''run n iterations and update the user with the callback if specified'''
if self.should_stop():
print ("Stop cryterion has been reached.")
@@ -149,8 +149,9 @@ class Algorithm(object):
if verbose:
print ("Iteration {}/{}, objective {}".format(self.iteration,
self.max_iteration, self.get_last_objective()) )
- if callback is not None:
- callback(self.iteration, self.get_last_objective())
+ else:
+ if callback is not None:
+ callback(self.iteration, self.get_last_objective())
i += 1
if i == iterations:
break
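
> Editor's sketch (not part of the commit): with the change above, `run` prints its progress by default and only invokes the callback when verbose printing is switched off. A hypothetical logging callback:

```python
def log_iteration(iteration, objective):
    # hypothetical callback: receives the iteration number and the latest objective value
    print("iteration {:4d}   objective {:.4e}".format(iteration, objective))

# for any configured Algorithm instance `algo`:
#   algo.run(20)                                          # built-in progress printing
#   algo.run(20, verbose=False, callback=log_iteration)   # silent, reports via the callback
```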
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
index 7794b4d..f1e4132 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
@@ -51,13 +51,17 @@ class GradientDescent(Algorithm):
def set_up(self, x_init, objective_function, rate):
'''initialisation of the algorithm'''
self.x = x_init.copy()
- if self.memopt:
- self.x_update = x_init.copy()
self.objective_function = objective_function
self.rate = rate
self.loss.append(objective_function(x_init))
self.iteration = 0
-
+ try:
+ self.memopt = self.objective_function.memopt
+ except AttributeError as ae:
+ self.memopt = False
+ if self.memopt:
+ self.x_update = x_init.copy()
+
def update(self):
'''Single iteration'''
if self.memopt:
@@ -65,7 +69,7 @@ class GradientDescent(Algorithm):
self.x_update *= -self.rate
self.x += self.x_update
else:
- self.x += -self.rate * self.objective_function.grad(self.x)
+ self.x += -self.rate * self.objective_function.gradient(self.x)
def update_objective(self):
self.loss.append(self.objective_function(self.x))
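
> Editor's sketch (not part of the commit): the updated `GradientDescent` now reads `memopt` from the objective function when that attribute exists, and calls `gradient` instead of the deprecated `grad`. The example assumes the existing `Identity` operator from `ccpi.optimisation.ops` can be constructed without arguments.

```python
import numpy
from ccpi.framework import DataContainer
from ccpi.optimisation.ops import Identity
from ccpi.optimisation.funcs import Norm2sq
from ccpi.optimisation.algorithms import GradientDescent

b = DataContainer(numpy.ones((10,), dtype=numpy.float32))
f = Norm2sq(Identity(), b, c=0.5)     # 0.5 * ||x - b||^2; memopt defaults to False

gd = GradientDescent(x_init=b * 0, objective_function=f, rate=0.5)
gd.max_iteration = 20
gd.run(20, verbose=False)
print(gd.get_output().as_array())     # approaches b
```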
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
index 4a3830f..7e500e8 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
@@ -26,4 +26,5 @@ from .Algorithm import Algorithm
from .CGLS import CGLS
from .GradientDescent import GradientDescent
from .FISTA import FISTA
-from .FBPD import FBPD \ No newline at end of file
+from .FBPD import FBPD
+
diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index 9b9fc36..4f84889 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -20,6 +20,7 @@
from ccpi.optimisation.ops import Identity, FiniteDiff2D
import numpy
from ccpi.framework import DataContainer
+import warnings
def isSizeCorrect(data1 ,data2):
@@ -40,8 +41,12 @@ class Function(object):
def __init__(self):
self.L = None
def __call__(self,x, out=None): raise NotImplementedError
- def grad(self, x): raise NotImplementedError
- def prox(self, x, tau): raise NotImplementedError
+ def grad(self, x):
+ warnings.warn("grad method is deprecated. use gradient instead", DeprecationWarning)
+ return self.gradient(x, out=None)
+ def prox(self, x, tau):
+ warnings.warn("prox method is deprecated. use proximal instead", DeprecationWarning)
+ return self.proximal(x,tau,out=None)
def gradient(self, x, out=None): raise NotImplementedError
def proximal(self, x, tau, out=None): raise NotImplementedError
@@ -141,12 +146,20 @@ class Norm2sq(Function):
self.A = A # Should be an operator, default identity
self.b = b # Default zero DataSet?
self.c = c # Default 1.
- self.memopt = memopt
if memopt:
- #self.direct_placehold = A.adjoint(b)
- self.direct_placehold = A.allocate_direct()
- self.adjoint_placehold = A.allocate_adjoint()
-
+ try:
+ self.adjoint_placehold = A.range_geometry().allocate()
+ self.direct_placehold = A.domain_geometry().allocate()
+ self.memopt = True
+ except NameError as ne:
+ warnings.warn(str(ne))
+ self.memopt = False
+ except NotImplementedError as nie:
+ print (nie)
+ warnings.warn(str(nie))
+ self.memopt = False
+ else:
+ self.memopt = False
# Compute the Lipschitz parameter from the operator if possible
# Leave it initialised to None otherwise
@@ -154,11 +167,12 @@ class Norm2sq(Function):
self.L = 2.0*self.c*(self.A.get_max_sing_val()**2)
except AttributeError as ae:
pass
+ except NotImplementedError as noe:
+ pass
- def grad(self,x):
- #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b )
- return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
-
+ #def grad(self,x):
+ # return self.gradient(x, out=None)
+
def __call__(self,x):
#return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
#if out is None:
@@ -176,12 +190,13 @@ class Norm2sq(Function):
self.A.direct(x, out=self.adjoint_placehold)
self.adjoint_placehold.__isub__( self.b )
self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold)
- self.direct_placehold.__imul__(2.0 * self.c)
- # can this be avoided?
- out.fill(self.direct_placehold)
+ #self.direct_placehold.__imul__(2.0 * self.c)
+ ## can this be avoided?
+ #out.fill(self.direct_placehold)
+ self.direct_placehold.multiply(2.0*self.c, out=out)
else:
- return self.grad(x)
-
+ return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+
class ZeroFun(Function):
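
> Editor's sketch (not part of the commit): the base `Function` now forwards the deprecated `grad` and `prox` to `gradient` and `proximal`. The hypothetical `SumSquares` function below (not in the library) shows that the old spellings still work but emit a `DeprecationWarning`.

```python
import numpy
from ccpi.framework import DataContainer
from ccpi.optimisation.funcs import Function

class SumSquares(Function):
    '''hypothetical f(x) = ||x||^2, used only to exercise the new interface'''
    def __init__(self):
        super(SumSquares, self).__init__()
        self.L = 2.0
    def __call__(self, x, out=None):
        return x.squared_norm()
    def gradient(self, x, out=None):
        return 2.0 * x
    def proximal(self, x, tau, out=None):
        return x / (1.0 + 2.0 * tau)

f = SumSquares()
x = DataContainer(numpy.ones((3,), dtype=numpy.float32))

g = f.gradient(x)         # preferred spelling
g_old = f.grad(x)         # still works, emits a DeprecationWarning
p = f.proximal(x, 0.1)
p_old = f.prox(x, 0.1)    # deprecated alias of proximal
```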
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
new file mode 100755
index 0000000..21ea104
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Feb 14 12:36:40 2019
+
+@author: ofn77899
+"""
+#from ccpi.optimisation.ops import Operator
+import numpy
+from numbers import Number
+import functools
+from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer
+from ccpi.optimisation.operators import Operator, LinearOperator
+from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator
+
+
+class BlockOperator(Operator):
+ '''Class to hold a block operator
+
+ Class to hold a number of Operators in a block.
+ The user may specify the shape of the block; by default it is a column vector.
+
+ BlockOperators have a generic shape M x N, and when applied to an
+ Nx1 BlockDataContainer, will yield an Mx1 BlockDataContainer.
+ Notice: BlockDataContainers are only allowed to have the shape N x 1, with
+ N rows and 1 column.
+ '''
+ __array_priority__ = 1
+ def __init__(self, *args, **kwargs):
+ '''
+ Class creator
+
+ Note:
+ Do not include the `self` parameter in the ``Args`` section.
+
+ Args:
+ vararg (Operator): Operators in the block.
+ shape (:obj:`tuple`, optional): If shape is passed the Operators in
+ vararg are considered input in a row-by-row fashion.
+ Shape and number of Operators must match.
+
+ Example:
+ BlockOperator(op0,op1) results in a column block of shape (2,1)
+ BlockOperator(op0,op1,shape=(1,2)) results in a row block of shape (1,2)
+ '''
+ self.operators = args
+ shape = kwargs.get('shape', None)
+ if shape is None:
+ shape = (len(args),1)
+ self.shape = shape
+ n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
+ if len(args) != n_elements:
+ raise ValueError(
+ 'Dimension and size do not match: expected {} got {}'
+ .format(n_elements,len(args)))
+ def get_item(self, row, col):
+ if row > self.shape[0]:
+ raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
+ if col > self.shape[1]:
+ raise ValueError('Requested col {} > max {}'.format(col, self.shape[1]))
+
+ index = row*self.shape[1]+col
+ return self.operators[index]
+
+ def norm(self):
+ norm = [op.norm() for op in self.operators]
+ b = []
+ for i in range(self.shape[0]):
+ b.append([])
+ for j in range(self.shape[1]):
+ b[-1].append(norm[i*self.shape[1]+j])
+ return numpy.asarray(b)
+
+ def direct(self, x, out=None):
+ shape = self.get_output_shape(x.shape)
+ res = []
+ for row in range(self.shape[0]):
+ for col in range(self.shape[1]):
+ if col == 0:
+ prod = self.get_item(row,col).direct(x.get_item(col))
+ else:
+ prod += self.get_item(row,col).direct(x.get_item(col))
+ res.append(prod)
+ return BlockDataContainer(*res, shape=shape)
+
+ def adjoint(self, x, out=None):
+ '''Adjoint operation for the BlockOperator
+
+ BlockOperator may contain both LinearOperator and Operator
+ This method exists in BlockOperator as it is not known what type of
+ Operator it will contain.
+
+ Raises: ValueError if the contained Operators are not linear
+ '''
+ if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True):
+ raise ValueError('Not all operators in Block are linear.')
+ shape = self.get_output_shape(x.shape, adjoint=True)
+ res = []
+ for row in range(self.shape[1]):
+ for col in range(self.shape[0]):
+ if col == 0:
+ prod = self.get_item(row, col).adjoint(x.get_item(col))
+ else:
+ prod += self.get_item(row, col).adjoint(x.get_item(col))
+ res.append(prod)
+ return BlockDataContainer(*res, shape=shape)
+
+ def get_output_shape(self, xshape, adjoint=False):
+ sshape = self.shape[1]
+ oshape = self.shape[0]
+ if adjoint:
+ sshape = self.shape[0]
+ oshape = self.shape[1]
+ if sshape != xshape[0]:
+ raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
+ return (oshape, xshape[-1])
+
+ def __rmul__(self, scalar):
+ '''Defines the left multiplication with a scalar
+
+ Args: scalar (number or iterable containing numbers):
+
+ Returns: a block operator with Scaled Operators inside'''
+ if isinstance (scalar, list) or isinstance(scalar, tuple) or \
+ isinstance(scalar, numpy.ndarray):
+ if len(scalar) != len(self.operators):
+ raise ValueError('dimensions of scalars and operators do not match')
+ scalars = scalar
+ else:
+ scalars = [scalar for _ in self.operators]
+ # create a list of ScaledOperator-s
+ ops = [ v * op for v,op in zip(scalars, self.operators)]
+ #return BlockScaledOperator(self, scalars ,shape=self.shape)
+ return type(self)(*ops, shape=self.shape)
+ @property
+ def T(self):
+ '''Return the transposed of self'''
+ shape = (self.shape[1], self.shape[0])
+ return type(self)(*self.operators, shape=shape)
+
+if __name__ == '__main__':
+ pass
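
> Editor's sketch (not part of the commit): a usage example for the new `BlockOperator`. The `Scale` operator defined inline is hypothetical and stands in for any `LinearOperator` in the package.

```python
import numpy
from ccpi.framework import DataContainer, BlockDataContainer
from ccpi.optimisation.operators import LinearOperator, BlockOperator

class Scale(LinearOperator):
    '''hypothetical linear operator: multiplication by a constant'''
    def __init__(self, s):
        self.s = s
    def direct(self, x, out=None):
        return x * self.s
    def adjoint(self, x, out=None):
        return x * self.s
    def norm(self):
        return abs(self.s)

x = DataContainer(numpy.ones((4,), dtype=numpy.float32))

K = BlockOperator(Scale(2), Scale(3))    # default shape (2,1): a column of operators
y = K.direct(BlockDataContainer(x))      # 2x1 BlockDataContainer holding 2*x and 3*x
z = K.adjoint(y)                         # 1x1 BlockDataContainer holding 2*(2*x) + 3*(3*x)

K2 = 2 * K                               # __rmul__ wraps every block in a ScaledOperator
print(K.norm())                          # per-block norms returned as a numpy array
print(K.T.shape)                         # transposed block shape: (1, 2)
```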
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
new file mode 100644
index 0000000..aeb6c53
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
@@ -0,0 +1,67 @@
+from numbers import Number
+import numpy
+from ccpi.optimisation.operators import ScaledOperator
+import functools
+
+class BlockScaledOperator(ScaledOperator):
+ '''BlockScaledOperator
+
+ A class to represent the scalar multiplication of an Operator with a scalar.
+ It holds an operator and a scalar. Basically it returns the multiplication
+ of the result of direct and adjoint of the operator with the scalar.
+ For the rest it behaves like the operator it holds.
+
+ Args:
+ operator (Operator): a Operator or LinearOperator
+ scalar (Number): a scalar multiplier
+ Example:
+ The scaled operator behaves like the following:
+ sop = ScaledOperator(operator, scalar)
+ sop.direct(x) = scalar * operator.direct(x)
+ sop.adjoint(x) = scalar * operator.adjoint(x)
+ sop.norm() = operator.norm()
+ sop.range_geometry() = operator.range_geometry()
+ sop.domain_geometry() = operator.domain_geometry()
+ '''
+ def __init__(self, operator, scalar, shape=None):
+ if shape is None:
+ shape = operator.shape
+
+ if isinstance(scalar, (list, tuple, numpy.ndarray)):
+ size = functools.reduce(lambda x,y:x*y, shape, 1)
+ if len(scalar) != size:
+ raise ValueError('Scalar and operators size do not match: {}!={}'
+ .format(len(scalar), len(operator)))
+ self.scalar = scalar[:]
+ print ("BlockScaledOperator ", self.scalar)
+ elif isinstance (scalar, Number):
+ self.scalar = scalar
+ else:
+ raise TypeError('expected scalar to be a number or an iterable: got {}'.format(type(scalar)))
+ self.operator = operator
+ self.shape = shape
+ def direct(self, x, out=None):
+ print ("BlockScaledOperator self.scalar", self.scalar)
+ #print ("self.scalar", self.scalar[0]* x.get_item(0).as_array())
+ return self.scalar * (self.operator.direct(x, out=out))
+ def adjoint(self, x, out=None):
+ if self.operator.is_linear():
+ return self.scalar * self.operator.adjoint(x, out=out)
+ else:
+ raise TypeError('Operator is not linear')
+ def norm(self):
+ return numpy.abs(self.scalar) * self.operator.norm()
+ def range_geometry(self):
+ return self.operator.range_geometry()
+ def domain_geometry(self):
+ return self.operator.domain_geometry()
+ @property
+ def T(self):
+ '''Return the transposed of self'''
+ #print ("transpose before" , self.shape)
+ #shape = (self.shape[1], self.shape[0])
+ ##self.shape = shape
+ ##self.operator.shape = shape
+ #print ("transpose" , shape)
+ #return self
+ return type(self)(self.operator.T, self.scalar) \ No newline at end of file
diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py
deleted file mode 100755
index 77abb8c..0000000
--- a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py
+++ /dev/null
@@ -1,819 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Feb 14 12:36:40 2019
-
-@author: ofn77899
-"""
-#from ccpi.optimisation.ops import Operator
-import numpy
-from numbers import Number
-import functools
-from ccpi.framework import AcquisitionData, ImageData
-
-class Operator(object):
- '''Operator that maps from a space X -> Y'''
- def __init__(self, **kwargs):
- self.scalar = 1
- def is_linear(self):
- '''Returns if the operator is linear'''
- return False
- def direct(self,x, out=None):
- raise NotImplementedError
- def size(self):
- # To be defined for specific class
- raise NotImplementedError
- def norm(self):
- raise NotImplementedError
- def allocate_direct(self):
- '''Allocates memory on the Y space'''
- raise NotImplementedError
- def allocate_adjoint(self):
- '''Allocates memory on the X space'''
- raise NotImplementedError
- def range_dim(self):
- raise NotImplementedError
- def domain_dim(self):
- raise NotImplementedError
- def __rmul__(self, other):
- assert isinstance(other, Number)
- self.scalar = other
- return self
-
-class LinearOperator(Operator):
- '''Operator that maps from a space X -> Y'''
- def is_linear(self):
- '''Returns if the operator is linear'''
- return True
- def adjoint(self,x, out=None):
- raise NotImplementedError
-
-# this should go in the framework
-
-class CompositeDataContainer(object):
- '''Class to hold a composite operator'''
- __array_priority__ = 1
- def __init__(self, *args, shape=None):
- '''containers must be passed row by row'''
- self.containers = args
- self.index = 0
- if shape is None:
- shape = (len(args),1)
- self.shape = shape
- n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
- if len(args) != n_elements:
- raise ValueError(
- 'Dimension and size do not match: expected {} got {}'
- .format(n_elements,len(args)))
-# for i in range(shape[0]):
-# b.append([])
-# for j in range(shape[1]):
-# b[-1].append(args[i*shape[1]+j])
-# indices.append(i*shape[1]+j)
-# self.containers = b
-
- def __iter__(self):
- return self
- def next(self):
- '''python2 backwards compatibility'''
- return self.__next__()
- def __next__(self):
- try:
- out = self[self.index]
- except IndexError as ie:
- raise StopIteration()
- self.index+=1
- return out
-
- def is_compatible(self, other):
- '''basic check if the size of the 2 objects fit'''
- if isinstance(other, Number):
- return True
- elif isinstance(other, list):
- # TODO look elements should be numbers
- for ot in other:
- if not isinstance(ot, (Number,\
- numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\
- numpy.float, numpy.float16, numpy.float32, numpy.float64, \
- numpy.complex)):
- raise ValueError('List/ numpy array can only contain numbers {}'\
- .format(type(ot)))
- return len(self.containers) == len(other)
- elif isinstance(other, numpy.ndarray):
- return self.shape == other.shape
- return len(self.containers) == len(other.containers)
- def get_item(self, row, col=0):
- if row > self.shape[0]:
- raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
- if col > self.shape[1]:
- raise ValueError('Requested col {} > max {}'.format(col, self.shape[1]))
-
- index = row*self.shape[1]+col
- return self.containers[index]
-
- def add(self, other, out=None, *args, **kwargs):
- assert self.is_compatible(other)
- if isinstance(other, Number):
- return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers])
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)])
-
- def subtract(self, other, out=None , *args, **kwargs):
- assert self.is_compatible(other)
- if isinstance(other, Number):
- return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers])
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)])
-
- def multiply(self, other , out=None, *args, **kwargs):
- self.is_compatible(other)
- if isinstance(other, Number):
- return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers])
- elif isinstance(other, list):
- return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- elif isinstance(other, numpy.ndarray):
- return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)])
-
- def divide(self, other , out=None ,*args, **kwargs):
- self.is_compatible(other)
- if isinstance(other, Number):
- return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers])
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)])
-
- def power(self, other , out=None, *args, **kwargs):
- assert self.is_compatible(other)
- if isinstance(other, Number):
- return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers])
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)])
-
- def maximum(self,other, out=None, *args, **kwargs):
- assert self.is_compatible(other)
- if isinstance(other, Number):
- return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers])
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)])
- return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)])
-
- ## unary operations
- def abs(self, out=None, *args, **kwargs):
- return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers])
- def sign(self, out=None, *args, **kwargs):
- return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers])
- def sqrt(self, out=None, *args, **kwargs):
- return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers])
- def conjugate(self, out=None):
- return type(self)(*[el.conjugate() for el in self.containers])
-
- ## reductions
- def sum(self, out=None, *args, **kwargs):
- return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers])
- def norm(self):
- y = numpy.asarray([el**2 for el in self.containers])
- return y.sum()
- def copy(self):
- '''alias of clone'''
- return self.clone()
- def clone(self):
- return type(self)(*[el.copy() for el in self.containers])
-
- def __add__(self, other):
- return self.add( other )
- # __radd__
-
- def __sub__(self, other):
- return self.subtract( other )
- # __rsub__
-
- def __mul__(self, other):
- return self.multiply(other)
- # __rmul__
-
- def __div__(self, other):
- return self.divide(other)
- # __rdiv__
- def __truediv__(self, other):
- return self.divide(other)
-
- def __pow__(self, other):
- return self.power(other)
- # reverse operand
- def __radd__(self, other):
- return self + other
- # __radd__
-
- def __rsub__(self, other):
- return (-1 * self) + other
- # __rsub__
-
- def __rmul__(self, other):
- '''Reverse multiplication
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return self * other
- # __rmul__
-
- def __rdiv__(self, other):
- return pow(self / other, -1)
- # __rdiv__
- def __rtruediv__(self, other):
- return self.__rdiv__(other)
-
- def __rpow__(self, other):
- return other.power(self)
-
- def __iadd__(self, other):
- if isinstance (other, CompositeDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el += ot
- elif isinstance(other, Number):
- for el in self.containers:
- el += other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- self.is_compatible(other)
- for el,ot in zip(self.containers, other):
- el += ot
- return self
- # __radd__
-
- def __isub__(self, other):
- if isinstance (other, CompositeDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el -= ot
- elif isinstance(other, Number):
- for el in self.containers:
- el -= other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- assert self.is_compatible(other)
- for el,ot in zip(self.containers, other):
- el -= ot
- return self
- # __rsub__
-
- def __imul__(self, other):
- if isinstance (other, CompositeDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el *= ot
- elif isinstance(other, Number):
- for el in self.containers:
- el *= other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- assert self.is_compatible(other)
- for el,ot in zip(self.containers, other):
- el *= ot
- return self
- # __imul__
-
- def __idiv__(self, other):
- if isinstance (other, CompositeDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el /= ot
- elif isinstance(other, Number):
- for el in self.containers:
- el /= other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- assert self.is_compatible(other)
- for el,ot in zip(self.containers, other):
- el /= ot
- return self
- # __rdiv__
- def __itruediv__(self, other):
- return self.__idiv__(other)
-
-
-
-class CompositeOperator(Operator):
- '''Class to hold a composite operator'''
- def __init__(self, *args, shape=None):
- self.operators = args
- if shape is None:
- shape = (len(args),1)
- self.shape = shape
- n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
- if len(args) != n_elements:
- raise ValueError(
- 'Dimension and size do not match: expected {} got {}'
- .format(n_elements,len(args)))
- def get_item(self, row, col):
- if row > self.shape[0]:
- raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
- if col > self.shape[1]:
- raise ValueError('Requested col {} > max {}'.format(col, self.shape[1]))
-
- index = row*self.shape[1]+col
- return self.operators[index]
-
- def norm(self):
- norm = [op.norm() for op in self.operators]
- b = []
- for i in range(self.shape[0]):
- b.append([])
- for j in range(self.shape[1]):
- b[-1].append(norm[i*self.shape[1]+j])
- return numpy.asarray(b)
-
- def direct(self, x, out=None):
- shape = self.get_output_shape(x.shape)
- res = []
- for row in range(self.shape[0]):
- for col in range(self.shape[1]):
- if col == 0:
- prod = self.get_item(row,col).direct(x.get_item(col))
- else:
- prod += self.get_item(row,col).direct(x.get_item(col))
- res.append(prod)
- return CompositeDataContainer(*res, shape=shape)
-
- def adjoint(self, x, out=None):
- shape = self.get_output_shape(x.shape, adjoint=True)
- res = []
- for row in range(self.shape[1]):
- for col in range(self.shape[0]):
- if col == 0:
- prod = self.get_item(row,col).adjoint(x.get_item(col))
- else:
- prod += self.get_item(row,col).adjoint(x.get_item(col))
- res.append(prod)
- return CompositeDataContainer(*res, shape=shape)
-
- def get_output_shape(self, xshape, adjoint=False):
- sshape = self.shape[1]
- oshape = self.shape[0]
- if adjoint:
- sshape = self.shape[0]
- oshape = self.shape[1]
- if sshape != xshape[0]:
- raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
- return (oshape, xshape[-1])
-
-'''
- def direct(self, x, out=None):
-
- out = [None]*self.dimension[0]
- for i in range(self.dimension[0]):
- z1 = ImageData(np.zeros(self.compMat[i][0].range_dim()))
- for j in range(self.dimension[1]):
- z1 += self.compMat[i][j].direct(x[j])
- out[i] = z1
-
- return out
-
-
- def adjoint(self, x, out=None):
-
- out = [None]*self.dimension[1]
- for i in range(self.dimension[1]):
- z2 = ImageData(np.zeros(self.compMat[0][i].domain_dim()))
- for j in range(self.dimension[0]):
- z2 += self.compMat[j][i].adjoint(x[j])
- out[i] = z2
-'''
-from ccpi.optimisation.Algorithms import Algorithm
-from collections.abc import Iterable
-class CGLS(Algorithm):
-
- '''Conjugate Gradient Least Squares algorithm
-
- Parameters:
- x_init: initial guess
- operator: operator for forward/backward projections
- data: data to operate on
- '''
- def __init__(self, **kwargs):
- super(CGLS, self).__init__()
- self.x = kwargs.get('x_init', None)
- self.operator = kwargs.get('operator', None)
- self.data = kwargs.get('data', None)
- if self.x is not None and self.operator is not None and \
- self.data is not None:
- print ("Calling from creator")
- return self.set_up(x_init =kwargs['x_init'],
- operator=kwargs['operator'],
- data =kwargs['data'])
-
- def set_up(self, x_init, operator , data ):
-
- self.r = data.copy()
- self.x = x_init.copy()
-
- self.operator = operator
- self.d = operator.adjoint(self.r)
-
-
- self.normr2 = (self.d * self.d).sum()
- if isinstance(self.normr2, Iterable):
- self.normr2 = sum(self.normr2)
- #self.normr2 = numpy.sqrt(self.normr2)
- print ("set_up" , self.normr2)
-
- def should_stop(self):
- '''stopping cryterion, currently only based on number of iterations'''
- return self.iteration >= self.max_iteration
-
- def update(self):
-
- Ad = self.operator.direct(self.d)
- norm = (Ad*Ad).sum()
- if isinstance(norm, Iterable):
- norm = sum(norm)
- #norm = numpy.sqrt(norm)
- print (norm)
- alpha = self.normr2/norm
- self.x += (self.d * alpha)
- self.r -= (Ad * alpha)
- s = self.operator.adjoint(self.r)
-
- normr2_new = (s*s).sum()
- if isinstance(normr2_new, Iterable):
- normr2_new = sum(normr2_new)
- #normr2_new = numpy.sqrt(normr2_new)
- print (normr2_new)
-
- beta = normr2_new/self.normr2
- self.normr2 = normr2_new
- self.d = s + beta*self.d
-
- def update_objective(self):
- self.loss.append((self.r*self.r).sum())
-
- def run(self, iterations, callback=None):
- self.max_iteration += iterations
- for _ in self:
- if callback is not None:
- callback(self.iteration, self.get_current_loss())
-
-
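For clarity, the update() method above follows the standard CGLS recurrence for minimising ||A x - b||^2, with self.normr2 caching ||A^T r_k||^2 between iterations (a summary sketch of the deleted code; note that set_up takes r_0 as the data itself, i.e. it does not subtract A x_0):

    alpha_k = ||A^T r_k||^2 / ||A d_k||^2
    x_{k+1} = x_k + alpha_k d_k
    r_{k+1} = r_k - alpha_k A d_k
    beta_k  = ||A^T r_{k+1}||^2 / ||A^T r_k||^2
    d_{k+1} = A^T r_{k+1} + beta_k d_k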
-if __name__ == '__main__':
- #from ccpi.optimisation.Algorithms import GradientDescent
- from ccpi.plugins.ops import CCPiProjectorSimple
- from ccpi.optimisation.ops import PowerMethodNonsquare
- from ccpi.optimisation.ops import TomoIdentity
- from ccpi.optimisation.funcs import Norm2sq, Norm1
- from ccpi.framework import ImageGeometry, AcquisitionGeometry
- from ccpi.optimisation.Algorithms import GradientDescent
- #from ccpi.optimisation.Algorithms import CGLS
- import matplotlib.pyplot as plt
-
- ig0 = ImageGeometry(2,3,4)
- ig1 = ImageGeometry(12,42,55,32)
-
- data0 = ImageData(geometry=ig0)
- data1 = ImageData(geometry=ig1) + 1
-
- data2 = ImageData(geometry=ig0) + 2
- data3 = ImageData(geometry=ig1) + 3
-
- cp0 = CompositeDataContainer(data0,data1)
- cp1 = CompositeDataContainer(data2,data3)
-#
- a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)]
- print (a[0][0].shape)
- #cp2 = CompositeDataContainer(*a)
- cp2 = cp0.add(cp1)
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.)
- assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.)
-
- cp2 = cp0 + cp1
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.)
- assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.)
- cp2 = cp0 + 1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5)
- cp2 = cp0 + [1 ,2]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5)
- cp2 += cp1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5)
-
- cp2 += 1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5)
-
- cp2 += [-2,-1]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5)
-
-
- cp2 = cp0.subtract(cp1)
- assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.)
- assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.)
- cp2 = cp0 - cp1
- assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.)
- assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.)
-
- cp2 = cp0 - 1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5)
- cp2 = cp0 - [1 ,2]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5)
-
- cp2 -= cp1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5)
-
- cp2 -= 1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5)
-
- cp2 -= [-2,-1]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5)
-
-
- cp2 = cp0.multiply(cp1)
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.)
- assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.)
- cp2 = cp0 * cp1
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.)
- assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.)
-
- cp2 = cp0 * 2
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5)
- cp2 = 2 * cp0
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5)
- cp2 = cp0 * [3 ,2]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5)
- cp2 = cp0 * numpy.asarray([3 ,2])
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5)
-
- cp2 = [3,2] * cp0
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5)
- cp2 = numpy.asarray([3,2]) * cp0
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5)
- cp2 = [3,2,3] * cp0
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5)
-
- cp2 *= cp1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5)
-
- cp2 *= 1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5)
-
- cp2 *= [-2,-1]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5)
-
-
- cp2 = cp0.divide(cp1)
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4)
- cp2 = cp0/cp1
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4)
-
- cp2 = cp0 / 2
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5)
- cp2 = cp0 / [3 ,2]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5)
- cp2 = cp0 / numpy.asarray([3 ,2])
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5)
- cp3 = numpy.asarray([3 ,2]) / (cp0+1)
- numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. , decimal=5)
- numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5)
-
- cp2 += 1
- cp2 /= cp1
- # TODO fix inplace division
-
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5)
-
- cp2 /= 1
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5)
-
- cp2 /= [-2,-1]
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5)
- ####
-
- cp2 = cp0.power(cp1)
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4)
- cp2 = cp0**cp1
- assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4)
-
- cp2 = cp0 ** 2
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5)
-
- cp2 = cp0.maximum(cp1)
- assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0])
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4)
-
-
- cp2 = cp0.abs()
- numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4)
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4)
-
- cp2 = cp0.subtract(cp1)
- s = cp2.sign()
- numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4)
- numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4)
-
- cp2 = cp0.add(cp1)
- s = cp2.sqrt()
- numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4)
- numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4)
-
- s = cp0.sum()
- numpy.testing.assert_almost_equal(s[0], 0, decimal=4)
- s0 = 1
- s1 = 1
- for i in cp0.get_item(0,0).shape:
- s0 *= i
- for i in cp0.get_item(1,0).shape:
- s1 *= i
-
- numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4)
-
- # Set up phantom size N x N x vert by creating ImageGeometry, initialising the
- # ImageData object with this geometry and empty array and finally put some
- # data into its array, and display one slice as image.
-
- # Image parameters
- N = 128
- vert = 4
-
- # Set up image geometry
- ig = ImageGeometry(voxel_num_x=N,
- voxel_num_y=N,
- voxel_num_z=vert)
-
- # Set up empty image data
- Phantom = ImageData(geometry=ig,
- dimension_labels=['horizontal_x',
- 'horizontal_y',
- 'vertical'])
- Phantom += 0.05
- # Populate image data by looping over and filling slices
- i = 0
- while i < vert:
- if vert > 1:
- x = Phantom.subset(vertical=i).array
- else:
- x = Phantom.array
- x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
- x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94
- if vert > 1 :
- Phantom.fill(x, vertical=i)
- i += 1
-
-
- perc = 0.02
- # Set up empty image data
- noise = ImageData(numpy.random.normal(loc = 0.04 ,
- scale = perc ,
- size = Phantom.shape), geometry=ig,
- dimension_labels=['horizontal_x',
- 'horizontal_y',
- 'vertical'])
- Phantom += noise
-
- # Set up AcquisitionGeometry object to hold the parameters of the measurement
- # setup geometry: # Number of angles, the actual angles from 0 to
- # pi for parallel beam, set the width of a detector
- # pixel relative to an object pixe and the number of detector pixels.
- angles_num = 20
- det_w = 1.0
- det_num = N
-
- angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\
- 180/numpy.pi
-
- # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count,
- # horz detector pixel size, vert detector pixel count,
- # vert detector pixel size.
- ag = AcquisitionGeometry('parallel',
- '3D',
- angles,
- N,
- det_w,
- vert,
- det_w)
-
- # Set up Operator object combining the ImageGeometry and AcquisitionGeometry
- # wrapping calls to CCPi projector.
- A = CCPiProjectorSimple(ig, ag)
-
- # Forward and backprojection are available as methods direct and adjoint. Here
- # generate test data b and some noise
-
- b = A.direct(Phantom)
-
-
- #z = A.adjoint(b)
-
-
- # Using the test data b, different reconstruction methods can now be set up as
- # demonstrated in the rest of this file. In general all methods need an initial
- # guess and some algorithm options to be set. Note that 100 iterations for
- # some of the methods is a very low number and 1000 or 10000 iterations may be
- # needed if one wants to obtain a converged solution.
- x_init = ImageData(geometry=ig,
- dimension_labels=['horizontal_x','horizontal_y','vertical'])
- X_init = CompositeDataContainer(x_init)
- B = CompositeDataContainer(b,
- ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
-
- # setup a tomo identity
- Ibig = 1e5 * TomoIdentity(geometry=ig)
- Ismall = 1e-5 * TomoIdentity(geometry=ig)
-
- # composite operator
- Kbig = CompositeOperator(A, Ibig, shape=(2,1))
- Ksmall = CompositeOperator(A, Ismall, shape=(2,1))
-
- #out = K.direct(X_init)
-
- f = Norm2sq(Kbig,B)
- f.L = 0.00003
-
- fsmall = Norm2sq(Ksmall,B)
- f.L = 0.00003
-
- simplef = Norm2sq(A, b)
- simplef.L = 0.00003
-
- gd = GradientDescent( x_init=x_init, objective_function=simplef,
- rate=simplef.L)
- gd.max_iteration = 10
-
- cg = CGLS()
- cg.set_up(X_init, Kbig, B )
- cg.max_iteration = 1
-
- cgsmall = CGLS()
- cgsmall.set_up(X_init, Ksmall, B )
- cgsmall.max_iteration = 1
-
-
- cgs = CGLS()
- cgs.set_up(x_init, A, b )
- cgs.max_iteration = 6
-#
- #out.__isub__(B)
- #out2 = K.adjoint(out)
-
- #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
-
- for _ in gd:
- print ("iteration {} {}".format(gd.iteration, gd.get_current_loss()))
-
- cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-
- cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-
- cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
- cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-# for _ in cg:
-# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
-#
-# fig = plt.figure()
-# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
-# plt.title('Composite CGLS')
-# plt.show()
-#
-# for _ in cgs:
-# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
-#
- fig = plt.figure()
- plt.subplot(1,5,1)
- plt.imshow(Phantom.subset(vertical=0).as_array())
- plt.title('Simulated Phantom')
- plt.subplot(1,5,2)
- plt.imshow(gd.get_output().subset(vertical=0).as_array())
- plt.title('Simple Gradient Descent')
- plt.subplot(1,5,3)
- plt.imshow(cgs.get_output().subset(vertical=0).as_array())
- plt.title('Simple CGLS')
- plt.subplot(1,5,4)
- plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
- plt.title('Composite CGLS\nbig lambda')
- plt.subplot(1,5,5)
- plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array())
- plt.title('Composite CGLS\nsmall lambda')
-    plt.show()
\ No newline at end of file
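For context, the composite problem built in the demo above (Kbig stacking A and lambda * TomoIdentity, with data B = (b, 0)) is the usual least-squares form of Tikhonov regularisation: minimising ||A x - b||^2 + lambda^2 ||x||^2 over x is equivalent to running CGLS on the stacked system [A; lambda*I] x = [b; 0]. The 'big lambda' run (1e5) is therefore strongly damped towards zero, while the 'small lambda' run (1e-5) behaves essentially like plain CGLS.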
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
new file mode 100755
index 0000000..e19304f
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 5 15:57:52 2019
+
+@author: ofn77899
+"""
+
+from ccpi.optimisation.operators import Operator
+
+
+class LinearOperator(Operator):
+    '''A Linear Operator that maps from a space X to a space Y, with an adjoint mapping Y back to X'''
+ def __init__(self):
+ super(LinearOperator, self).__init__()
+ def is_linear(self):
+        '''Returns whether the operator is linear'''
+ return True
+ def adjoint(self,x, out=None):
+        '''returns the adjoint operation
+
+        only available for linear operators'''
+ raise NotImplementedError
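To make the contract concrete: a subclass implements direct and adjoint so that <A x, y> = <x, A adjoint y>. A minimal sketch against the new interface (MatrixOperator and the numpy test vectors are illustrative assumptions, not part of this changeset):

import numpy
from ccpi.optimisation.operators import LinearOperator

class MatrixOperator(LinearOperator):
    '''hypothetical example: wraps a dense numpy matrix'''
    def __init__(self, A):
        super(MatrixOperator, self).__init__()
        self.A = A
    def direct(self, x, out=None):
        # forward map X -> Y
        return self.A.dot(x)
    def adjoint(self, x, out=None):
        # adjoint map Y -> X (transpose for a real matrix)
        return self.A.T.dot(x)
    def norm(self):
        # largest singular value of the matrix
        return numpy.linalg.norm(self.A, 2)

op = MatrixOperator(numpy.array([[1., 2.], [3., 4.], [5., 6.]]))
x = numpy.array([1., -1.])
y = numpy.array([0.5, 1., 2.])
# adjoint property: <op.direct(x), y> == <x, op.adjoint(y)>
assert numpy.isclose(op.direct(x).dot(y), x.dot(op.adjoint(y)))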
diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
new file mode 100755
index 0000000..95082f4
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 5 15:55:56 2019
+
+@author: ofn77899
+"""
+from ccpi.optimisation.operators import ScaledOperator
+
+class Operator(object):
+ '''Operator that maps from a space X -> Y'''
+ def is_linear(self):
+        '''Returns whether the operator is linear'''
+ return False
+ def direct(self,x, out=None):
+ raise NotImplementedError
+ def norm(self):
+ raise NotImplementedError
+ def range_geometry(self):
+ raise NotImplementedError
+ def domain_geometry(self):
+ raise NotImplementedError
+ def __rmul__(self, scalar):
+ return ScaledOperator(self, scalar)
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
new file mode 100644
index 0000000..adcc6d9
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
@@ -0,0 +1,42 @@
+from numbers import Number
+import numpy
+
+class ScaledOperator(object):
+ '''ScaledOperator
+
+    A class to represent an Operator scaled by a scalar.
+    It holds an operator and a scalar, and returns the results of the
+    operator's direct and adjoint methods multiplied by that scalar.
+    In every other respect it behaves like the operator it holds.
+
+ Args:
+       operator (Operator): an Operator or LinearOperator
+ scalar (Number): a scalar multiplier
+ Example:
+ The scaled operator behaves like the following:
+ sop = ScaledOperator(operator, scalar)
+ sop.direct(x) = scalar * operator.direct(x)
+ sop.adjoint(x) = scalar * operator.adjoint(x)
+ sop.norm() = operator.norm()
+ sop.range_geometry() = operator.range_geometry()
+ sop.domain_geometry() = operator.domain_geometry()
+ '''
+ def __init__(self, operator, scalar):
+ super(ScaledOperator, self).__init__()
+ if not isinstance (scalar, Number):
+ raise TypeError('expected scalar: got {}'.format(type(scalar)))
+ self.scalar = scalar
+ self.operator = operator
+ def direct(self, x, out=None):
+ return self.scalar * self.operator.direct(x, out=out)
+ def adjoint(self, x, out=None):
+ if self.operator.is_linear():
+ return self.scalar * self.operator.adjoint(x, out=out)
+ else:
+ raise TypeError('Operator is not linear')
+ def norm(self):
+ return numpy.abs(self.scalar) * self.operator.norm()
+ def range_geometry(self):
+ return self.operator.range_geometry()
+ def domain_geometry(self):
+ return self.operator.domain_geometry()
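Together with Operator.__rmul__ above, writing scalar * op for any operator built on the new base class returns a ScaledOperator rather than mutating the operator in place. A small usage sketch (MyOp is a hypothetical operator, not part of this changeset):

from ccpi.optimisation.operators import LinearOperator, ScaledOperator

class MyOp(LinearOperator):
    '''hypothetical operator that multiplies its input by 2'''
    def direct(self, x, out=None):
        return 2 * x
    def adjoint(self, x, out=None):
        return 2 * x
    def norm(self):
        return 2.0

op = MyOp()
sop = 3.0 * op                  # Operator.__rmul__ -> ScaledOperator(op, 3.0)
assert isinstance(sop, ScaledOperator)
assert sop.direct(1.0) == 6.0   # 3.0 * op.direct(1.0)
assert sop.adjoint(1.0) == 6.0  # permitted because op.is_linear() is True
assert sop.norm() == 6.0        # abs(3.0) * op.norm()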
diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
new file mode 100755
index 0000000..cc307e0
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 5 15:56:27 2019
+
+@author: ofn77899
+"""
+
+from .Operator import Operator
+from .LinearOperator import LinearOperator
+from .ScaledOperator import ScaledOperator
+from .BlockOperator import BlockOperator
+from .BlockScaledOperator import BlockScaledOperator
diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py
index e9e7f44..6afb97a 100755
--- a/Wrappers/Python/ccpi/optimisation/ops.py
+++ b/Wrappers/Python/ccpi/optimisation/ops.py
@@ -49,9 +49,9 @@ class Operator(object):
def allocate_adjoint(self):
'''Allocates memory on the X space'''
raise NotImplementedError
- def range_dim(self):
+ def range_geometry(self):
raise NotImplementedError
- def domain_dim(self):
+ def domain_geometry(self):
raise NotImplementedError
def __rmul__(self, other):
'''reverse multiplication of Operator with number sets the variable scalar in the Operator'''
@@ -97,7 +97,8 @@ class TomoIdentity(Operator):
self.s1 = 1.0
self.geometry = geometry
-
+ def is_linear(self):
+ return True
def direct(self,x,out=None):
if out is None:
@@ -128,6 +129,10 @@ class TomoIdentity(Operator):
             raise ValueError("Wrong geometry type: expected ImageGeometry or AcquisitionGeometry, got ", type(self.geometry))
def allocate_adjoint(self):
return self.allocate_direct()
+ def range_geometry(self):
+ return self.geometry
+ def domain_geometry(self):
+ return self.geometry
diff --git a/Wrappers/Python/ccpi/processors.py b/Wrappers/Python/ccpi/processors.py
index 611c8c6..ccef410 100755
--- a/Wrappers/Python/ccpi/processors.py
+++ b/Wrappers/Python/ccpi/processors.py
@@ -102,7 +102,7 @@ class Normalizer(DataProcessor):
rel_norm_error = (b + a) / (b * a) * (df + dd)
return rel_norm_error
- def process(self):
+ def process(self, out=None):
projections = self.get_input()
dark = self.dark_field
@@ -400,7 +400,7 @@ class CenterOfRotationFinder(DataProcessor):
mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32')
return mask
- def process(self):
+ def process(self, out=None):
projections = self.get_input()
@@ -442,7 +442,7 @@ class AcquisitionDataPadder(DataProcessor):
raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
.format(dataset.number_of_dimensions))
- def process(self):
+ def process(self, out=None):
projections = self.get_input()
w = projections.get_dimension_size('horizontal')
delta = w - 2 * self.center_of_rotation
diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml
index 96a211f..30c8e9d 100644
--- a/Wrappers/Python/conda-recipe/conda_build_config.yaml
+++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml
@@ -4,5 +4,5 @@ python:
- 3.6
numpy:
   # TODO investigate, as it doesn't currently build with cvxpy, requires >1.14
- #- 1.12
+ - 1.12
- 1.15
diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml
index 8ded429..dd3238e 100644
--- a/Wrappers/Python/conda-recipe/meta.yaml
+++ b/Wrappers/Python/conda-recipe/meta.yaml
@@ -11,7 +11,7 @@ build:
test:
requires:
- python-wget
- - cvxpy # [not win]
+ - cvxpy # [ unix and py36 and np115 ]
source_files:
- ./test # [win]
@@ -24,8 +24,9 @@ test:
requirements:
build:
+ - {{ pin_compatible('numpy', max_pin='x.x') }}
- python
- - numpy {{ numpy }}
+ - numpy
- setuptools
run:
@@ -33,7 +34,7 @@ requirements:
- python
- numpy
- scipy
- - matplotlib
+ #- matplotlib
- h5py
about:
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index 85907eb..630e33e 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -31,8 +31,10 @@ if cil_version == '':
setup(
name="ccpi-framework",
version=cil_version,
- packages=['ccpi' , 'ccpi.io', 'ccpi.optimisation',
- 'ccpi.optimisation.operators', 'ccpi.optimisation.algorithms'],
+ packages=['ccpi' , 'ccpi.io',
+ 'ccpi.framework', 'ccpi.optimisation',
+ 'ccpi.optimisation.operators',
+ 'ccpi.optimisation.algorithms'],
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py
new file mode 100755
index 0000000..ef11a82
--- /dev/null
+++ b/Wrappers/Python/test/test_BlockDataContainer.py
@@ -0,0 +1,284 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 5 16:08:23 2019
+
+@author: ofn77899
+"""
+
+import unittest
+import numpy
+#from ccpi.plugins.ops import CCPiProjectorSimple
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.funcs import Norm2sq, Norm1
+from ccpi.framework import ImageGeometry, AcquisitionGeometry
+from ccpi.framework import ImageData, AcquisitionData
+#from ccpi.optimisation.algorithms import GradientDescent
+from ccpi.framework import BlockDataContainer
+#from ccpi.optimisation.Algorithms import CGLS
+import functools
+
+class TestBlockDataContainer(unittest.TestCase):
+ def skiptest_BlockDataContainerShape(self):
+ print ("test block data container")
+ ig0 = ImageGeometry(12,42,55,32)
+ ig1 = ImageGeometry(12,42,55,32)
+
+ data0 = ImageData(geometry=ig0)
+ data1 = ImageData(geometry=ig1) + 1
+
+ data2 = ImageData(geometry=ig0) + 2
+ data3 = ImageData(geometry=ig1) + 3
+
+ cp0 = BlockDataContainer(data0,data1)
+ cp1 = BlockDataContainer(data2,data3)
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp0.T.shape == transpose_shape)
+ def skiptest_BlockDataContainerShapeArithmetic(self):
+ print ("test block data container")
+ ig0 = ImageGeometry(2,3,4)
+ ig1 = ImageGeometry(2,3,4)
+
+ data0 = ImageData(geometry=ig0)
+ data1 = ImageData(geometry=ig1) + 1
+
+ data2 = ImageData(geometry=ig0) + 2
+ data3 = ImageData(geometry=ig1) + 3
+
+ cp0 = BlockDataContainer(data0,data1)
+ #cp1 = BlockDataContainer(data2,data3)
+ cp1 = cp0 + 1
+ self.assertTrue(cp1.shape == cp0.shape)
+ cp1 = cp0.T + 1
+
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T - 1
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = (cp0.T + 1)*2
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = (cp0.T + 1)/2
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T.power(2.2)
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T.maximum(3)
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T.abs()
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T.sign()
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T.sqrt()
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ cp1 = cp0.T.conjugate()
+ transpose_shape = (cp0.shape[1], cp0.shape[0])
+ self.assertTrue(cp1.shape == transpose_shape)
+
+ def test_BlockDataContainer(self):
+ print ("test block data container")
+ ig0 = ImageGeometry(2,3,4)
+ ig1 = ImageGeometry(2,3,4)
+
+ data0 = ImageData(geometry=ig0)
+ data1 = ImageData(geometry=ig1) + 1
+
+ data2 = ImageData(geometry=ig0) + 2
+ data3 = ImageData(geometry=ig1) + 3
+
+ cp0 = BlockDataContainer(data0,data1)
+ cp1 = BlockDataContainer(data2,data3)
+ #
+ a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)]
+ print (a[0][0].shape)
+ #cp2 = BlockDataContainer(*a)
+ cp2 = cp0.add(cp1)
+ assert (cp2.get_item(0).as_array()[0][0][0] == 2.)
+ assert (cp2.get_item(1).as_array()[0][0][0] == 4.)
+
+ cp2 = cp0 + cp1
+ assert (cp2.get_item(0).as_array()[0][0][0] == 2.)
+ assert (cp2.get_item(1).as_array()[0][0][0] == 4.)
+ cp2 = cp0 + 1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+ cp2 = cp0 + [1 ,2]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 3., decimal = 5)
+ cp2 += cp1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , +3. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5)
+
+ cp2 += 1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , +4. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +7., decimal = 5)
+
+ cp2 += [-2,-1]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 2. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 6., decimal = 5)
+
+
+ cp2 = cp0.subtract(cp1)
+ assert (cp2.get_item(0).as_array()[0][0][0] == -2.)
+ assert (cp2.get_item(1).as_array()[0][0][0] == -2.)
+ cp2 = cp0 - cp1
+ assert (cp2.get_item(0).as_array()[0][0][0] == -2.)
+ assert (cp2.get_item(1).as_array()[0][0][0] == -2.)
+
+ cp2 = cp0 - 1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -1. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0, decimal = 5)
+ cp2 = cp0 - [1 ,2]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -1. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -1., decimal = 5)
+
+ cp2 -= cp1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -3. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -4., decimal = 5)
+
+ cp2 -= 1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -4. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -5., decimal = 5)
+
+ cp2 -= [-2,-1]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -2. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -4., decimal = 5)
+
+
+ cp2 = cp0.multiply(cp1)
+ assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
+ assert (cp2.get_item(1).as_array()[0][0][0] == 3.)
+ cp2 = cp0 * cp1
+ assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
+ assert (cp2.get_item(1).as_array()[0][0][0] == 3.)
+
+ cp2 = cp0 * 2
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2, decimal = 5)
+ cp2 = 2 * cp0
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2, decimal = 5)
+ cp2 = cp0 * [3 ,2]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+ cp2 = cp0 * numpy.asarray([3 ,2])
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+
+ cp2 = [3,2] * cp0
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+ cp2 = numpy.asarray([3,2]) * cp0
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+ cp2 = [3,2,3] * cp0
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+
+ cp2 *= cp1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0 , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5)
+
+ cp2 *= 1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5)
+
+ cp2 *= [-2,-1]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -6., decimal = 5)
+
+
+ cp2 = cp0.divide(cp1)
+ assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1./3., decimal=4)
+ cp2 = cp0/cp1
+ assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1./3., decimal=4)
+
+ cp2 = cp0 / 2
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5)
+ cp2 = cp0 / [3 ,2]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5)
+ cp2 = cp0 / numpy.asarray([3 ,2])
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5)
+ cp3 = numpy.asarray([3 ,2]) / (cp0+1)
+ numpy.testing.assert_almost_equal(cp3.get_item(0).as_array()[0][0][0] , 3. , decimal=5)
+ numpy.testing.assert_almost_equal(cp3.get_item(1).as_array()[0][0][0] , 1, decimal = 5)
+
+ cp2 += 1
+ cp2 /= cp1
+ # TODO fix inplace division
+
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1./2 , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 1.5/3., decimal = 5)
+
+ cp2 /= 1
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0.5 , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5)
+
+ cp2 /= [-2,-1]
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -0.5/2. , decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -0.5, decimal = 5)
+ ####
+
+ cp2 = cp0.power(cp1)
+ assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1., decimal=4)
+ cp2 = cp0**cp1
+ assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1., decimal=4)
+
+ cp2 = cp0 ** 2
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0., decimal=5)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 1., decimal = 5)
+
+ cp2 = cp0.maximum(cp1)
+ assert (cp2.get_item(0).as_array()[0][0][0] == cp1.get_item(0).as_array()[0][0][0])
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], cp2.get_item(1).as_array()[0][0][0], decimal=4)
+
+
+ cp2 = cp0.abs()
+ numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0], 0., decimal=4)
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1., decimal=4)
+
+ cp2 = cp0.subtract(cp1)
+ s = cp2.sign()
+ numpy.testing.assert_almost_equal(s.get_item(0).as_array()[0][0][0], -1., decimal=4)
+ numpy.testing.assert_almost_equal(s.get_item(1).as_array()[0][0][0], -1., decimal=4)
+
+ cp2 = cp0.add(cp1)
+ s = cp2.sqrt()
+ numpy.testing.assert_almost_equal(s.get_item(0).as_array()[0][0][0], numpy.sqrt(2), decimal=4)
+ numpy.testing.assert_almost_equal(s.get_item(1).as_array()[0][0][0], numpy.sqrt(4), decimal=4)
+
+ s = cp0.sum()
+ size = functools.reduce(lambda x,y: x*y, data1.shape, 1)
+ print ("size" , size)
+ numpy.testing.assert_almost_equal(s, 0 + size, decimal=4)
+ s0 = 1
+ s1 = 1
+ for i in cp0.get_item(0).shape:
+ s0 *= i
+ for i in cp0.get_item(1).shape:
+ s1 *= i
+
+ #numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4)
+        
\ No newline at end of file
diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py
new file mode 100644
index 0000000..8bd673b
--- /dev/null
+++ b/Wrappers/Python/test/test_BlockOperator.py
@@ -0,0 +1,290 @@
+import unittest
+from ccpi.optimisation.operators import BlockOperator
+from ccpi.framework import BlockDataContainer
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.framework import ImageGeometry, ImageData
+import numpy
+
+class TestBlockOperator(unittest.TestCase):
+
+ def test_BlockOperator(self):
+ print ("test_BlockOperator")
+ ig = [ ImageGeometry(10,20,30) , \
+ ImageGeometry(10,20,30) , \
+ ImageGeometry(10,20,30) ]
+ x = [ g.allocate() for g in ig ]
+ ops = [ TomoIdentity(g) for g in ig ]
+
+ K = BlockOperator(*ops)
+ X = BlockDataContainer(x[0])
+ Y = K.direct(X)
+ self.assertTrue(Y.shape == K.shape)
+
+ numpy.testing.assert_array_equal(Y.get_item(0).as_array(),X.get_item(0).as_array())
+ numpy.testing.assert_array_equal(Y.get_item(1).as_array(),X.get_item(0).as_array())
+ #numpy.testing.assert_array_equal(Y.get_item(2).as_array(),X.get_item(2).as_array())
+
+ X = BlockDataContainer(*x) + 1
+ Y = K.T.direct(X)
+ # K.T (1,3) X (3,1) => output shape (1,1)
+ self.assertTrue(Y.shape == (1,1))
+ zero = numpy.zeros(X.get_item(0).shape)
+ numpy.testing.assert_array_equal(Y.get_item(0).as_array(),len(x)+zero)
+
+
+ def test_ScaledBlockOperatorSingleScalar(self):
+ ig = [ ImageGeometry(10,20,30) , \
+ ImageGeometry(10,20,30) , \
+ ImageGeometry(10,20,30) ]
+ x = [ g.allocate() for g in ig ]
+ ops = [ TomoIdentity(g) for g in ig ]
+
+ val = 1
+ # test limit as non Scaled
+ scalar = 1
+ k = BlockOperator(*ops)
+ K = scalar * k
+ X = BlockDataContainer(*x) + val
+
+ Y = K.T.direct(X)
+ self.assertTrue(Y.shape == (1,1))
+ zero = numpy.zeros(X.get_item(0).shape)
+ xx = numpy.asarray([val for _ in x])
+ numpy.testing.assert_array_equal(Y.get_item(0).as_array(),((scalar*xx).sum()+zero))
+
+ scalar = 0.5
+ k = BlockOperator(*ops)
+ K = scalar * k
+ X = BlockDataContainer(*x) + 1
+
+ Y = K.T.direct(X)
+ self.assertTrue(Y.shape == (1,1))
+ zero = numpy.zeros(X.get_item(0).shape)
+ numpy.testing.assert_array_equal(Y.get_item(0).as_array(),scalar*(len(x)+zero))
+
+
+ def test_ScaledBlockOperatorScalarList(self):
+ ig = [ ImageGeometry(2,3) , \
+ #ImageGeometry(10,20,30) , \
+ ImageGeometry(2,3 ) ]
+ x = [ g.allocate() for g in ig ]
+ ops = [ TomoIdentity(g) for g in ig ]
+
+
+ # test limit as non Scaled
+ scalar = numpy.asarray([1 for _ in x])
+ k = BlockOperator(*ops)
+ K = scalar * k
+ val = 1
+ X = BlockDataContainer(*x) + val
+
+ Y = K.T.direct(X)
+ self.assertTrue(Y.shape == (1,1))
+ zero = numpy.zeros(X.get_item(0).shape)
+ xx = numpy.asarray([val for _ in x])
+ numpy.testing.assert_array_equal(Y.get_item(0).as_array(),(scalar*xx).sum()+zero)
+
+ scalar = numpy.asarray([i+1 for i,el in enumerate(x)])
+ #scalar = numpy.asarray([6,0])
+ k = BlockOperator(*ops)
+ K = scalar * k
+ X = BlockDataContainer(*x) + val
+ Y = K.T.direct(X)
+ self.assertTrue(Y.shape == (1,1))
+ zero = numpy.zeros(X.get_item(0).shape)
+ xx = numpy.asarray([val for _ in x])
+
+
+ numpy.testing.assert_array_equal(Y.get_item(0).as_array(),
+ (scalar*xx).sum()+zero)
+
+
+ def test_TomoIdentity(self):
+ ig = ImageGeometry(10,20,30)
+ img = ig.allocate()
+ self.assertTrue(img.shape == (30,20,10))
+ self.assertEqual(img.sum(), 0)
+ Id = TomoIdentity(ig)
+ y = Id.direct(img)
+ numpy.testing.assert_array_equal(y.as_array(), img.as_array())
+
+ def skiptest_CGLS_tikhonov(self):
+ from ccpi.optimisation.algorithms import CGLS
+
+ from ccpi.plugins.ops import CCPiProjectorSimple
+ from ccpi.optimisation.ops import PowerMethodNonsquare
+ from ccpi.optimisation.ops import TomoIdentity
+ from ccpi.optimisation.funcs import Norm2sq, Norm1
+ from ccpi.framework import ImageGeometry, AcquisitionGeometry
+ from ccpi.optimisation.Algorithms import GradientDescent
+ #from ccpi.optimisation.Algorithms import CGLS
+ import matplotlib.pyplot as plt
+
+
+ # Set up phantom size N x N x vert by creating ImageGeometry, initialising the
+ # ImageData object with this geometry and empty array and finally put some
+ # data into its array, and display one slice as image.
+
+ # Image parameters
+ N = 128
+ vert = 4
+
+ # Set up image geometry
+ ig = ImageGeometry(voxel_num_x=N,
+ voxel_num_y=N,
+ voxel_num_z=vert)
+
+ # Set up empty image data
+ Phantom = ImageData(geometry=ig,
+ dimension_labels=['horizontal_x',
+ 'horizontal_y',
+ 'vertical'])
+ Phantom += 0.05
+ # Populate image data by looping over and filling slices
+ i = 0
+ while i < vert:
+ if vert > 1:
+ x = Phantom.subset(vertical=i).array
+ else:
+ x = Phantom.array
+ x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+ x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94
+ if vert > 1 :
+ Phantom.fill(x, vertical=i)
+ i += 1
+
+
+ perc = 0.02
+ # Set up empty image data
+ noise = ImageData(numpy.random.normal(loc = 0.04 ,
+ scale = perc ,
+ size = Phantom.shape), geometry=ig,
+ dimension_labels=['horizontal_x',
+ 'horizontal_y',
+ 'vertical'])
+ Phantom += noise
+
+ # Set up AcquisitionGeometry object to hold the parameters of the measurement
+ # setup geometry: # Number of angles, the actual angles from 0 to
+ # pi for parallel beam, set the width of a detector
+        # pixel relative to an object pixel and the number of detector pixels.
+ angles_num = 20
+ det_w = 1.0
+ det_num = N
+
+ angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\
+ 180/numpy.pi
+
+ # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count,
+ # horz detector pixel size, vert detector pixel count,
+ # vert detector pixel size.
+ ag = AcquisitionGeometry('parallel',
+ '3D',
+ angles,
+ N,
+ det_w,
+ vert,
+ det_w)
+
+ # Set up Operator object combining the ImageGeometry and AcquisitionGeometry
+ # wrapping calls to CCPi projector.
+ A = CCPiProjectorSimple(ig, ag)
+
+ # Forward and backprojection are available as methods direct and adjoint. Here
+ # generate test data b and some noise
+
+ b = A.direct(Phantom)
+
+
+ #z = A.adjoint(b)
+
+
+ # Using the test data b, different reconstruction methods can now be set up as
+ # demonstrated in the rest of this file. In general all methods need an initial
+ # guess and some algorithm options to be set. Note that 100 iterations for
+ # some of the methods is a very low number and 1000 or 10000 iterations may be
+ # needed if one wants to obtain a converged solution.
+ x_init = ImageData(geometry=ig,
+ dimension_labels=['horizontal_x','horizontal_y','vertical'])
+ X_init = BlockDataContainer(x_init)
+ B = BlockDataContainer(b,
+ ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
+
+ # setup a tomo identity
+ Ibig = 1e5 * TomoIdentity(geometry=ig)
+ Ismall = 1e-5 * TomoIdentity(geometry=ig)
+
+ # composite operator
+ Kbig = BlockOperator(A, Ibig, shape=(2,1))
+ Ksmall = BlockOperator(A, Ismall, shape=(2,1))
+
+ #out = K.direct(X_init)
+
+ f = Norm2sq(Kbig,B)
+ f.L = 0.00003
+
+ fsmall = Norm2sq(Ksmall,B)
+ f.L = 0.00003
+
+ simplef = Norm2sq(A, b)
+ simplef.L = 0.00003
+
+ gd = GradientDescent( x_init=x_init, objective_function=simplef,
+ rate=simplef.L)
+ gd.max_iteration = 10
+
+ cg = CGLS()
+ cg.set_up(X_init, Kbig, B )
+ cg.max_iteration = 1
+
+ cgsmall = CGLS()
+ cgsmall.set_up(X_init, Ksmall, B )
+ cgsmall.max_iteration = 1
+
+
+ cgs = CGLS()
+ cgs.set_up(x_init, A, b )
+ cgs.max_iteration = 6
+ #
+ #out.__isub__(B)
+ #out2 = K.adjoint(out)
+
+ #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+
+ #for _ in gd:
+ # print ("iteration {} {}".format(gd.iteration, gd.get_current_loss()))
+
+ #cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)) )
+
+ #cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+
+ #cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+ #cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+ # for _ in cg:
+ # print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
+ #
+ # fig = plt.figure()
+ # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
+ # plt.title('Composite CGLS')
+ # plt.show()
+ #
+ # for _ in cgs:
+ # print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
+ #
+ fig = plt.figure()
+ plt.subplot(1,5,1)
+ plt.imshow(Phantom.subset(vertical=0).as_array())
+ plt.title('Simulated Phantom')
+ plt.subplot(1,5,2)
+ plt.imshow(gd.get_output().subset(vertical=0).as_array())
+ plt.title('Simple Gradient Descent')
+ plt.subplot(1,5,3)
+ plt.imshow(cgs.get_output().subset(vertical=0).as_array())
+ plt.title('Simple CGLS')
+ plt.subplot(1,5,4)
+ plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
+ plt.title('Composite CGLS\nbig lambda')
+ plt.subplot(1,5,5)
+ plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array())
+ plt.title('Composite CGLS\nsmall lambda')
+ plt.show()
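The shape bookkeeping these tests rely on follows matrix-style composition: a BlockOperator of shape (rows, cols) applied to a BlockDataContainer with cols entries yields a result with rows entries, and K.T swaps the two. A condensed sketch of the first test above (same geometries and operators as assumed there):

from ccpi.optimisation.operators import BlockOperator
from ccpi.framework import BlockDataContainer, ImageGeometry
from ccpi.optimisation.ops import TomoIdentity

ig = ImageGeometry(10, 20, 30)
ops = [TomoIdentity(ig) for _ in range(3)]

K = BlockOperator(*ops)                                  # default shape (3, 1)
X = BlockDataContainer(*[ig.allocate() for _ in range(3)]) + 1

Y = K.T.direct(X)                                        # (1, 3) applied to (3, 1) -> (1, 1)
assert Y.shape == (1, 1)                                 # single block: sum of the three identities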
diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py
index 05f3fe8..47feb95 100755
--- a/Wrappers/Python/test/test_DataContainer.py
+++ b/Wrappers/Python/test/test_DataContainer.py
@@ -174,7 +174,7 @@ class TestDataContainer(unittest.TestCase):
def binary_add(self):
print("Test binary add")
X, Y, Z = 512, 512, 512
- X, Y, Z = 256, 512, 512
+ X, Y, Z = 1024, 512, 512
steps = [timer()]
a = numpy.ones((X, Y, Z), dtype='float32')
steps.append(timer())
@@ -425,6 +425,28 @@ class TestDataContainer(unittest.TestCase):
res = False
print(err)
self.assertTrue(res)
+
+ def test_dot(self):
+ a0 = numpy.asarray([i for i in range(2*3*4)])
+ a1 = numpy.asarray([2*i for i in range(2*3*4)])
+
+
+ ds0 = DataContainer(numpy.reshape(a0,(2,3,4)))
+ ds1 = DataContainer(numpy.reshape(a1,(2,3,4)))
+
+ numpy.testing.assert_equal(ds0.dot(ds1), a0.dot(a1))
+
+ a2 = numpy.asarray([2*i for i in range(2*3*5)])
+ ds2 = DataContainer(numpy.reshape(a2,(2,3,5)))
+
+ # it should fail if the shape is wrong
+ try:
+ ds2.dot(ds0)
+ self.assertTrue(False)
+ except ValueError as ve:
+ self.assertTrue(True)
+
+
def test_ImageData(self):
# create ImageData from geometry
@@ -457,6 +479,49 @@ class TestDataContainer(unittest.TestCase):
pixel_num_h=5, channels=2)
sino = AcquisitionData(geometry=sgeometry)
self.assertEqual(sino.shape, (2, 10, 3, 5))
+ def test_ImageGeometry_allocate(self):
+ vgeometry = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2)
+ image = vgeometry.allocate()
+ self.assertEqual(0,image.as_array()[0][0][0])
+ image = vgeometry.allocate(1)
+ self.assertEqual(1,image.as_array()[0][0][0])
+ default_order = ['channel' , 'horizontal_y' , 'horizontal_x']
+ self.assertEqual(default_order[0], image.dimension_labels[0])
+ self.assertEqual(default_order[1], image.dimension_labels[1])
+ self.assertEqual(default_order[2], image.dimension_labels[2])
+ order = [ 'horizontal_x' , 'horizontal_y', 'channel' ]
+ image = vgeometry.allocate(0,dimension_labels=order)
+ self.assertEqual(order[0], image.dimension_labels[0])
+ self.assertEqual(order[1], image.dimension_labels[1])
+ self.assertEqual(order[2], image.dimension_labels[2])
+ def test_AcquisitionGeometry_allocate(self):
+ ageometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10),
+ geom_type='parallel', pixel_num_v=3,
+ pixel_num_h=5, channels=2)
+ sino = ageometry.allocate()
+ shape = sino.shape
+ print ("shape", shape)
+ self.assertEqual(0,sino.as_array()[0][0][0][0])
+ self.assertEqual(0,sino.as_array()[shape[0]-1][shape[1]-1][shape[2]-1][shape[3]-1])
+
+ sino = ageometry.allocate(1)
+ self.assertEqual(1,sino.as_array()[0][0][0][0])
+ self.assertEqual(1,sino.as_array()[shape[0]-1][shape[1]-1][shape[2]-1][shape[3]-1])
+ print (sino.dimension_labels, sino.shape, ageometry)
+
+        default_order = ['channel', 'angle',
+                         'vertical', 'horizontal']
+ self.assertEqual(default_order[0], sino.dimension_labels[0])
+ self.assertEqual(default_order[1], sino.dimension_labels[1])
+ self.assertEqual(default_order[2], sino.dimension_labels[2])
+ self.assertEqual(default_order[3], sino.dimension_labels[3])
+ order = ['vertical' , 'horizontal', 'channel' , 'angle' ]
+ sino = ageometry.allocate(0,dimension_labels=order)
+ print (sino.dimension_labels, sino.shape, ageometry)
+ self.assertEqual(order[0], sino.dimension_labels[0])
+ self.assertEqual(order[1], sino.dimension_labels[1])
+ self.assertEqual(order[2], sino.dimension_labels[2])
+        self.assertEqual(order[3], sino.dimension_labels[3])
def assertNumpyArrayEqual(self, first, second):
res = True
@@ -496,4 +561,4 @@ class TestDataContainer(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
- \ No newline at end of file
+
diff --git a/Wrappers/Python/test/test_DataProcessor.py b/Wrappers/Python/test/test_DataProcessor.py
new file mode 100755
index 0000000..1c1de3a
--- /dev/null
+++ b/Wrappers/Python/test/test_DataProcessor.py
@@ -0,0 +1,76 @@
+import sys
+import unittest
+import numpy
+from ccpi.framework import DataProcessor
+from ccpi.framework import DataContainer
+from ccpi.framework import ImageData
+from ccpi.framework import AcquisitionData
+from ccpi.framework import ImageGeometry
+from ccpi.framework import AcquisitionGeometry
+from timeit import default_timer as timer
+
+from ccpi.framework import AX, CastDataContainer, PixelByPixelDataProcessor
+
+class TestDataProcessor(unittest.TestCase):
+
+ def test_DataProcessorChaining(self):
+ shape = (2,3,4,5)
+ size = shape[0]
+ for i in range(1, len(shape)):
+ size = size * shape[i]
+ #print("a refcount " , sys.getrefcount(a))
+ a = numpy.asarray([i for i in range( size )])
+ a = numpy.reshape(a, shape)
+ ds = DataContainer(a, False, ['X', 'Y','Z' ,'W'])
+ c = ds.subset(['Z','W','X'])
+ arr = c.as_array()
+ #[ 0 60 1 61 2 62 3 63 4 64 5 65 6 66 7 67 8 68 9 69 10 70 11 71
+ # 12 72 13 73 14 74 15 75 16 76 17 77 18 78 19 79]
+
+ ax = AX()
+ ax.scalar = 2
+ ax.set_input(c)
+ #ax.apply()
+ print ("ax in {0} out {1}".format(c.as_array().flatten(),
+ ax.get_output().as_array().flatten()))
+ numpy.testing.assert_array_equal(ax.get_output().as_array(), arr*2)
+
+ cast = CastDataContainer(dtype=numpy.float32)
+ cast.set_input(c)
+ out = cast.get_output()
+ self.assertTrue(out.as_array().dtype == numpy.float32)
+ out *= 0
+ axm = AX()
+ axm.scalar = 0.5
+ axm.set_input(c)
+ axm.get_output(out)
+ numpy.testing.assert_array_equal(out.as_array(), arr*0.5)
+
+ # check out in DataSetProcessor
+ #a = numpy.asarray([i for i in range( size )])
+
+ # create a PixelByPixelDataProcessor
+
+ #define a python function which will take only one input (the pixel value)
+ pyfunc = lambda x: -x if x > 20 else x
+ clip = PixelByPixelDataProcessor()
+ clip.pyfunc = pyfunc
+ clip.set_input(c)
+ #clip.apply()
+ v = clip.get_output().as_array()
+
+ self.assertTrue(v.max() == 19)
+ self.assertTrue(v.min() == -79)
+
+ print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array()))
+
+ #dsp = DataProcessor()
+ #dsp.set_input(ds)
+ #dsp.input = a
+ # pipeline
+
+ chain = AX()
+ chain.scalar = 0.5
+ chain.set_input_processor(ax)
+ print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array()))
+ numpy.testing.assert_array_equal(chain.get_output().as_array(), arr) \ No newline at end of file
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
new file mode 100644
index 0000000..46e8c7c
--- /dev/null
+++ b/Wrappers/Python/test/test_Operator.py
@@ -0,0 +1,24 @@
+import unittest
+#from ccpi.optimisation.operators import Operator
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.framework import ImageGeometry, ImageData
+import numpy
+
+class TestOperator(unittest.TestCase):
+ def test_ScaledOperator(self):
+ ig = ImageGeometry(10,20,30)
+ img = ig.allocate()
+ scalar = 0.5
+ sid = scalar * TomoIdentity(ig)
+ numpy.testing.assert_array_equal(scalar * img.as_array(), sid.direct(img).as_array())
+
+
+ def test_TomoIdentity(self):
+ ig = ImageGeometry(10,20,30)
+ img = ig.allocate()
+ self.assertTrue(img.shape == (30,20,10))
+ self.assertEqual(img.sum(), 0)
+ Id = TomoIdentity(ig)
+ y = Id.direct(img)
+ numpy.testing.assert_array_equal(y.as_array(), img.as_array())
+
diff --git a/Wrappers/Python/wip/CGLS_tikhonov.py b/Wrappers/Python/wip/CGLS_tikhonov.py
new file mode 100644
index 0000000..e9bbcd9
--- /dev/null
+++ b/Wrappers/Python/wip/CGLS_tikhonov.py
@@ -0,0 +1,196 @@
+from ccpi.optimisation.algorithms import CGLS
+
+from ccpi.plugins.ops import CCPiProjectorSimple
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.funcs import Norm2sq, Norm1
+from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData
+from ccpi.optimisation.algorithms import GradientDescent
+#from ccpi.optimisation.algorithms import CGLS
+import matplotlib.pyplot as plt
+import numpy
+from ccpi.framework import BlockDataContainer
+from ccpi.optimisation.operators import BlockOperator
+
+# Set up phantom size N x N x vert by creating ImageGeometry, initialising the
+# ImageData object with this geometry and empty array and finally put some
+# data into its array, and display one slice as image.
+
+# Image parameters
+N = 128
+vert = 4
+
+# Set up image geometry
+ig = ImageGeometry(voxel_num_x=N,
+ voxel_num_y=N,
+ voxel_num_z=vert)
+
+# Set up empty image data
+Phantom = ImageData(geometry=ig,
+ dimension_labels=['horizontal_x',
+ 'horizontal_y',
+ 'vertical'])
+Phantom += 0.05
+# Populate image data by looping over and filling slices
+i = 0
+while i < vert:
+ if vert > 1:
+ x = Phantom.subset(vertical=i).array
+ else:
+ x = Phantom.array
+ x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+ x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94
+ if vert > 1 :
+ Phantom.fill(x, vertical=i)
+ i += 1
+
+
+perc = 0.02
+# Set up empty image data
+noise = ImageData(numpy.random.normal(loc = 0.04 ,
+ scale = perc ,
+ size = Phantom.shape), geometry=ig,
+ dimension_labels=['horizontal_x',
+ 'horizontal_y',
+ 'vertical'])
+Phantom += noise
+
+# Set up AcquisitionGeometry object to hold the parameters of the measurement
+# setup geometry: number of angles, the actual angles from 0 to
+# pi for parallel beam, the width of a detector pixel relative to an
+# object pixel, and the number of detector pixels.
+angles_num = 20
+det_w = 1.0
+det_num = N
+
+angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\
+ 180/numpy.pi
+
+# Inputs: Geometry, 2D or 3D, angles, horz detector pixel count,
+# horz detector pixel size, vert detector pixel count,
+# vert detector pixel size.
+ag = AcquisitionGeometry('parallel',
+ '3D',
+ angles,
+ N,
+ det_w,
+ vert,
+ det_w)
+
+# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
+# wrapping calls to CCPi projector.
+A = CCPiProjectorSimple(ig, ag)
+
+# Forward and backprojection are available as methods direct and adjoint. Here
+# generate test data b and some noise
+
+b = A.direct(Phantom)
+
+
+#z = A.adjoint(b)
+
+
+# Using the test data b, different reconstruction methods can now be set up as
+# demonstrated in the rest of this file. In general all methods need an initial
+# guess and some algorithm options to be set. Note that 100 iterations for
+# some of the methods is a very low number and 1000 or 10000 iterations may be
+# needed if one wants to obtain a converged solution.
+x_init = ImageData(geometry=ig,
+ dimension_labels=['horizontal_x','horizontal_y','vertical'])
+X_init = BlockDataContainer(x_init)
+B = BlockDataContainer(b,
+ ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
+
+# setup a tomo identity
+Ibig = 1e5 * TomoIdentity(geometry=ig)
+Ismall = 1e-5 * TomoIdentity(geometry=ig)
+Iok = 1e1 * TomoIdentity(geometry=ig)
+
+# composite operator
+Kbig = BlockOperator(A, Ibig, shape=(2,1))
+Ksmall = BlockOperator(A, Ismall, shape=(2,1))
+Kok = BlockOperator(A, Iok, shape=(2,1))
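+# Why the block construction above works: with K = [A; alpha*I] and
+# B = [b; 0] (the empty ImageData stacked into B plays the role of the zero
+# block),
+#     || K x - B ||^2 = || A x - b ||^2 + alpha^2 || x ||^2,
+# so running plain CGLS on (K, B) solves the Tikhonov-regularised problem, with
+# the regularisation strength set by the scaling of the TomoIdentity operator.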
+
+#out = K.direct(X_init)
+
+f = Norm2sq(Kbig,B)
+f.L = 0.00003
+
+fsmall = Norm2sq(Ksmall,B)
+fsmall.L = 0.00003
+
+fok = Norm2sq(Kok,B)
+fok.L = 0.00003
+
+simplef = Norm2sq(A, b)
+simplef.L = 0.00003
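+
+# A sketch of estimating the Lipschitz value with the power method instead of
+# hard-coding it, mirroring the commented-out code in wip/CreatePhantom.py
+# (there the estimate is further scaled down as a tuning choice):
+x0 = x_init.copy()
+x0.fill(numpy.random.randn(*x0.shape))
+lipschitz = PowerMethodNonsquare(A, 5, x0)
+print("power method estimate", lipschitz[0])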
+
+gd = GradientDescent( x_init=x_init, objective_function=simplef,
+ rate=simplef.L)
+gd.max_iteration = 50
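+# (Note: the rate above reuses the hand-set value simplef.L as the step size.
+# For a genuine Lipschitz constant L of the gradient the classical safe step
+# is at most 1/L; here L is just a small tuning number, so rate == L acts as a
+# fixed small step.)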
+
+Kbig.direct(X_init)
+Kbig.adjoint(B)
+cg = CGLS()
+cg.set_up(X_init, Kbig, B )
+cg.max_iteration = 10
+
+cgsmall = CGLS()
+cgsmall.set_up(X_init, Ksmall, B )
+cgsmall.max_iteration = 10
+
+
+cgs = CGLS()
+cgs.set_up(x_init, A, b )
+cgs.max_iteration = 10
+
+cgok = CGLS()
+cgok.set_up(X_init, Kok, B )
+cgok.max_iteration = 10
+# #
+#out.__isub__(B)
+#out2 = K.adjoint(out)
+
+#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+
+for _ in gd:
+ print ("iteration {} {}".format(gd.iteration, gd.get_last_loss()))
+
+cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+
+cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+
+cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
+cgok.run(10, verbose=True)
+# # for _ in cg:
+# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
+# #
+# # fig = plt.figure()
+# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
+# # plt.title('Composite CGLS')
+# # plt.show()
+# #
+# # for _ in cgs:
+# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
+# #
+fig = plt.figure()
+plt.subplot(2,3,1)
+plt.imshow(Phantom.subset(vertical=0).as_array())
+plt.title('Simulated Phantom')
+plt.subplot(2,3,2)
+plt.imshow(gd.get_output().subset(vertical=0).as_array())
+plt.title('Simple Gradient Descent')
+plt.subplot(2,3,3)
+plt.imshow(cgs.get_output().subset(vertical=0).as_array())
+plt.title('Simple CGLS')
+plt.subplot(2,3,5)
+plt.imshow(cg.get_output().get_item(0).subset(vertical=0).as_array())
+plt.title('Composite CGLS\nbig lambda')
+plt.subplot(2,3,6)
+plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=0).as_array())
+plt.title('Composite CGLS\nsmall lambda')
+plt.subplot(2,3,4)
+plt.imshow(cgok.get_output().get_item(0).subset(vertical=0).as_array())
+plt.title('Composite CGLS\nok lambda')
+plt.show()
diff --git a/Wrappers/Python/wip/CreatePhantom.py b/Wrappers/Python/wip/CreatePhantom.py
new file mode 100644
index 0000000..4bf6ea4
--- /dev/null
+++ b/Wrappers/Python/wip/CreatePhantom.py
@@ -0,0 +1,242 @@
+import numpy
+import tomophantom
+from tomophantom import TomoP3D
+from tomophantom.supp.artifacts import ArtifactsClass as Artifact
+import os
+
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import CGLS
+from ccpi.plugins.ops import CCPiProjectorSimple
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.funcs import Norm2sq, Norm1
+from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData
+from ccpi.optimisation.algorithms import GradientDescent
+from ccpi.framework import BlockDataContainer
+from ccpi.optimisation.operators import BlockOperator
+
+
+model = 13 # select a model number from tomophantom library
+N_size = 64 # Define phantom dimensions using a scalar value (cubic phantom)
+path = os.path.dirname(tomophantom.__file__)
+path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
+
+#This will generate a N_size x N_size x N_size phantom (3D)
+phantom_tm = TomoP3D.Model(model, N_size, path_library3D)
+
+# detector column count (horizontal)
+detector_horiz = int(numpy.sqrt(2)*N_size)
+# detector row count (vertical) (no reason for it to be > N)
+detector_vert = N_size
+# number of projection angles
+angles_num = int(0.5*numpy.pi*N_size)
+# angles are expressed in degrees
+angles = numpy.linspace(0.0, 179.9, angles_num, dtype='float32')
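+# (Rules of thumb behind the sizes above: sqrt(2)*N detector columns cover the
+# diagonal of an N x N slice, and roughly pi/2 * N projection angles over 180
+# degrees is the usual angular-sampling guideline for parallel-beam data.)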
+
+
+acquisition_data_array = TomoP3D.ModelSino(model, N_size,
+ detector_horiz, detector_vert,
+ angles,
+ path_library3D)
+
+tomophantom_acquisition_axes_order = ['vertical', 'angle', 'horizontal']
+
+artifacts = Artifact(acquisition_data_array)
+
+
+tp_acq_data = AcquisitionData(artifacts.noise(0.2, 'Gaussian'),
+ dimension_labels=tomophantom_acquisition_axes_order)
+#print ("size", acquisition_data.shape)
+print ("horiz", detector_horiz)
+print ("vert", detector_vert)
+print ("angles", angles_num)
+
+tp_acq_geometry = AcquisitionGeometry(geom_type='parallel', dimension='3D',
+ angles=angles,
+ pixel_num_h=detector_horiz,
+ pixel_num_v=detector_vert,
+ channels=1,
+ )
+
+acq_data = tp_acq_geometry.allocate()
+#print (tp_acq_geometry)
+print ("AcquisitionData", acq_data.shape)
+print ("TomoPhantom", tp_acq_data.shape, tp_acq_data.dimension_labels)
+
+default_acquisition_axes_order = ['angle', 'vertical', 'horizontal']
+
+acq_data2 = tp_acq_data.subset(dimensions=default_acquisition_axes_order)
+print ("AcquisitionData", acq_data2.shape, acq_data2.dimension_labels)
+print ("AcquisitionData {} TomoPhantom {}".format(id(acq_data2.as_array()),
+ id(acquisition_data_array)))
+
+fig = plt.figure()
+plt.subplot(1,2,1)
+plt.imshow(acquisition_data_array[20])
+plt.title('Sinogram')
+plt.subplot(1,2,2)
+plt.imshow(tp_acq_data.as_array()[20])
+plt.title('Sinogram + noise')
+plt.show()
+
+# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
+# wrapping calls to CCPi projector.
+
+ig = ImageGeometry(voxel_num_x=detector_horiz,
+ voxel_num_y=detector_horiz,
+ voxel_num_z=detector_vert)
+A = CCPiProjectorSimple(ig, tp_acq_geometry)
+# Forward and backprojection are available as methods direct and adjoint. Here
+# generate test data b and some noise
+
+#b = A.direct(Phantom)
+b = acq_data2
+
+#z = A.adjoint(b)
+
+
+# Using the test data b, different reconstruction methods can now be set up as
+# demonstrated in the rest of this file. In general all methods need an initial
+# guess and some algorithm options to be set. Note that 100 iterations for
+# some of the methods is a very low number and 1000 or 10000 iterations may be
+# needed if one wants to obtain a converged solution.
+x_init = ImageData(geometry=ig,
+ dimension_labels=['horizontal_x','horizontal_y','vertical'])
+X_init = BlockDataContainer(x_init)
+B = BlockDataContainer(b,
+ ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
+
+# setup a tomo identity
+Ibig = 4e1 * TomoIdentity(geometry=ig)
+Ismall = 1e-3 * TomoIdentity(geometry=ig)
+Iok = 7.6e0 * TomoIdentity(geometry=ig)
+
+# composite operator
+Kbig = BlockOperator(A, Ibig, shape=(2,1))
+Ksmall = BlockOperator(A, Ismall, shape=(2,1))
+Kok = BlockOperator(A, Iok, shape=(2,1))
+
+#out = K.direct(X_init)
+#x0 = x_init.copy()
+#x0.fill(numpy.random.randn(*x0.shape))
+#lipschitz = PowerMethodNonsquare(A, 5, x0)
+#print("lipschitz", lipschitz)
+
+#%%
+
+simplef = Norm2sq(A, b, memopt=False)
+#simplef.L = lipschitz[0]/3000.
+simplef.L = 0.00003
+
+f = Norm2sq(Kbig,B)
+f.L = 0.00003
+
+fsmall = Norm2sq(Ksmall,B)
+fsmall.L = 0.00003
+
+fok = Norm2sq(Kok,B)
+fok.L = 0.00003
+
+print("setup gradient descent")
+gd = GradientDescent( x_init=x_init, objective_function=simplef,
+ rate=simplef.L)
+gd.max_iteration = 5
+simplef2 = Norm2sq(A, b, memopt=True)
+#simplef.L = lipschitz[0]/3000.
+simplef2.L = 0.00003
+print("setup gradient descent")
+gd2 = GradientDescent( x_init=x_init, objective_function=simplef2,
+ rate=simplef2.L)
+gd2.max_iteration = 5
+
+Kbig.direct(X_init)
+Kbig.adjoint(B)
+print("setup CGLS")
+cg = CGLS()
+cg.set_up(X_init, Kbig, B )
+cg.max_iteration = 10
+
+print("setup CGLS")
+cgsmall = CGLS()
+cgsmall.set_up(X_init, Ksmall, B )
+cgsmall.max_iteration = 10
+
+
+print("setup CGLS")
+cgs = CGLS()
+cgs.set_up(x_init, A, b )
+cgs.max_iteration = 10
+
+print("setup CGLS")
+cgok = CGLS()
+cgok.set_up(X_init, Kok, B )
+cgok.max_iteration = 10
+# #
+#out.__isub__(B)
+#out2 = K.adjoint(out)
+
+#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+
+
+for _ in gd:
+ print ("GradientDescent iteration {} {}".format(gd.iteration, gd.get_last_loss()))
+#gd2.run(5,verbose=True)
+print("CGLS block lambda big")
+cg.run(10, lambda it,val: print ("CGLS big iteration {} objective {}".format(it,val)))
+
+print("CGLS standard")
+cgs.run(10, lambda it,val: print ("CGLS standard iteration {} objective {}".format(it,val)))
+
+print("CGLS block lambda small")
+cgsmall.run(10, lambda it,val: print ("CGLS small iteration {} objective {}".format(it,val)))
+print("CGLS block lambdaok")
+cgok.run(10, verbose=True)
+# # for _ in cg:
+# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
+# #
+# # fig = plt.figure()
+# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
+# # plt.title('Composite CGLS')
+# # plt.show()
+# #
+# # for _ in cgs:
+# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
+# #
+Phantom = ImageData(phantom_tm)
+
+theslice=40
+
+fig = plt.figure()
+plt.subplot(2,3,1)
+plt.imshow(numpy.flip(Phantom.subset(vertical=theslice).as_array(),axis=0), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Simulated Phantom')
+plt.subplot(2,3,2)
+plt.imshow(gd.get_output().subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Simple Gradient Descent')
+plt.subplot(2,3,3)
+plt.imshow(cgs.get_output().subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Simple CGLS')
+plt.subplot(2,3,5)
+plt.imshow(cg.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Composite CGLS\nbig lambda')
+plt.subplot(2,3,6)
+plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Composite CGLS\nsmall lambda')
+plt.subplot(2,3,4)
+plt.imshow(cgok.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Composite CGLS\nok lambda')
+plt.show()
+
+
+#Ibig = 7e1 * TomoIdentity(geometry=ig)
+#Kbig = BlockOperator(A, Ibig, shape=(2,1))
+#cg2 = CGLS(x_init=X_init, operator=Kbig, data=B)
+#cg2.max_iteration = 10
+#cg2.run(10, verbose=True)
diff --git a/Wrappers/Python/wip/demo_gradient_descent.py b/Wrappers/Python/wip/demo_gradient_descent.py
new file mode 100755
index 0000000..4d6647e
--- /dev/null
+++ b/Wrappers/Python/wip/demo_gradient_descent.py
@@ -0,0 +1,295 @@
+
+from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
+from ccpi.optimisation.algs import FISTA, FBPD, CGLS
+from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2
+
+from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
+from ccpi.optimisation.ops import Identity
+from ccpi.optimisation.ops import FiniteDiff2D
+
+# Requires CVXPY, see http://www.cvxpy.org/
+# CVXPY can be installed in anaconda using
+# conda install -c cvxgrp cvxpy libgcc
+
+# The CVXPY comparison further down in this script is left commented out.
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+class Algorithm(object):
+ def __init__(self, *args, **kwargs):
+ pass
+ def set_up(self, *args, **kwargs):
+ raise NotImplementedError()
+ def update(self):
+ raise NotImplementedError()
+
+ def should_stop(self):
+ raise NotImplementedError()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.should_stop():
+ raise StopIteration()
+ else:
+ self.update()
+
+class GradientDescent(Algorithm):
+ x = None
+ rate = 0
+ objective_function = None
+ regulariser = None
+ iteration = 0
+    stop_criterion = 'max_iter'
+ __max_iteration = 0
+ __loss = []
+    def __init__(self, **kwargs):
+        # set up immediately only if all the required arguments were supplied
+        args = ['x_init', 'objective_function', 'rate']
+        for k, v in kwargs.items():
+            if k in args:
+                args.pop(args.index(k))
+        if len(args) == 0:
+            self.set_up(x_init=kwargs['x_init'],
+                        objective_function=kwargs['objective_function'],
+                        rate=kwargs['rate'])
+
+ def should_stop(self):
+ return self.iteration >= self.max_iteration
+
+    def set_up(self, x_init, objective_function, rate):
+        self.x = x_init.copy()
+        self.x_update = x_init.copy()
+        self.objective_function = objective_function
+        self.rate = rate
+        # start a fresh per-instance loss history: appending to the class-level
+        # list would share it between instances
+        self.__loss = [objective_function(x_init)]
+
+ def update(self):
+
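+        # gradient step: x <- x - rate * grad f(x), computed in place via x_update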
+ self.objective_function.gradient(self.x, out=self.x_update)
+ self.x_update *= -self.rate
+ self.x += self.x_update
+ self.__loss.append(self.objective_function(self.x))
+ self.iteration += 1
+
+ def get_output(self):
+ return self.x
+ def get_current_loss(self):
+ return self.__loss[-1]
+ @property
+ def loss(self):
+ return self.__loss
+ @property
+ def max_iteration(self):
+ return self.__max_iteration
+ @max_iteration.setter
+ def max_iteration(self, value):
+ assert isinstance(value, int)
+ self.__max_iteration = value
+
+
+
+
+
+# Problem data.
+m = 30
+n = 20
+np.random.seed(1)
+Amat = np.random.randn(m, n)
+A = LinearOperatorMatrix(Amat)
+bmat = np.random.randn(m)
+bmat.shape = (bmat.shape[0],1)
+
+# A = Identity()
+# Change n to equal to m.
+
+b = DataContainer(bmat)
+
+# Regularization parameter
+lam = 10
+opt = {'memopt':True}
+# Create object instances with the test data A and b.
+f = Norm2sq(A,b,c=0.5, memopt=True)
+g0 = ZeroFun()
+
+# Initial guess
+x_init = DataContainer(np.zeros((n,1)))
+
+f.grad(x_init)
+
+# Run FISTA for least squares plus zero function.
+x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)
+
+# Print solution and final objective/criterion value for comparison
+print("FISTA least squares plus zero function solution and objective value:")
+print(x_fista0.array)
+print(criter0[-1])
+
+gd = GradientDescent(x_init=x_init, objective_function=f, rate=0.001)
+gd.max_iteration = 5000
+
+for i,el in enumerate(gd):
+ if i%100 == 0:
+ print ("\rIteration {} Loss: {}".format(gd.iteration,
+ gd.get_current_loss()))
+
+
+#%%
+
+
+#
+#if use_cvxpy:
+# # Compare to CVXPY
+#
+# # Construct the problem.
+# x0 = Variable(n)
+# objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
+# prob0 = Problem(objective0)
+#
+# # The optimal objective is returned by prob.solve().
+# result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)
+#
+# # The optimal solution for x is stored in x.value and optimal objective value
+# # is in result as well as in objective.value
+# print("CVXPY least squares plus zero function solution and objective value:")
+# print(x0.value)
+# print(objective0.value)
+#
+## Plot criterion curve to see FISTA converge to same value as CVX.
+#iternum = np.arange(1,1001)
+#plt.figure()
+#plt.loglog(iternum[[0,-1]],[objective0.value, objective0.value], label='CVX LS')
+#plt.loglog(iternum,criter0,label='FISTA LS')
+#plt.legend()
+#plt.show()
+#
+## Create 1-norm object instance
+#g1 = Norm1(lam)
+#
+#g1(x_init)
+#x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1)))
+#x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1)))
+#v = g1.prox(x_rand,0.02)
+##vv = g1.prox(x_rand2,0.02)
+#vv = v.copy()
+#vv *= 0
+#print (">>>>>>>>>>vv" , vv.as_array())
+#vv.fill(v)
+#print (">>>>>>>>>>fill" , vv.as_array())
+#g1.proximal(x_rand, 0.02, out=vv)
+#print (">>>>>>>>>>v" , v.as_array())
+#print (">>>>>>>>>>gradient" , vv.as_array())
+#
+#print (">>>>>>>>>>" , (v-vv).as_array())
+#import sys
+##sys.exit(0)
+## Combine with least squares and solve using generic FISTA implementation
+#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
+#
+## Print for comparison
+#print("FISTA least squares plus 1-norm solution and objective value:")
+#print(x_fista1)
+#print(criter1[-1])
+#
+#if use_cvxpy:
+# # Compare to CVXPY
+#
+# # Construct the problem.
+# x1 = Variable(n)
+# objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
+# prob1 = Problem(objective1)
+#
+# # The optimal objective is returned by prob.solve().
+# result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
+#
+# # The optimal solution for x is stored in x.value and optimal objective value
+# # is in result as well as in objective.value
+# print("CVXPY least squares plus 1-norm solution and objective value:")
+# print(x1.value)
+# print(objective1.value)
+#
+## Now try another algorithm FBPD for same problem:
+#x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1)
+#print(x_fbpd1)
+#print(criterfbpd1[-1])
+#
+## Plot criterion curve to see both FISTA and FBPD converge to same value.
+## Note that FISTA is very efficient for 1-norm minimization so it beats
+## FBPD in this test by a lot. But FBPD can handle a larger class of problems
+## than FISTA can.
+#plt.figure()
+#plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
+#plt.loglog(iternum,criter1,label='FISTA LS+1')
+#plt.legend()
+#plt.show()
+#
+#plt.figure()
+#plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
+#plt.loglog(iternum,criter1,label='FISTA LS+1')
+#plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
+#plt.legend()
+#plt.show()
+
+# Now try 1-norm and TV denoising with FBPD, first 1-norm.
+
+# Set up phantom size NxN by creating ImageGeometry, initialising the
+# ImageData object with this geometry and empty array and finally put some
+# data into its array, and display as image.
+N = 64
+ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
+Phantom = ImageData(geometry=ig)
+
+x = Phantom.as_array()
+x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
+
+plt.imshow(x)
+plt.title('Phantom image')
+plt.show()
+
+# Identity operator for denoising
+I = TomoIdentity(ig)
+
+# Data and add noise
+y = I.direct(Phantom)
+y.array = y.array + 0.1*np.random.randn(N, N)
+
+plt.imshow(y.array)
+plt.title('Noisy image')
+plt.show()
+
+
+###################
+# Data fidelity term
+f_denoise = Norm2sq(I,y,c=0.5,memopt=True)
+
+# 1-norm regulariser
+lam1_denoise = 1.0
+g1_denoise = Norm1(lam1_denoise)
+
+# Initial guess
+x_init_denoise = ImageData(np.zeros((N,N)))
+
+# Combine with least squares and solve using generic FISTA implementation
+x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = \
+ FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
+
+print(x_fista1_denoise)
+print(criter1_denoise[-1])
+
+# Data fidelity for gradient-descent denoising: the earlier f was built for the
+# small random-matrix problem, so a least-squares term on (I, y) is used here.
+f_2 = Norm2sq(I, y, c=0.5, memopt=True)
+gd = GradientDescent(x_init=x_init_denoise,
+                     objective_function=f_2, rate=0.001)
+gd.max_iteration = 5000
+
+for i,el in enumerate(gd):
+ if i%100 == 0:
+ print ("\rIteration {} Loss: {}".format(gd.iteration,
+ gd.get_current_loss()))
+
+plt.imshow(gd.get_output().as_array())
+plt.title('GD image')
+plt.show()
+