author     Edoardo Pasca <edo.paskino@gmail.com>  2019-04-10 11:30:28 +0100
committer  GitHub <noreply@github.com>            2019-04-10 11:30:28 +0100
commit     58229d793fc2a24ff65d3128435e4351e3b7e73d (patch)
tree       a0f9740d9371de94156c0e40f1e98bccf55dfd47 /Wrappers
parent     e7a0152d17e03df79db6cd3cd50d07202f9d185d (diff)
Direct adjoint out (#236)
* fix direct out
* test blockoperator adjoint and direct with out
* add missing method and code for memory optimisation
* untrack build directory
* fix imports
* fixed adjoint without out parameter
* fix adjoint operator of Gradient, added test
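The patch targets the `direct`/`adjoint` calling convention with a pre-allocated `out` argument, so repeated applications of an operator write into existing memory instead of allocating a new container on every call. Below is a minimal sketch of that pattern; the `Gradient` import path and constructor signature are assumptions for illustration and are not part of the diff that follows.

```python
# Illustrative sketch only (not part of the patch): exercising direct/adjoint
# with a pre-allocated `out` container, as tested by this commit.
from ccpi.framework import ImageGeometry
from ccpi.optimisation.operators import Gradient   # assumed import path

ig = ImageGeometry(voxel_num_x=64, voxel_num_y=64)
x = ig.allocate('random_int')        # random test image

G = Gradient(ig)                     # assumed constructor: geometry as first argument

y = G.direct(x)                      # first call allocates the result
G.direct(x, out=y)                   # later calls write into y, avoiding reallocation

z = G.adjoint(y)
G.adjoint(y, out=z)                  # same pattern for the adjoint
```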
Diffstat (limited to 'Wrappers')
-rw-r--r--  Wrappers/Python/build/lib/ccpi/__init__.py | 18
-rw-r--r--  Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py | 337
-rw-r--r--  Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py | 38
-rw-r--r--  Wrappers/Python/build/lib/ccpi/framework/__init__.py | 26
-rw-r--r--  Wrappers/Python/build/lib/ccpi/framework/framework.py | 1496
-rw-r--r--  Wrappers/Python/build/lib/ccpi/io/__init__.py | 18
-rw-r--r--  Wrappers/Python/build/lib/ccpi/io/reader.py | 500
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/__init__.py | 18
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py | 158
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py | 87
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py | 86
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py | 121
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py | 76
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py | 155
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py | 32
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/algs.py | 319
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/funcs.py | 272
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py | 79
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py | 69
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py | 65
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py | 65
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py | 92
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py | 233
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py | 136
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py | 98
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py | 91
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py | 60
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py | 13
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py | 312
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py | 56
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py | 223
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py | 67
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py | 322
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py | 186
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py | 79
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py | 22
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py | 30
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py | 42
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py | 144
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py | 118
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py | 39
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py | 19
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/ops.py | 294
-rw-r--r--  Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py | 338
-rw-r--r--  Wrappers/Python/build/lib/ccpi/processors.py | 514
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 4
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py | 91
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py | 2
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py | 14
-rw-r--r--  Wrappers/Python/test/test_Operator.py | 318
-rw-r--r--  Wrappers/Python/test/test_functions.py | 2
51 files changed, 397 insertions, 7597 deletions
diff --git a/Wrappers/Python/build/lib/ccpi/__init__.py b/Wrappers/Python/build/lib/ccpi/__init__.py
deleted file mode 100644
index cf2d93d..0000000
--- a/Wrappers/Python/build/lib/ccpi/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
\ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py
deleted file mode 100644
index 21ef3f0..0000000
--- a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Mar 5 16:04:45 2019
-
-@author: ofn77899
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import numpy
-from numbers import Number
-import functools
-from ccpi.framework import DataContainer
-#from ccpi.framework import AcquisitionData, ImageData
-#from ccpi.optimisation.operators import Operator, LinearOperator
-
-class BlockDataContainer(object):
- '''Class to hold DataContainers as column vector'''
- __array_priority__ = 1
- def __init__(self, *args, **kwargs):
- ''''''
- self.containers = args
- self.index = 0
- #shape = kwargs.get('shape', None)
- #if shape is None:
- # shape = (len(args),1)
- shape = (len(args),1)
- self.shape = shape
- #print (self.shape)
- n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
- if len(args) != n_elements:
- raise ValueError(
- 'Dimension and size do not match: expected {} got {}'
- .format(n_elements, len(args)))
-
-
- def __iter__(self):
- '''BlockDataContainer is Iterable'''
- return self
- def next(self):
- '''python2 backwards compatibility'''
- return self.__next__()
- def __next__(self):
- try:
- out = self[self.index]
- except IndexError as ie:
- raise StopIteration()
- self.index+=1
- return out
-
- def is_compatible(self, other):
- '''basic check if the size of the 2 objects fit'''
-
- for i in range(len(self.containers)):
- if type(self.containers[i])==type(self):
- self = self.containers[i]
-
- if isinstance(other, Number):
- return True
- elif isinstance(other, list):
- for ot in other:
- if not isinstance(ot, (Number,\
- numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\
- numpy.float, numpy.float16, numpy.float32, numpy.float64, \
- numpy.complex)):
- raise ValueError('List/ numpy array can only contain numbers {}'\
- .format(type(ot)))
- return len(self.containers) == len(other)
- elif isinstance(other, numpy.ndarray):
- return len(self.containers) == len(other)
- elif issubclass(other.__class__, DataContainer):
- return self.get_item(0).shape == other.shape
- return len(self.containers) == len(other.containers)
-
- def get_item(self, row):
- if row > self.shape[0]:
- raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
- return self.containers[row]
-
- def __getitem__(self, row):
- return self.get_item(row)
-
- def add(self, other, *args, **kwargs):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for add')
- out = kwargs.get('out', None)
- #print ("args" , *args)
- if isinstance(other, Number):
- return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- elif issubclass(other.__class__, DataContainer):
- # try to do algebra with one DataContainer. Will raise error if not compatible
- return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape)
-
- return type(self)(
- *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
- shape=self.shape)
-
- def subtract(self, other, *args, **kwargs):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for subtract')
- out = kwargs.get('out', None)
- if isinstance(other, Number):
- return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- elif issubclass(other.__class__, DataContainer):
- # try to do algebra with one DataContainer. Will raise error if not compatible
- return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
- shape=self.shape)
-
- def multiply(self, other, *args, **kwargs):
- if not self.is_compatible(other):
- raise ValueError('{} Incompatible for multiply'.format(other))
- out = kwargs.get('out', None)
- if isinstance(other, Number):
- return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- elif isinstance(other, list):
- return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- elif isinstance(other, numpy.ndarray):
- return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- elif issubclass(other.__class__, DataContainer):
- # try to do algebra with one DataContainer. Will raise error if not compatible
- return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
- shape=self.shape)
-
- def divide(self, other, *args, **kwargs):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for divide')
- out = kwargs.get('out', None)
- if isinstance(other, Number):
- return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- elif issubclass(other.__class__, DataContainer):
- # try to do algebra with one DataContainer. Will raise error if not compatible
- return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
- shape=self.shape)
-
- def power(self, other, *args, **kwargs):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for power')
- out = kwargs.get('out', None)
- if isinstance(other, Number):
- return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
-
- def maximum(self,other, *args, **kwargs):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for maximum')
- out = kwargs.get('out', None)
- if isinstance(other, Number):
- return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
- return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
-
- ## unary operations
- def abs(self, *args, **kwargs):
- return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape)
- def sign(self, *args, **kwargs):
- return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape)
- def sqrt(self, *args, **kwargs):
- return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape)
- def conjugate(self, out=None):
- return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape)
-
- ## reductions
- def sum(self, *args, **kwargs):
- return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers])
- def squared_norm(self):
- y = numpy.asarray([el.squared_norm() for el in self.containers])
- return y.sum()
- def norm(self):
- return numpy.sqrt(self.squared_norm())
- def copy(self):
- '''alias of clone'''
- return self.clone()
- def clone(self):
- return type(self)(*[el.copy() for el in self.containers], shape=self.shape)
- def fill(self, x):
- for el,ot in zip(self.containers, x):
- el.fill(ot)
-
- def __add__(self, other):
- return self.add( other )
- # __radd__
-
- def __sub__(self, other):
- return self.subtract( other )
- # __rsub__
-
- def __mul__(self, other):
- return self.multiply(other)
- # __rmul__
-
- def __div__(self, other):
- return self.divide(other)
- # __rdiv__
- def __truediv__(self, other):
- return self.divide(other)
-
- def __pow__(self, other):
- return self.power(other)
- # reverse operand
- def __radd__(self, other):
- '''Reverse addition
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return self + other
- # __radd__
-
- def __rsub__(self, other):
- '''Reverse subtraction
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return (-1 * self) + other
- # __rsub__
-
- def __rmul__(self, other):
- '''Reverse multiplication
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return self * other
- # __rmul__
-
- def __rdiv__(self, other):
- '''Reverse division
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return pow(self / other, -1)
- # __rdiv__
- def __rtruediv__(self, other):
- '''Reverse truedivision
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return self.__rdiv__(other)
-
- def __rpow__(self, other):
- '''Reverse power
-
- to make sure that this method is called rather than the __mul__ of a numpy array
- the class constant __array_priority__ must be set > 0
- https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
- '''
- return other.power(self)
-
- def __iadd__(self, other):
- '''Inline addition'''
- if isinstance (other, BlockDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el += ot
- elif isinstance(other, Number):
- for el in self.containers:
- el += other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for __iadd__')
- for el,ot in zip(self.containers, other):
- el += ot
- return self
- # __iadd__
-
- def __isub__(self, other):
- '''Inline subtraction'''
- if isinstance (other, BlockDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el -= ot
- elif isinstance(other, Number):
- for el in self.containers:
- el -= other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for __isub__')
- for el,ot in zip(self.containers, other):
- el -= ot
- return self
- # __isub__
-
- def __imul__(self, other):
- '''Inline multiplication'''
- if isinstance (other, BlockDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el *= ot
- elif isinstance(other, Number):
- for el in self.containers:
- el *= other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for __imul__')
- for el,ot in zip(self.containers, other):
- el *= ot
- return self
- # __imul__
-
- def __idiv__(self, other):
- '''Inline division'''
- if isinstance (other, BlockDataContainer):
- for el,ot in zip(self.containers, other.containers):
- el /= ot
- elif isinstance(other, Number):
- for el in self.containers:
- el /= other
- elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- if not self.is_compatible(other):
- raise ValueError('Incompatible for __idiv__')
- for el,ot in zip(self.containers, other):
- el /= ot
- return self
- # __rdiv__
- def __itruediv__(self, other):
- '''Inline truedivision'''
- return self.__idiv__(other)
-
diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py
deleted file mode 100644
index 0f43155..0000000
--- a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import numpy
-from numbers import Number
-import functools
-from ccpi.framework import BlockDataContainer
-#from ccpi.optimisation.operators import Operator, LinearOperator
-
-class BlockGeometry(object):
- '''Class to hold Geometry as column vector'''
- #__array_priority__ = 1
- def __init__(self, *args, **kwargs):
- ''''''
- self.geometries = args
- self.index = 0
- #shape = kwargs.get('shape', None)
- #if shape is None:
- # shape = (len(args),1)
- shape = (len(args),1)
- self.shape = shape
- #print (self.shape)
- n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
- if len(args) != n_elements:
- raise ValueError(
- 'Dimension and size do not match: expected {} got {}'
- .format(n_elements, len(args)))
-
- def get_item(self, index):
- '''returns the Geometry in the BlockGeometry located at position index'''
- return self.geometries[index]
-
- def allocate(self, value=0, dimension_labels=None):
- containers = [geom.allocate(value) for geom in self.geometries]
- return BlockDataContainer(*containers)
-
diff --git a/Wrappers/Python/build/lib/ccpi/framework/__init__.py b/Wrappers/Python/build/lib/ccpi/framework/__init__.py
deleted file mode 100644
index 229edb5..0000000
--- a/Wrappers/Python/build/lib/ccpi/framework/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Mar 5 16:00:18 2019
-
-@author: ofn77899
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import numpy
-import sys
-from datetime import timedelta, datetime
-import warnings
-from functools import reduce
-
-
-from .framework import DataContainer
-from .framework import ImageData, AcquisitionData
-from .framework import ImageGeometry, AcquisitionGeometry
-from .framework import find_key, message
-from .framework import DataProcessor
-from .framework import AX, PixelByPixelDataProcessor, CastDataContainer
-from .BlockDataContainer import BlockDataContainer
-from .BlockGeometry import BlockGeometry
diff --git a/Wrappers/Python/build/lib/ccpi/framework/framework.py b/Wrappers/Python/build/lib/ccpi/framework/framework.py
deleted file mode 100644
index 07c2ead..0000000
--- a/Wrappers/Python/build/lib/ccpi/framework/framework.py
+++ /dev/null
@@ -1,1496 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import numpy
-import sys
-from datetime import timedelta, datetime
-import warnings
-from functools import reduce
-from numbers import Number
-
-
-def find_key(dic, val):
- """return the key of dictionary dic given the value"""
- return [k for k, v in dic.items() if v == val][0]
-
-def message(cls, msg, *args):
- msg = "{0}: " + msg
- for i in range(len(args)):
- msg += " {%d}" %(i+1)
- args = list(args)
- args.insert(0, cls.__name__ )
-
- return msg.format(*args )
-
-
-class ImageGeometry(object):
- RANDOM = 'random'
- RANDOM_INT = 'random_int'
- CHANNEL = 'channel'
- ANGLE = 'angle'
- VERTICAL = 'vertical'
- HORIZONTAL_X = 'horizontal_x'
- HORIZONTAL_Y = 'horizontal_y'
-
- def __init__(self,
- voxel_num_x=0,
- voxel_num_y=0,
- voxel_num_z=0,
- voxel_size_x=1,
- voxel_size_y=1,
- voxel_size_z=1,
- center_x=0,
- center_y=0,
- center_z=0,
- channels=1):
-
- self.voxel_num_x = voxel_num_x
- self.voxel_num_y = voxel_num_y
- self.voxel_num_z = voxel_num_z
- self.voxel_size_x = voxel_size_x
- self.voxel_size_y = voxel_size_y
- self.voxel_size_z = voxel_size_z
- self.center_x = center_x
- self.center_y = center_y
- self.center_z = center_z
- self.channels = channels
-
- # this is some code repetition
- if self.channels > 1:
- if self.voxel_num_z>1:
- self.length = 4
- self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x)
- dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL,
- ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]
- else:
- self.length = 3
- self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x)
- dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]
- else:
- if self.voxel_num_z>1:
- self.length = 3
- self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x)
- dim_labels = [ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- else:
- self.length = 2
- self.shape = (self.voxel_num_y, self.voxel_num_x)
- dim_labels = [ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]
-
- self.dimension_labels = dim_labels
-
- def get_min_x(self):
- return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x
-
- def get_max_x(self):
- return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x
-
- def get_min_y(self):
- return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y
-
- def get_max_y(self):
- return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y
-
- def get_min_z(self):
- if not self.voxel_num_z == 0:
- return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z
- else:
- return 0
-
- def get_max_z(self):
- if not self.voxel_num_z == 0:
- return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z
- else:
- return 0
-
- def clone(self):
- '''returns a copy of ImageGeometry'''
- return ImageGeometry(
- self.voxel_num_x,
- self.voxel_num_y,
- self.voxel_num_z,
- self.voxel_size_x,
- self.voxel_size_y,
- self.voxel_size_z,
- self.center_x,
- self.center_y,
- self.center_z,
- self.channels)
- def __str__ (self):
- repres = ""
- repres += "Number of channels: {0}\n".format(self.channels)
- repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z)
- repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z)
- repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z)
- return repres
- def allocate(self, value=0, dimension_labels=None, **kwargs):
- '''allocates an ImageData according to the size expressed in the instance'''
- out = ImageData(geometry=self)
- if isinstance(value, Number):
- if value != 0:
- out += value
- else:
- if value == ImageGeometry.RANDOM:
- seed = kwargs.get('seed', None)
- if seed is not None:
- numpy.random.seed(seed)
- out.fill(numpy.random.random_sample(self.shape))
- elif value == ImageGeometry.RANDOM_INT:
- seed = kwargs.get('seed', None)
- if seed is not None:
- numpy.random.seed(seed)
- max_value = kwargs.get('max_value', 100)
- out.fill(numpy.random.randint(max_value,size=self.shape))
- else:
- raise ValueError('Value {} unknown'.format(value))
- if dimension_labels is not None:
- if dimension_labels != self.dimension_labels:
- return out.subset(dimensions=dimension_labels)
- return out
- # The following methods return 2 members of the class, therefore I
- # don't think we need to implement them.
- # Additionally using __len__ is confusing as one would think this is
- # an iterable.
- #def __len__(self):
- # '''returns the length of the geometry'''
- # return self.length
- #def shape(self):
- # '''Returns the shape of the array of the ImageData it describes'''
- # return self.shape
-
-class AcquisitionGeometry(object):
- RANDOM = 'random'
- RANDOM_INT = 'random_int'
- ANGLE_UNIT = 'angle_unit'
- DEGREE = 'degree'
- RADIAN = 'radian'
- CHANNEL = 'channel'
- ANGLE = 'angle'
- VERTICAL = 'vertical'
- HORIZONTAL = 'horizontal'
- def __init__(self,
- geom_type,
- dimension,
- angles,
- pixel_num_h=0,
- pixel_size_h=1,
- pixel_num_v=0,
- pixel_size_v=1,
- dist_source_center=None,
- dist_center_detector=None,
- channels=1,
- **kwargs
- ):
- """
- General inputs for standard type projection geometries
- detectorDomain or detectorpixelSize:
- If 2D
- If scalar: Width of detector or single detector pixel
- If 2-vec: Error
- If 3D
- If scalar: Width in both dimensions
- If 2-vec: Vertical then horizontal size
- grid
- If 2D
- If scalar: number of detectors
- If 2-vec: error
- If 3D
- If scalar: Square grid that size
- If 2-vec vertical then horizontal size
- cone or parallel
- 2D or 3D
- parallel_parameters: ?
- cone_parameters:
- source_to_center_dist (if parallel: NaN)
- center_to_detector_dist (if parallel: NaN)
- standard or nonstandard (vec) geometry
- angles
- angles_format radians or degrees
- """
- self.geom_type = geom_type # 'parallel' or 'cone'
- self.dimension = dimension # 2D or 3D
- self.angles = angles
- num_of_angles = len (angles)
-
- self.dist_source_center = dist_source_center
- self.dist_center_detector = dist_center_detector
-
- self.pixel_num_h = pixel_num_h
- self.pixel_size_h = pixel_size_h
- self.pixel_num_v = pixel_num_v
- self.pixel_size_v = pixel_size_v
-
- self.channels = channels
- self.angle_unit=kwargs.get(AcquisitionGeometry.ANGLE_UNIT,
- AcquisitionGeometry.DEGREE)
- if channels > 1:
- if pixel_num_v > 1:
- shape = (channels, num_of_angles , pixel_num_v, pixel_num_h)
- dim_labels = [AcquisitionGeometry.CHANNEL ,
- AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL ,
- AcquisitionGeometry.HORIZONTAL]
- else:
- shape = (channels , num_of_angles, pixel_num_h)
- dim_labels = [AcquisitionGeometry.CHANNEL ,
- AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL]
- else:
- if pixel_num_v > 1:
- shape = (num_of_angles, pixel_num_v, pixel_num_h)
- dim_labels = [AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL ,
- AcquisitionGeometry.HORIZONTAL]
- else:
- shape = (num_of_angles, pixel_num_h)
- dim_labels = [AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL]
- self.shape = shape
-
- self.dimension_labels = dim_labels
-
- def clone(self):
- '''returns a copy of the AcquisitionGeometry'''
- return AcquisitionGeometry(self.geom_type,
- self.dimension,
- self.angles,
- self.pixel_num_h,
- self.pixel_size_h,
- self.pixel_num_v,
- self.pixel_size_v,
- self.dist_source_center,
- self.dist_center_detector,
- self.channels)
-
- def __str__ (self):
- repres = ""
- repres += "Number of dimensions: {0}\n".format(self.dimension)
- repres += "angles: {0}\n".format(self.angles)
- repres += "voxel_num : h{0},v{1}\n".format(self.pixel_num_h, self.pixel_num_v)
- repres += "voxel size: h{0},v{1}\n".format(self.pixel_size_h, self.pixel_size_v)
- repres += "geometry type: {0}\n".format(self.geom_type)
- repres += "distance source-detector: {0}\n".format(self.dist_source_center)
- repres += "distance center-detector: {0}\n".format(self.dist_source_center)
- repres += "number of channels: {0}\n".format(self.channels)
- return repres
- def allocate(self, value=0, dimension_labels=None):
- '''allocates an AcquisitionData according to the size expressed in the instance'''
- out = AcquisitionData(geometry=self)
- if isinstance(value, Number):
- if value != 0:
- out += value
- else:
- if value == AcquisitionData.RANDOM:
- seed = kwargs.get('seed', None)
- if seed is not None:
- numpy.random.seed(seed)
- out.fill(numpy.random.random_sample(self.shape))
- elif value == AcquisitionData.RANDOM_INT:
- seed = kwargs.get('seed', None)
- if seed is not None:
- numpy.random.seed(seed)
- max_value = kwargs.get('max_value', 100)
- out.fill(numpy.random.randint(max_value,size=self.shape))
- else:
- raise ValueError('Value {} unknown'.format(value))
- if dimension_labels is not None:
- if dimension_labels != self.dimension_labels:
- return out.subset(dimensions=dimension_labels)
- return out
-
-class DataContainer(object):
- '''Generic class to hold data
-
- Data is currently held in a numpy arrays'''
-
- def __init__ (self, array, deep_copy=True, dimension_labels=None,
- **kwargs):
- '''Holds the data'''
-
- self.shape = numpy.shape(array)
- self.number_of_dimensions = len (self.shape)
- self.dimension_labels = {}
- self.geometry = None # Only relevant for AcquisitionData and ImageData
-
- if dimension_labels is not None and \
- len (dimension_labels) == self.number_of_dimensions:
- for i in range(self.number_of_dimensions):
- self.dimension_labels[i] = dimension_labels[i]
- else:
- for i in range(self.number_of_dimensions):
- self.dimension_labels[i] = 'dimension_{0:02}'.format(i)
-
- if type(array) == numpy.ndarray:
- if deep_copy:
- self.array = array.copy()
- else:
- self.array = array
- else:
- raise TypeError('Array must be NumpyArray, passed {0}'\
- .format(type(array)))
-
- # finally copy the geometry
- if 'geometry' in kwargs.keys():
- self.geometry = kwargs['geometry']
- else:
- # assume it is parallel beam
- pass
-
- def get_dimension_size(self, dimension_label):
- if dimension_label in self.dimension_labels.values():
- acq_size = -1
- for k,v in self.dimension_labels.items():
- if v == dimension_label:
- acq_size = self.shape[k]
- return acq_size
- else:
- raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label,
- self.dimension_labels))
- def get_dimension_axis(self, dimension_label):
- if dimension_label in self.dimension_labels.values():
- for k,v in self.dimension_labels.items():
- if v == dimension_label:
- return k
- else:
- raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label,
- self.dimension_labels.values()))
-
-
- def as_array(self, dimensions=None):
- '''Returns the DataContainer as Numpy Array
-
- Returns the pointer to the array if dimensions is not set.
- If dimensions is set, it first creates a new DataContainer with the subset
- and then it returns the pointer to the array'''
- if dimensions is not None:
- return self.subset(dimensions).as_array()
- return self.array
-
-
- def subset(self, dimensions=None, **kw):
- '''Creates a DataContainer containing a subset of self according to the
- labels in dimensions'''
- if dimensions is None:
- if kw == {}:
- return self.array.copy()
- else:
- reduced_dims = [v for k,v in self.dimension_labels.items()]
- for dim_l, dim_v in kw.items():
- for k,v in self.dimension_labels.items():
- if v == dim_l:
- reduced_dims.pop(k)
- return self.subset(dimensions=reduced_dims, **kw)
- else:
- # check that all the requested dimensions are in the array
- # this is done by checking the dimension_labels
- proceed = True
- unknown_key = ''
- # axis_order contains the order of the axis that the user wants
- # in the output DataContainer
- axis_order = []
- if type(dimensions) == list:
- for dl in dimensions:
- if dl not in self.dimension_labels.values():
- proceed = False
- unknown_key = dl
- break
- else:
- axis_order.append(find_key(self.dimension_labels, dl))
- if not proceed:
- raise KeyError('Subset error: Unknown key specified {0}'.format(dl))
-
- # slice away the unwanted data from the array
- unwanted_dimensions = self.dimension_labels.copy()
- left_dimensions = []
- for ax in sorted(axis_order):
- this_dimension = unwanted_dimensions.pop(ax)
- left_dimensions.append(this_dimension)
- #print ("unwanted_dimensions {0}".format(unwanted_dimensions))
- #print ("left_dimensions {0}".format(left_dimensions))
- #new_shape = [self.shape[ax] for ax in axis_order]
- #print ("new_shape {0}".format(new_shape))
- command = "self.array["
- for i in range(self.number_of_dimensions):
- if self.dimension_labels[i] in unwanted_dimensions.values():
- value = 0
- for k,v in kw.items():
- if k == self.dimension_labels[i]:
- value = v
-
- command = command + str(value)
- else:
- command = command + ":"
- if i < self.number_of_dimensions -1:
- command = command + ','
- command = command + ']'
-
- cleaned = eval(command)
- # cleaned has collapsed dimensions in the same order of
- # self.array, but we want it in the order stated in the
- # "dimensions".
- # create axes order for numpy.transpose
- axes = []
- for key in dimensions:
- #print ("key {0}".format( key))
- for i in range(len( left_dimensions )):
- ld = left_dimensions[i]
- #print ("ld {0}".format( ld))
- if ld == key:
- axes.append(i)
- #print ("axes {0}".format(axes))
-
- cleaned = numpy.transpose(cleaned, axes).copy()
-
- return type(self)(cleaned , True, dimensions)
-
- def fill(self, array, **dimension):
- '''fills the internal numpy array with the one provided'''
- if dimension == {}:
- if issubclass(type(array), DataContainer) or\
- issubclass(type(array), numpy.ndarray):
- if array.shape != self.shape:
- raise ValueError('Cannot fill with the provided array.' + \
- 'Expecting {0} got {1}'.format(
- self.shape,array.shape))
- if issubclass(type(array), DataContainer):
- numpy.copyto(self.array, array.array)
- else:
- #self.array[:] = array
- numpy.copyto(self.array, array)
- else:
-
- command = 'self.array['
- i = 0
- for k,v in self.dimension_labels.items():
- for dim_label, dim_value in dimension.items():
- if dim_label == v:
- command = command + str(dim_value)
- else:
- command = command + ":"
- if i < self.number_of_dimensions -1:
- command = command + ','
- i += 1
- command = command + "] = array[:]"
- exec(command)
-
-
- def check_dimensions(self, other):
- return self.shape == other.shape
-
- ## algebra
- def __add__(self, other, *args, **kwargs):
- out = kwargs.get('out', None)
-
- if issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- out = self.as_array() + other.as_array()
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise ValueError('Wrong shape: {0} and {1}'.format(self.shape,
- other.shape))
- elif isinstance(other, (int, float, complex)):
- return type(self)(
- self.as_array() + other,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise TypeError('Cannot {0} DataContainer with {1}'.format("add" ,
- type(other)))
- # __add__
-
- def __sub__(self, other):
- if issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- out = self.as_array() - other.as_array()
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape,
- other.shape))
- elif isinstance(other, (int, float, complex)):
- return type(self)(self.as_array() - other,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" ,
- type(other)))
- # __sub__
- def __truediv__(self,other):
- return self.__div__(other)
-
- def __div__(self, other):
- if issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- out = self.as_array() / other.as_array()
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape,
- other.shape))
- elif isinstance(other, (int, float, complex)):
- return type(self)(self.as_array() / other,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" ,
- type(other)))
- # __div__
-
- def __pow__(self, other):
- if issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- out = self.as_array() ** other.as_array()
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape,
- other.shape))
- elif isinstance(other, (int, float, complex)):
- return type(self)(self.as_array() ** other,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" ,
- type(other)))
- # __pow__
-
- def __mul__(self, other):
- if issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- out = self.as_array() * other.as_array()
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape,
- other.shape))
- elif isinstance(other, (int, float, complex,\
- numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\
- numpy.float, numpy.float16, numpy.float32, numpy.float64, \
- numpy.complex)):
- return type(self)(self.as_array() * other,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
-
- else:
- raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" ,
- type(other)))
- # __mul__
-
- # reverse operand
- def __radd__(self, other):
- return self + other
- # __radd__
-
- def __rsub__(self, other):
- return (-1 * self) + other
- # __rsub__
-
- def __rmul__(self, other):
- return self * other
- # __rmul__
-
- def __rdiv__(self, other):
- print ("call __rdiv__")
- return pow(self / other, -1)
- # __rdiv__
- def __rtruediv__(self, other):
- return self.__rdiv__(other)
-
- def __rpow__(self, other):
- if isinstance(other, (int, float)) :
- fother = numpy.ones(numpy.shape(self.array)) * other
- return type(self)(fother ** self.array ,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- elif issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- return type(self)(other.as_array() ** self.array ,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- else:
- raise ValueError('Dimensions do not match')
- # __rpow__
-
- # in-place arithmetic operators:
- # (+=, -=, *=, /= , //=,
- # must return self
-
-
-
- def __iadd__(self, other):
- if isinstance(other, (int, float)) :
- numpy.add(self.array, other, out=self.array)
- elif issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- numpy.add(self.array, other.array, out=self.array)
- else:
- raise ValueError('Dimensions do not match')
- return self
- # __iadd__
-
- def __imul__(self, other):
- if isinstance(other, (int, float)) :
- arr = self.as_array()
- numpy.multiply(arr, other, out=arr)
- elif issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- numpy.multiply(self.array, other.array, out=self.array)
- else:
- raise ValueError('Dimensions do not match')
- return self
- # __imul__
-
- def __isub__(self, other):
- if isinstance(other, (int, float)) :
- numpy.subtract(self.array, other, out=self.array)
- elif issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- numpy.subtract(self.array, other.array, out=self.array)
- else:
- raise ValueError('Dimensions do not match')
- return self
- # __isub__
-
- def __idiv__(self, other):
- return self.__itruediv__(other)
- def __itruediv__(self, other):
- if isinstance(other, (int, float)) :
- numpy.divide(self.array, other, out=self.array)
- elif issubclass(type(other), DataContainer):
- if self.check_dimensions(other):
- numpy.divide(self.array, other.array, out=self.array)
- else:
- raise ValueError('Dimensions do not match')
- return self
- # __idiv__
-
- def __str__ (self, representation=False):
- repres = ""
- repres += "Number of dimensions: {0}\n".format(self.number_of_dimensions)
- repres += "Shape: {0}\n".format(self.shape)
- repres += "Axis labels: {0}\n".format(self.dimension_labels)
- if representation:
- repres += "Representation: \n{0}\n".format(self.array)
- return repres
-
- def clone(self):
- '''returns a copy of itself'''
-
- return type(self)(self.array,
- dimension_labels=self.dimension_labels,
- deep_copy=True,
- geometry=self.geometry )
-
- def get_data_axes_order(self,new_order=None):
- '''returns the axes label of self as a list
-
- if new_order is None returns the labels of the axes as a sorted-by-key list
- if new_order is a list of length number_of_dimensions, returns a list
- with the indices of the axes in new_order with respect to those in
- self.dimension_labels: i.e.
- self.dimension_labels = {0:'horizontal',1:'vertical'}
- new_order = ['vertical','horizontal']
- returns [1,0]
- '''
- if new_order is None:
-
- axes_order = [i for i in range(len(self.shape))]
- for k,v in self.dimension_labels.items():
- axes_order[k] = v
- return axes_order
- else:
- if len(new_order) == self.number_of_dimensions:
- axes_order = [i for i in range(self.number_of_dimensions)]
-
- for i in range(len(self.shape)):
- found = False
- for k,v in self.dimension_labels.items():
- if new_order[i] == v:
- axes_order[i] = k
- found = True
- if not found:
- raise ValueError('Axis label {0} not found.'.format(new_order[i]))
- return axes_order
- else:
- raise ValueError('Expecting {0} axes, got {2}'\
- .format(len(self.shape),len(new_order)))
-
-
- def copy(self):
- '''alias of clone'''
- return self.clone()
-
- ## binary operations
-
- def pixel_wise_binary(self, pwop, x2, *args, **kwargs):
- out = kwargs.get('out', None)
- if out is None:
- if isinstance(x2, (int, float, complex)):
- out = pwop(self.as_array() , x2 , *args, **kwargs )
- elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\
- numpy.float, numpy.float16, numpy.float32, numpy.float64, \
- numpy.complex)):
- out = pwop(self.as_array() , x2 , *args, **kwargs )
- elif issubclass(type(x2) , DataContainer):
- out = pwop(self.as_array() , x2.as_array() , *args, **kwargs )
- return type(self)(out,
- deep_copy=False,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
-
-
- elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer):
- if self.check_dimensions(out) and self.check_dimensions(x2):
- kwargs['out'] = out.as_array()
- pwop(self.as_array(), x2.as_array(), *args, **kwargs )
- #return type(self)(out.as_array(),
- # deep_copy=False,
- # dimension_labels=self.dimension_labels,
- # geometry=self.geometry)
- return out
- else:
- raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
- elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)):
- if self.check_dimensions(out):
- kwargs['out']=out.as_array()
- pwop(self.as_array(), x2, *args, **kwargs )
- return out
- else:
- raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
- elif issubclass(type(out), numpy.ndarray):
- if self.array.shape == out.shape and self.array.dtype == out.dtype:
- kwargs['out'] = out
- pwop(self.as_array(), x2, *args, **kwargs)
- #return type(self)(out,
- # deep_copy=False,
- # dimension_labels=self.dimension_labels,
- # geometry=self.geometry)
- else:
- raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
-
- def add(self, other, *args, **kwargs):
- return self.pixel_wise_binary(numpy.add, other, *args, **kwargs)
-
- def subtract(self, other, *args, **kwargs):
- return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs)
-
- def multiply(self, other, *args, **kwargs):
- return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs)
-
- def divide(self, other, *args, **kwargs):
- return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs)
-
- def power(self, other, *args, **kwargs):
- return self.pixel_wise_binary(numpy.power, other, *args, **kwargs)
-
- def maximum(self, x2, *args, **kwargs):
- return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs)
-
- ## unary operations
- def pixel_wise_unary(self, pwop, *args, **kwargs):
- out = kwargs.get('out', None)
- if out is None:
- out = pwop(self.as_array() , *args, **kwargs )
- return type(self)(out,
- deep_copy=False,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
- elif issubclass(type(out), DataContainer):
- if self.check_dimensions(out):
- kwargs['out'] = out.as_array()
- pwop(self.as_array(), *args, **kwargs )
- else:
- raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
- elif issubclass(type(out), numpy.ndarray):
- if self.array.shape == out.shape and self.array.dtype == out.dtype:
- kwargs['out'] = out
- pwop(self.as_array(), *args, **kwargs)
- else:
- raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
-
- def abs(self, *args, **kwargs):
- return self.pixel_wise_unary(numpy.abs, *args, **kwargs)
-
- def sign(self, *args, **kwargs):
- return self.pixel_wise_unary(numpy.sign, *args, **kwargs)
-
- def sqrt(self, *args, **kwargs):
- return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs)
-
- def conjugate(self, *args, **kwargs):
- return self.pixel_wise_unary(numpy.conjugate, *args, **kwargs)
- #def __abs__(self):
- # operation = FM.OPERATION.ABS
- # return self.callFieldMath(operation, None, self.mask, self.maskOnValue)
- # __abs__
-
- ## reductions
- def sum(self, *args, **kwargs):
- return self.as_array().sum(*args, **kwargs)
- def squared_norm(self):
- '''return the squared euclidean norm of the DataContainer viewed as a vector'''
- #shape = self.shape
- #size = reduce(lambda x,y:x*y, shape, 1)
- #y = numpy.reshape(self.as_array(), (size, ))
- return self.dot(self.conjugate())
- #return self.dot(self)
- def norm(self):
- '''return the euclidean norm of the DataContainer viewed as a vector'''
- return numpy.sqrt(self.squared_norm())
- def dot(self, other, *args, **kwargs):
- '''return the inner product of 2 DataContainers viewed as vectors'''
- if self.shape == other.shape:
- return numpy.dot(self.as_array().ravel(), other.as_array().ravel())
- else:
- raise ValueError('Shapes are not aligned: {} != {}'.format(self.shape, other.shape))
-
-
-
-
-
-class ImageData(DataContainer):
- '''DataContainer for holding 2D or 3D DataContainer'''
-
- def __init__(self,
- array = None,
- deep_copy=False,
- dimension_labels=None,
- **kwargs):
-
- self.geometry = None
- if array is None:
- if 'geometry' in kwargs.keys():
- geometry = kwargs['geometry']
- self.geometry = geometry
- channels = geometry.channels
- horiz_x = geometry.voxel_num_x
- horiz_y = geometry.voxel_num_y
- vert = 1 if geometry.voxel_num_z is None\
- else geometry.voxel_num_z # this should be 1 for 2D
- if dimension_labels is None:
- if channels > 1:
- if vert > 1:
- shape = (channels, vert, horiz_y, horiz_x)
- dim_labels = [ImageGeometry.CHANNEL,
- ImageGeometry.VERTICAL,
- ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- else:
- shape = (channels , horiz_y, horiz_x)
- dim_labels = [ImageGeometry.CHANNEL,
- ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- else:
- if vert > 1:
- shape = (vert, horiz_y, horiz_x)
- dim_labels = [ImageGeometry.VERTICAL,
- ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- else:
- shape = (horiz_y, horiz_x)
- dim_labels = [ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- dimension_labels = dim_labels
- else:
- shape = []
- for dim in dimension_labels:
- if dim == ImageGeometry.CHANNEL:
- shape.append(channels)
- elif dim == ImageGeometry.HORIZONTAL_Y:
- shape.append(horiz_y)
- elif dim == ImageGeometry.VERTICAL:
- shape.append(vert)
- elif dim == ImageGeometry.HORIZONTAL_X:
- shape.append(horiz_x)
- if len(shape) != len(dimension_labels):
- raise ValueError('Missing {0} axes'.format(
- len(dimension_labels) - len(shape)))
- shape = tuple(shape)
-
- array = numpy.zeros( shape , dtype=numpy.float32)
- super(ImageData, self).__init__(array, deep_copy,
- dimension_labels, **kwargs)
-
- else:
- raise ValueError('Please pass either a DataContainer, ' +\
- 'a numpy array or a geometry')
- else:
- if issubclass(type(array) , DataContainer):
- # if the array is a DataContainer get the info from there
- if not ( array.number_of_dimensions == 2 or \
- array.number_of_dimensions == 3 or \
- array.number_of_dimensions == 4):
- raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\
- .format(array.number_of_dimensions))
-
- #DataContainer.__init__(self, array.as_array(), deep_copy,
- # array.dimension_labels, **kwargs)
- super(ImageData, self).__init__(array.as_array(), deep_copy,
- array.dimension_labels, **kwargs)
- elif issubclass(type(array) , numpy.ndarray):
- if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ):
- raise ValueError(
- 'Number of dimensions are not 2 or 3 or 4 : {0}'\
- .format(array.ndim))
-
- if dimension_labels is None:
- if array.ndim == 4:
- dimension_labels = [ImageGeometry.CHANNEL,
- ImageGeometry.VERTICAL,
- ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- elif array.ndim == 3:
- dimension_labels = [ImageGeometry.VERTICAL,
- ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
- else:
- dimension_labels = [ ImageGeometry.HORIZONTAL_Y,
- ImageGeometry.HORIZONTAL_X]
-
- #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs)
- super(ImageData, self).__init__(array, deep_copy,
- dimension_labels, **kwargs)
-
- # load metadata from kwargs if present
- for key, value in kwargs.items():
- if (type(value) == list or type(value) == tuple) and \
- ( len (value) == 3 and len (value) == 2) :
- if key == 'origin' :
- self.origin = value
- if key == 'spacing' :
- self.spacing = value
-
- def subset(self, dimensions=None, **kw):
- # FIXME: this is clearly not rigth
- # it should be something like
- # out = DataContainer.subset(self, dimensions, **kw)
- # followed by regeneration of the proper geometry.
- out = super(ImageData, self).subset(dimensions, **kw)
- #out.geometry = self.recalculate_geometry(dimensions , **kw)
- out.geometry = self.geometry
- return out
-
-
-class AcquisitionData(DataContainer):
- '''DataContainer for holding 2D or 3D sinogram'''
-
- def __init__(self,
- array = None,
- deep_copy=True,
- dimension_labels=None,
- **kwargs):
- self.geometry = None
- if array is None:
- if 'geometry' in kwargs.keys():
- geometry = kwargs['geometry']
- self.geometry = geometry
- channels = geometry.channels
- horiz = geometry.pixel_num_h
- vert = geometry.pixel_num_v
- angles = geometry.angles
- num_of_angles = numpy.shape(angles)[0]
-
- if dimension_labels is None:
- if channels > 1:
- if vert > 1:
- shape = (channels, num_of_angles , vert, horiz)
- dim_labels = [AcquisitionGeometry.CHANNEL,
- AcquisitionGeometry.ANGLE,
- AcquisitionGeometry.VERTICAL,
- AcquisitionGeometry.HORIZONTAL]
- else:
- shape = (channels , num_of_angles, horiz)
- dim_labels = [AcquisitionGeometry.CHANNEL,
- AcquisitionGeometry.ANGLE,
- AcquisitionGeometry.HORIZONTAL]
- else:
- if vert > 1:
- shape = (num_of_angles, vert, horiz)
- dim_labels = [AcquisitionGeometry.ANGLE,
- AcquisitionGeometry.VERTICAL,
- AcquisitionGeometry.HORIZONTAL
- ]
- else:
- shape = (num_of_angles, horiz)
- dim_labels = [AcquisitionGeometry.ANGLE,
- AcquisitionGeometry.HORIZONTAL
- ]
-
- dimension_labels = dim_labels
- else:
- shape = []
- for dim in dimension_labels:
- if dim == AcquisitionGeometry.CHANNEL:
- shape.append(channels)
- elif dim == AcquisitionGeometry.ANGLE:
- shape.append(num_of_angles)
- elif dim == AcquisitionGeometry.VERTICAL:
- shape.append(vert)
- elif dim == AcquisitionGeometry.HORIZONTAL:
- shape.append(horiz)
- if len(shape) != len(dimension_labels):
- raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\
- .format(
- len(dimension_labels) - len(shape),
- dimension_labels, shape)
- )
- shape = tuple(shape)
-
- array = numpy.zeros( shape , dtype=numpy.float32)
- super(AcquisitionData, self).__init__(array, deep_copy,
- dimension_labels, **kwargs)
- else:
-
- if issubclass(type(array) ,DataContainer):
- # if the array is a DataContainer get the info from there
- if not ( array.number_of_dimensions == 2 or \
- array.number_of_dimensions == 3 or \
- array.number_of_dimensions == 4):
- raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\
- .format(array.number_of_dimensions))
-
- #DataContainer.__init__(self, array.as_array(), deep_copy,
- # array.dimension_labels, **kwargs)
- super(AcquisitionData, self).__init__(array.as_array(), deep_copy,
- array.dimension_labels, **kwargs)
- elif issubclass(type(array) ,numpy.ndarray):
- if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ):
- raise ValueError(
- 'Number of dimensions are not 2 or 3 or 4 : {0}'\
- .format(array.ndim))
-
- if dimension_labels is None:
- if array.ndim == 4:
- dimension_labels = ['channel' ,'angle' , 'vertical' ,
- 'horizontal']
- elif array.ndim == 3:
- dimension_labels = ['angle' , 'vertical' ,
- 'horizontal']
- else:
- dimension_labels = ['angle' ,
- 'horizontal']
-
- #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs)
- super(AcquisitionData, self).__init__(array, deep_copy,
- dimension_labels, **kwargs)
-
-
-class DataProcessor(object):
- '''Defines a generic DataContainer processor
-
- accepts DataContainer as inputs and
- outputs DataContainer
- additional attributes can be defined with __setattr__
- '''
-
- def __init__(self, **attributes):
- if not 'store_output' in attributes.keys():
- attributes['store_output'] = True
- attributes['output'] = False
- attributes['runTime'] = -1
- attributes['mTime'] = datetime.now()
- attributes['input'] = None
- for key, value in attributes.items():
- self.__dict__[key] = value
-
-
- def __setattr__(self, name, value):
- if name == 'input':
- self.set_input(value)
- elif name in self.__dict__.keys():
- self.__dict__[name] = value
- self.__dict__['mTime'] = datetime.now()
- else:
- raise KeyError('Attribute {0} not found'.format(name))
- #pass
-
- def set_input(self, dataset):
- if issubclass(type(dataset), DataContainer):
- if self.check_input(dataset):
- self.__dict__['input'] = dataset
- else:
- raise TypeError("Input type mismatch: got {0} expecting {1}"\
- .format(type(dataset), DataContainer))
-
- def check_input(self, dataset):
- '''Checks parameters of the input DataContainer
-
- Should raise an Error if the DataContainer does not match expectations, e.g.
- if the passed DataContainer is 3D while the Processor expects 2D input.
- '''
- raise NotImplementedError('Implement basic checks for input DataContainer')
-
- def get_output(self, out=None):
- for k,v in self.__dict__.items():
- if v is None and k != 'output':
- raise ValueError('Key {0} is None'.format(k))
- shouldRun = False
- if self.runTime == -1:
- shouldRun = True
- elif self.mTime > self.runTime:
- shouldRun = True
-
- # CHECK this
- if self.store_output and shouldRun:
- self.runTime = datetime.now()
- try:
- self.output = self.process(out=out)
- return self.output
- except TypeError as te:
- self.output = self.process()
- return self.output
- self.runTime = datetime.now()
- try:
- return self.process(out=out)
- except TypeError as te:
- return self.process()
-
-
- def set_input_processor(self, processor):
- if issubclass(type(processor), DataProcessor):
- self.__dict__['input'] = processor
- else:
- raise TypeError("Input type mismatch: got {0} expecting {1}"\
- .format(type(processor), DataProcessor))
-
- def get_input(self):
- '''returns the input DataContainer
-
- It is useful in the case the user has provided a DataProcessor as
- input
- '''
- if issubclass(type(self.input), DataProcessor):
- dsi = self.input.get_output()
- else:
- dsi = self.input
- return dsi
-
- def process(self, out=None):
- raise NotImplementedError('process must be implemented')
-
-
-
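
The DataProcessor contract above (set_input / check_input / process / get_output) is easiest to see with a tiny concrete subclass. The sketch below is illustrative only: the Scale class is made up for this example, and it assumes the DataContainer and DataProcessor names exported by ccpi.framework elsewhere in this package.

# Hypothetical minimal DataProcessor subclass, for illustration only.
import numpy
from ccpi.framework import DataContainer, DataProcessor

class Scale(DataProcessor):
    '''Multiplies the input DataContainer by a constant factor.'''
    def __init__(self, factor=1.0):
        super(Scale, self).__init__(factor=factor, input=None)

    def check_input(self, dataset):
        # accept any DataContainer
        return True

    def process(self, out=None):
        dsi = self.get_input()
        result = self.factor * dsi.as_array()
        if out is None:
            return DataContainer(result, True,
                                 dimension_labels=dsi.dimension_labels)
        out.fill(result)

# usage sketch
data = DataContainer(numpy.ones((2, 3), dtype=numpy.float32), True, ['X', 'Y'])
scaler = Scale(factor=3.0)
scaler.set_input(data)
print(scaler.get_output().as_array())   # array of 3.0
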
-
-class DataProcessor23D(DataProcessor):
- '''DataProcessor for regularisers, accepting 2D or 3D input
- '''
-
- def check_input(self, dataset):
- '''Checks number of dimensions input DataContainer
-
- Expected input is 2D or 3D
- '''
- if dataset.number_of_dimensions == 2 or \
- dataset.number_of_dimensions == 3:
- return True
- else:
- raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
-###### Example of DataProcessors
-
-class AX(DataProcessor):
- '''Example DataProcessor
- The AXPY routines perform a vector multiplication operation defined as
-
- y := a*x
- where:
-
- a is a scalar
-
- x is a DataContainer.
- '''
-
- def __init__(self):
- kwargs = {'scalar':None,
- 'input':None,
- }
-
- #DataProcessor.__init__(self, **kwargs)
- super(AX, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- return True
-
- def process(self, out=None):
-
- dsi = self.get_input()
- a = self.scalar
- if out is None:
- y = DataContainer( a * dsi.as_array() , True,
- dimension_labels=dsi.dimension_labels )
- #self.setParameter(output_dataset=y)
- return y
- else:
- out.fill(a * dsi.as_array())
-
-
-###### Example of DataProcessors
-
-class CastDataContainer(DataProcessor):
- '''Example DataProcessor
- Cast a DataContainer array to a different type.
-
- y := cast(x, dtype)
- where:
-
- dtype is a numpy data type
-
- x is a DataContainer.
- '''
-
- def __init__(self, dtype=None):
- kwargs = {'dtype':dtype,
- 'input':None,
- }
-
- #DataProcessor.__init__(self, **kwargs)
- super(CastDataContainer, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- return True
-
- def process(self, out=None):
-
- dsi = self.get_input()
- dtype = self.dtype
- if out is None:
- y = numpy.asarray(dsi.as_array(), dtype=dtype)
-
- return type(dsi)(numpy.asarray(dsi.as_array(), dtype=dtype),
- dimension_labels=dsi.dimension_labels )
- else:
- out.fill(numpy.asarray(dsi.as_array(), dtype=dtype))
-
-
-
-
-
-class PixelByPixelDataProcessor(DataProcessor):
- '''Example DataProcessor
-
- This processor applies a python function to each pixel of the DataContainer
-
- pyfunc is a python function of a single pixel value
-
- x is a DataContainer.
- '''
-
- def __init__(self):
- kwargs = {'pyfunc':None,
- 'input':None,
- }
- #DataProcessor.__init__(self, **kwargs)
- super(PixelByPixelDataProcessor, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- return True
-
- def process(self, out=None):
-
- pyfunc = self.pyfunc
- dsi = self.get_input()
-
- eval_func = numpy.frompyfunc(pyfunc,1,1)
-
-
- y = DataContainer( eval_func( dsi.as_array() ) , True,
- dimension_labels=dsi.dimension_labels )
- return y
-
-
-
-
-if __name__ == '__main__':
- shape = (2,3,4,5)
- size = shape[0]
- for i in range(1, len(shape)):
- size = size * shape[i]
- #print("a refcount " , sys.getrefcount(a))
- a = numpy.asarray([i for i in range( size )])
- print("a refcount " , sys.getrefcount(a))
- a = numpy.reshape(a, shape)
- print("a refcount " , sys.getrefcount(a))
- ds = DataContainer(a, False, ['X', 'Y','Z' ,'W'])
- print("a refcount " , sys.getrefcount(a))
- print ("ds label {0}".format(ds.dimension_labels))
- subset = ['W' ,'X']
- b = ds.subset( subset )
- print("a refcount " , sys.getrefcount(a))
- print ("b label {0} shape {1}".format(b.dimension_labels,
- numpy.shape(b.as_array())))
- c = ds.subset(['Z','W','X'])
- print("a refcount " , sys.getrefcount(a))
-
- # Create a ImageData sharing the array with c
- volume0 = ImageData(c.as_array(), False, dimensions = c.dimension_labels)
- volume1 = ImageData(c, False)
-
- print ("volume0 {0} volume1 {1}".format(id(volume0.array),
- id(volume1.array)))
-
- # Create a ImageData copying the array from c
- volume2 = ImageData(c.as_array(), dimensions = c.dimension_labels)
- volume3 = ImageData(c)
-
- print ("volume2 {0} volume3 {1}".format(id(volume2.array),
- id(volume3.array)))
-
- # single number DataSet
- sn = DataContainer(numpy.asarray([1]))
-
- ax = AX()
- ax.scalar = 2
- ax.set_input(c)
- #ax.apply()
- print ("ax in {0} out {1}".format(c.as_array().flatten(),
- ax.get_output().as_array().flatten()))
-
- cast = CastDataContainer(dtype=numpy.float32)
- cast.set_input(c)
- out = cast.get_output()
- out *= 0
- axm = AX()
- axm.scalar = 0.5
- axm.set_input_processor(cast)
- axm.get_output(out)
- #axm.apply()
- print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array()))
-
- # check out in DataSetProcessor
- #a = numpy.asarray([i for i in range( size )])
-
-
- # create a PixelByPixelDataProcessor
-
- #define a python function which will take only one input (the pixel value)
- pyfunc = lambda x: -x if x > 20 else x
- clip = PixelByPixelDataProcessor()
- clip.pyfunc = pyfunc
- clip.set_input(c)
- #clip.apply()
-
- print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array()))
-
- #dsp = DataProcessor()
- #dsp.set_input(ds)
- #dsp.input = a
- # pipeline
-
- chain = AX()
- chain.scalar = 0.5
- chain.set_input_processor(ax)
- print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array()))
-
- # testing arithmetic operations
-
- print (b)
- print ((b+1))
- print ((1+b))
-
- print (b)
- print ((b*2))
-
- print (b)
- print ((2*b))
-
- print (b)
- print ((b/2))
-
- print (b)
- print ((2/b))
-
- print (b)
- print ((b**2))
-
- print (b)
- print ((2**b))
-
- print (type(volume3 + 2))
-
- s = [i for i in range(3 * 4 * 4)]
- s = numpy.reshape(numpy.asarray(s), (3,4,4))
- sino = AcquisitionData( s )
-
- shape = (4,3,2)
- a = [i for i in range(2*3*4)]
- a = numpy.asarray(a)
- a = numpy.reshape(a, shape)
- print (numpy.shape(a))
- ds = DataContainer(a, True, ['X', 'Y','Z'])
- # this means that I expect the X to be of length 2 ,
- # y of length 3 and z of length 4
- subset = ['Y' ,'Z']
- b0 = ds.subset( subset )
- print ("shape b 3,2? {0}".format(numpy.shape(b0.as_array())))
- # expectation on b is that it is
- # 3x2 cut at z = 0
-
- subset = ['X' ,'Y']
- b1 = ds.subset( subset , Z=1)
- print ("shape b 2,3? {0}".format(numpy.shape(b1.as_array())))
-
-
-
- # create VolumeData from geometry
- vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2)
- vol = ImageData(geometry=vgeometry)
-
- sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=20),
- geom_type='parallel', pixel_num_v=3,
- pixel_num_h=5 , channels=2)
- sino = AcquisitionData(geometry=sgeometry)
- sino2 = sino.clone()
-
- a0 = numpy.asarray([i for i in range(2*3*4)])
- a1 = numpy.asarray([2*i for i in range(2*3*4)])
-
-
- ds0 = DataContainer(numpy.reshape(a0,(2,3,4)))
- ds1 = DataContainer(numpy.reshape(a1,(2,3,4)))
-
- numpy.testing.assert_equal(ds0.dot(ds1), a0.dot(a1))
-
- a2 = numpy.asarray([2*i for i in range(2*3*5)])
- ds2 = DataContainer(numpy.reshape(a2,(2,3,5)))
-
-# # it should fail if the shape is wrong
-# try:
-# ds2.dot(ds0)
-# self.assertTrue(False)
-# except ValueError as ve:
-# self.assertTrue(True)
-
diff --git a/Wrappers/Python/build/lib/ccpi/io/__init__.py b/Wrappers/Python/build/lib/ccpi/io/__init__.py
deleted file mode 100644
index 9233d7a..0000000
--- a/Wrappers/Python/build/lib/ccpi/io/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. \ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/io/reader.py b/Wrappers/Python/build/lib/ccpi/io/reader.py
deleted file mode 100644
index 856f5e0..0000000
--- a/Wrappers/Python/build/lib/ccpi/io/reader.py
+++ /dev/null
@@ -1,500 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev, Edoardo Pasca and Srikanth Nagella
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''
-This is a reader module with classes for loading 3D datasets.
-
-@author: Mr. Srikanth Nagella
-'''
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from ccpi.framework import AcquisitionGeometry
-from ccpi.framework import AcquisitionData
-import numpy as np
-import os
-
-h5pyAvailable = True
-try:
- from h5py import File as NexusFile
-except:
- h5pyAvailable = False
-
-pilAvailable = True
-try:
- from PIL import Image
-except:
- pilAvailable = False
-
-class NexusReader(object):
- '''
- Reader class for loading Nexus files.
- '''
-
- def __init__(self, nexus_filename=None):
- '''
- Takes the Nexus filename as input and sets up the reader for loading the dataset.
- '''
- self.flat = None
- self.dark = None
- self.angles = None
- self.geometry = None
- self.filename = nexus_filename
- self.key_path = 'entry1/tomo_entry/instrument/detector/image_key'
- self.data_path = 'entry1/tomo_entry/data/data'
- self.angle_path = 'entry1/tomo_entry/data/rotation_angle'
-
- def get_image_keys(self):
- try:
- with NexusFile(self.filename,'r') as file:
- return np.array(file[self.key_path])
- except KeyError as ke:
- raise KeyError("get_image_keys: " , ke.args[0] , self.key_path)
-
-
- def load(self, dimensions=None, image_key_id=0):
- '''
- Generic loading function for flat field, dark field and projection data.
- '''
- if not h5pyAvailable:
- raise Exception("Error: h5py is not installed")
- if self.filename is None:
- return
- try:
- with NexusFile(self.filename,'r') as file:
- image_keys = np.array(file[self.key_path])
- projections = None
- if dimensions == None:
- projections = np.array(file[self.data_path])
- result = projections[image_keys==image_key_id]
- return result
- else:
- #When dimensions are specified they need to be mapped to image_keys
- index_array = np.where(image_keys==image_key_id)
- projection_indexes = index_array[0][dimensions[0]]
- new_dimensions = list(dimensions)
- new_dimensions[0]= projection_indexes
- new_dimensions = tuple(new_dimensions)
- result = np.array(file[self.data_path][new_dimensions])
- return result
- except:
- print("Error reading nexus file")
- raise
-
- def load_projection(self, dimensions=None):
- '''
- Loads the projection data from the nexus file.
- returns: numpy array with projection data
- '''
- try:
- if 0 not in self.get_image_keys():
- raise ValueError("Projections are not in the data. Data Path " ,
- self.data_path)
- except KeyError as ke:
- raise KeyError(ke.args[0] , self.data_path)
- return self.load(dimensions, 0)
-
- def load_flat(self, dimensions=None):
- '''
- Loads the flat field data from the nexus file.
- returns: numpy array with flat field data
- '''
- try:
- if 1 not in self.get_image_keys():
- raise ValueError("Flats are not in the data. Data Path " ,
- self.data_path)
- except KeyError as ke:
- raise KeyError(ke.args[0] , self.data_path)
- return self.load(dimensions, 1)
-
- def load_dark(self, dimensions=None):
- '''
- Loads the Dark field data from the nexus file.
- returns: numpy array with dark field data
- '''
- try:
- if 2 not in self.get_image_keys():
- raise ValueError("Darks are not in the data. Data Path " ,
- self.data_path)
- except KeyError as ke:
- raise KeyError(ke.args[0] , self.data_path)
- return self.load(dimensions, 2)
-
- def get_projection_angles(self):
- '''
- This function returns the projection angles
- '''
- if not h5pyAvailable:
- raise Exception("Error: h5py is not installed")
- if self.filename is None:
- return
- try:
- with NexusFile(self.filename,'r') as file:
- angles = np.array(file[self.angle_path],np.float32)
- image_keys = np.array(file[self.key_path])
- return angles[image_keys==0]
- except:
- print("get_projection_angles Error reading nexus file")
- raise
-
-
- def get_sinogram_dimensions(self):
- '''
- Return the dimensions of the dataset
- '''
- if not h5pyAvailable:
- raise Exception("Error: h5py is not installed")
- if self.filename is None:
- return
- try:
- with NexusFile(self.filename,'r') as file:
- projections = file[self.data_path]
- image_keys = np.array(file[self.key_path])
- dims = list(projections.shape)
- dims[0] = dims[1]
- dims[1] = np.sum(image_keys==0)
- return tuple(dims)
- except:
- print("Error reading nexus file")
- raise
-
- def get_projection_dimensions(self):
- '''
- Return the dimensions of the dataset
- '''
- if not h5pyAvailable:
- raise Exception("Error: h5py is not installed")
- if self.filename is None:
- return
- try:
- with NexusFile(self.filename,'r') as file:
- try:
- projections = file[self.data_path]
- except KeyError as ke:
- raise KeyError('Error: data path {0} not found\n{1}'\
- .format(self.data_path,
- ke.args[0]))
- #image_keys = np.array(file[self.key_path])
- image_keys = self.get_image_keys()
- dims = list(projections.shape)
- dims[0] = np.sum(image_keys==0)
- return tuple(dims)
- except:
- print("Warning: Error reading image_keys trying accessing data on " , self.data_path)
- with NexusFile(self.filename,'r') as file:
- dims = file[self.data_path].shape
- return tuple(dims)
-
-
-
- def get_acquisition_data(self, dimensions=None):
- '''
- This method loads the acquisition data for the given dimensions and returns an AcquisitionData object
- '''
- data = self.load_projection(dimensions)
- dims = self.get_projection_dimensions()
- geometry = AcquisitionGeometry('parallel', '3D',
- self.get_projection_angles(),
- pixel_num_h = dims[2],
- pixel_size_h = 1 ,
- pixel_num_v = dims[1],
- pixel_size_v = 1,
- dist_source_center = None,
- dist_center_detector = None,
- channels = 1)
- return AcquisitionData(data, geometry=geometry,
- dimension_labels=['angle','vertical','horizontal'])
-
- def get_acquisition_data_subset(self, ymin=None, ymax=None):
- '''
- This method loads the acquisition data for the given dimensions and returns an AcquisitionData object
- '''
- if not h5pyAvailable:
- raise Exception("Error: h5py is not installed")
- if self.filename is None:
- return
- try:
-
-
- with NexusFile(self.filename,'r') as file:
- try:
- dims = self.get_projection_dimensions()
- except KeyError:
- # fall back to the raw data shape if the projection dimensions cannot be read
- dims = file[self.data_path].shape
- if ymin is None and ymax is None:
- data = np.array(file[self.data_path])
- else:
- if ymin is None:
- ymin = 0
- if ymax > dims[1]:
- raise ValueError('ymax out of range')
- data = np.array(file[self.data_path][:,:ymax,:])
- elif ymax is None:
- ymax = dims[1]
- if ymin < 0:
- raise ValueError('ymin out of range')
- data = np.array(file[self.data_path][:,ymin:,:])
- else:
- if ymax > dims[1]:
- raise ValueError('ymax out of range')
- if ymin < 0:
- raise ValueError('ymin out of range')
-
- data = np.array(file[self.data_path]
- [: , ymin:ymax , :] )
-
- except:
- print("Error reading nexus file")
- raise
-
-
- try:
- angles = self.get_projection_angles()
- except KeyError as ke:
- n = data.shape[0]
- angles = np.linspace(0, n, n+1, dtype=np.float32)
-
- if ymax-ymin > 1:
-
- geometry = AcquisitionGeometry('parallel', '3D',
- angles,
- pixel_num_h = dims[2],
- pixel_size_h = 1 ,
- pixel_num_v = ymax-ymin,
- pixel_size_v = 1,
- dist_source_center = None,
- dist_center_detector = None,
- channels = 1)
- return AcquisitionData(data, False, geometry=geometry,
- dimension_labels=['angle','vertical','horizontal'])
- elif ymax-ymin == 1:
- geometry = AcquisitionGeometry('parallel', '2D',
- angles,
- pixel_num_h = dims[2],
- pixel_size_h = 1 ,
- dist_source_center = None,
- dist_center_detector = None,
- channels = 1)
- return AcquisitionData(data.squeeze(), False, geometry=geometry,
- dimension_labels=['angle','horizontal'])
- def get_acquisition_data_slice(self, y_slice=0):
- return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1)
- def get_acquisition_data_whole(self):
- with NexusFile(self.filename,'r') as file:
- try:
- dims = self.get_projection_dimensions()
- except KeyError:
- print ("Warning: ")
- dims = file[self.data_path].shape
-
- ymin = 0
- ymax = dims[1] - 1
-
- return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax)
-
-
-
- def list_file_content(self):
- try:
- with NexusFile(self.filename,'r') as file:
- file.visit(print)
- except:
- print("Error reading nexus file")
- raise
- def get_acquisition_data_batch(self, bmin=None, bmax=None):
- if not h5pyAvailable:
- raise Exception("Error: h5py is not installed")
- if self.filename is None:
- return
- try:
-
-
- with NexusFile(self.filename,'r') as file:
- try:
- dims = self.get_projection_dimensions()
- except KeyError:
- dims = file[self.data_path].shape
- if bmin is None or bmax is None:
- raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits')
-
- if bmin >= 0 and bmin < bmax and bmax <= dims[0]:
- data = np.array(file[self.data_path][bmin:bmax])
- else:
- raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0]))
-
- except:
- print("Error reading nexus file")
- raise
-
-
- try:
- angles = self.get_projection_angles()[bmin:bmax]
- except KeyError as ke:
- n = data.shape[0]
- angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax]
-
- if bmax-bmin > 1:
-
- geometry = AcquisitionGeometry('parallel', '3D',
- angles,
- pixel_num_h = dims[2],
- pixel_size_h = 1 ,
- pixel_num_v = bmax-bmin,
- pixel_size_v = 1,
- dist_source_center = None,
- dist_center_detector = None,
- channels = 1)
- return AcquisitionData(data, False, geometry=geometry,
- dimension_labels=['angle','vertical','horizontal'])
- elif bmax-bmin == 1:
- geometry = AcquisitionGeometry('parallel', '2D',
- angles,
- pixel_num_h = dims[2],
- pixel_size_h = 1 ,
- dist_source_center = None,
- dist_center_detector = None,
- channels = 1)
- return AcquisitionData(data.squeeze(), False, geometry=geometry,
- dimension_labels=['angle','horizontal'])
-
-
-
-class XTEKReader(object):
- '''
- Reader class for loading XTEK files
- '''
-
- def __init__(self, xtek_config_filename=None):
- '''
- This takes in the xtek config filename and loads the dataset and the
- required geometry parameters
- '''
- self.projections = None
- self.geometry = {}
- self.filename = xtek_config_filename
- self.load()
-
- def load(self):
- pixel_num_h = 0
- pixel_num_v = 0
- xpixel_size = 0
- ypixel_size = 0
- source_x = 0
- detector_x = 0
- with open(self.filename) as f:
- content = f.readlines()
- content = [x.strip() for x in content]
- for line in content:
- if line.startswith("SrcToObject"):
- source_x = float(line.split('=')[1])
- elif line.startswith("SrcToDetector"):
- detector_x = float(line.split('=')[1])
- elif line.startswith("DetectorPixelsY"):
- pixel_num_v = int(line.split('=')[1])
- #self.num_of_vertical_pixels = self.calc_v_alighment(self.num_of_vertical_pixels, self.pixels_per_voxel)
- elif line.startswith("DetectorPixelsX"):
- pixel_num_h = int(line.split('=')[1])
- elif line.startswith("DetectorPixelSizeX"):
- xpixel_size = float(line.split('=')[1])
- elif line.startswith("DetectorPixelSizeY"):
- ypixel_size = float(line.split('=')[1])
- elif line.startswith("Projections"):
- self.num_projections = int(line.split('=')[1])
- elif line.startswith("InitialAngle"):
- self.initial_angle = float(line.split('=')[1])
- elif line.startswith("Name"):
- self.experiment_name = line.split('=')[1]
- elif line.startswith("Scattering"):
- self.scattering = float(line.split('=')[1])
- elif line.startswith("WhiteLevel"):
- self.white_level = float(line.split('=')[1])
- elif line.startswith("MaskRadius"):
- self.mask_radius = float(line.split('=')[1])
-
- #Read Angles
- angles = self.read_angles()
- self.geometry = AcquisitionGeometry('cone', '3D', angles, pixel_num_h, xpixel_size, pixel_num_v, ypixel_size, -1 * source_x,
- detector_x - source_x,
- )
-
- def read_angles(self):
- """
- Reads the angles from the .ang or _ctdata.txt file and returns them
- as a numpy array.
- """
- input_path = os.path.dirname(self.filename)
- angles_ctdata_file = os.path.join(input_path, '_ctdata.txt')
- angles_named_file = os.path.join(input_path, self.experiment_name+'.ang')
- angles = np.zeros(self.num_projections,dtype='f')
- #look for _ctdata.txt
- if os.path.exists(angles_ctdata_file):
- #read txt file with angles
- with open(angles_ctdata_file) as f:
- content = f.readlines()
- #skip first three lines
- #read the middle value of 3 values in each line as angles in degrees
- index = 0
- for line in content[3:]:
- angles[index] = float(line.split(' ')[1])
- index += 1
- angles = np.deg2rad(angles + self.initial_angle)
- elif os.path.exists(angles_named_file):
- #read the angles file which is text with first line as header
- with open(angles_named_file) as f:
- content = f.readlines()
- #skip first line
- index = 0
- for line in content[1:]:
- angles[index] = float(line.split(':')[1])
- index+=1
- angles = np.flipud(angles+self.initial_angle) #angles are in the reverse order
- else:
- raise RuntimeError("Can't find angles file")
- return angles
-
- def load_projection(self, dimensions=None):
- '''
- This method reads the projection images from the directory and returns a numpy array
- '''
- if not pilAvailable:
- raise ImportError('Image library pillow is not installed')
- if dimensions is not None:
- raise NotImplementedError('Extracting subset of data is not implemented')
- input_path = os.path.dirname(self.filename)
- pixels = np.zeros((self.num_projections, self.geometry.pixel_num_h, self.geometry.pixel_num_v), dtype='float32')
- for i in range(1, self.num_projections+1):
- im = Image.open(os.path.join(input_path,self.experiment_name+"_%04d"%i+".tif"))
- pixels[i-1,:,:] = np.fliplr(np.transpose(np.array(im))) ##Not sure this is the correct way to populate the image
-
- #normalising the data
- #TODO: Move this to a processor
- pixels = pixels - (self.white_level*self.scattering)/100.0
- pixels[pixels < 0.0] = 0.000001 # clamp negative values to approximately 0, as the log of zero or of a negative number is not defined
- return pixels
-
- def get_acquisition_data(self, dimensions=None):
- '''
- This method loads the acquisition data for the given dimensions and returns an AcquisitionData object
- '''
- data = self.load_projection(dimensions)
- return AcquisitionData(data, geometry=self.geometry)
-
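
As a quick orientation for the two readers above, a typical usage might look like the following sketch; the file names are placeholders, and h5py (for NexusReader) and Pillow (for XTEKReader) need to be installed.

# Illustrative usage only; 'scan.nxs' and 'scan.xtekct' are placeholder paths.
from ccpi.io.reader import NexusReader, XTEKReader

nexus = NexusReader('scan.nxs')
projections = nexus.get_acquisition_data()              # full 3D parallel-beam AcquisitionData
one_row = nexus.get_acquisition_data_slice(y_slice=10)  # a single detector row as 2D data

xtek = XTEKReader('scan.xtekct')                        # parses the config and builds a cone-beam geometry
cone_data = xtek.get_acquisition_data()
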
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py
deleted file mode 100644
index cf2d93d..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. \ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py
deleted file mode 100644
index ed95c3f..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import time
-from numbers import Integral
-
-class Algorithm(object):
- '''Base class for iterative algorithms
-
- provides the minimal infrastructure.
- Algorithms are iterables so they can easily be run in a for loop. They will
- stop as soon as the stopping criterion is met.
- The user is required to implement the set_up, __init__, update and
- update_objective methods.
-
- A courtesy method run is available to run n iterations. The method accepts
- a callback function that receives the current iteration number and the current objective
- value, and can be used to trigger prints to screen and other user interactions. The run
- method will stop when the stopping criterion is met.
- '''
-
- def __init__(self):
- '''Constructor
-
- Set the minimal number of parameters:
- iteration: current iteration number
- max_iteration: maximum number of iterations
- memopt: whether to use memory optimisation
- timing: list to hold the times it took to run each iteration
- update_objective_interval: the interval at which the current objective
- value is saved: 1 means every iteration, 2 every second iteration,
- and so forth. This is 1 by default and should be increased
- when evaluating the objective is computationally expensive.
- '''
- self.iteration = 0
- self.__max_iteration = 0
- self.__loss = []
- self.memopt = False
- self.timing = []
- self.update_objective_interval = 1
- def set_up(self, *args, **kwargs):
- '''Set up the algorithm'''
- raise NotImplementedError()
- def update(self):
- '''A single iteration of the algorithm'''
- raise NotImplementedError()
-
- def should_stop(self):
- '''default stopping criterion: number of iterations
-
- The user can change this in concrete implementations of iterative algorithms.'''
- return self.max_iteration_stop_cryterion()
-
- def max_iteration_stop_cryterion(self):
- '''default stopping criterion for iterative algorithms: max_iteration reached'''
- return self.iteration >= self.max_iteration
- def __iter__(self):
- '''Algorithm is an iterable'''
- return self
- def next(self):
- '''Algorithm is an iterable
-
- python2 backwards compatibility'''
- return self.__next__()
- def __next__(self):
- '''Algorithm is an iterable
-
- calling this method triggers update and update_objective
- '''
- if self.should_stop():
- raise StopIteration()
- else:
- time0 = time.time()
- self.update()
- self.timing.append( time.time() - time0 )
- if self.iteration % self.update_objective_interval == 0:
- self.update_objective()
- self.iteration += 1
- def get_output(self):
- '''Returns the solution found'''
- return self.x
- def get_last_loss(self):
- '''Returns the last stored value of the loss function
-
- if update_objective_interval is 1 it is the value of the objective at the current
- iteration. If update_objective_interval > 1 it is the last stored value.
- '''
- return self.__loss[-1]
- def get_last_objective(self):
- '''alias to get_last_loss'''
- return self.get_last_loss()
- def update_objective(self):
- '''calculates the objective with the current solution'''
- raise NotImplementedError()
- @property
- def loss(self):
- '''returns the list of the values of the objective during the iteration
-
- The length of this list may be shorter than the number of iterations run when
- the update_objective_interval > 1
- '''
- return self.__loss
- @property
- def objective(self):
- '''alias of loss'''
- return self.loss
- @property
- def max_iteration(self):
- '''gets the maximum number of iterations'''
- return self.__max_iteration
- @max_iteration.setter
- def max_iteration(self, value):
- '''sets the maximum number of iterations'''
- assert isinstance(value, int)
- self.__max_iteration = value
- @property
- def update_objective_interval(self):
- return self.__update_objective_interval
- @update_objective_interval.setter
- def update_objective_interval(self, value):
- if isinstance(value, Integral):
- if value >= 1:
- self.__update_objective_interval = value
- else:
- raise ValueError('Update objective interval must be an integer >= 1')
- else:
- raise ValueError('Update objective interval must be an integer >= 1')
- def run(self, iterations, verbose=True, callback=None):
- '''run n iterations and update the user with the callback if specified'''
- if self.should_stop():
- print ("Stop cryterion has been reached.")
- i = 0
- for _ in self:
- if verbose and self.iteration % self.update_objective_interval == 0:
- print ("Iteration {}/{}, objective {}".format(self.iteration,
- self.max_iteration, self.get_last_objective()) )
- else:
- if callback is not None:
- callback(self.iteration, self.get_last_objective())
- i += 1
- if i == iterations:
- break
-
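
A minimal sketch of what the Algorithm docstring above asks the user to implement; the HalvingAlgorithm subclass is made up purely to show the update / update_objective / run contract.

# Illustrative only: a made-up Algorithm subclass exercising the iterable contract.
from ccpi.optimisation.algorithms import Algorithm

class HalvingAlgorithm(Algorithm):
    '''Repeatedly halves a number; the objective is its absolute value.'''
    def __init__(self, x_init=1024.0):
        super(HalvingAlgorithm, self).__init__()
        self.x = x_init

    def update(self):
        self.x = 0.5 * self.x

    def update_objective(self):
        self.loss.append(abs(self.x))

alg = HalvingAlgorithm()
alg.max_iteration = 10

# algorithms are iterables, so they can be driven by a plain for loop ...
for _ in alg:
    pass
print(alg.iteration, alg.get_last_objective())

# ... or by the courtesy run method with a callback
alg2 = HalvingAlgorithm()
alg2.max_iteration = 20
alg2.run(10, verbose=False, callback=lambda it, obj: print(it, obj))
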
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py
deleted file mode 100644
index 7194eb8..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Created on Thu Feb 21 11:11:23 2019
-
-@author: ofn77899
-"""
-
-from ccpi.optimisation.algorithms import Algorithm
-#from collections.abc import Iterable
-class CGLS(Algorithm):
-
- '''Conjugate Gradient Least Squares algorithm
-
- Parameters:
- x_init: initial guess
- operator: operator for forward/backward projections
- data: data to operate on
- '''
- def __init__(self, **kwargs):
- super(CGLS, self).__init__()
- self.x = kwargs.get('x_init', None)
- self.operator = kwargs.get('operator', None)
- self.data = kwargs.get('data', None)
- if self.x is not None and self.operator is not None and \
- self.data is not None:
- print ("Calling from creator")
- self.set_up(x_init =kwargs['x_init'],
- operator=kwargs['operator'],
- data =kwargs['data'])
-
- def set_up(self, x_init, operator , data ):
-
- self.r = data.copy()
- self.x = x_init.copy()
-
- self.operator = operator
- self.d = operator.adjoint(self.r)
-
-
- self.normr2 = self.d.squared_norm()
- #if isinstance(self.normr2, Iterable):
- # self.normr2 = sum(self.normr2)
- #self.normr2 = numpy.sqrt(self.normr2)
- #print ("set_up" , self.normr2)
-
- def update(self):
-
- Ad = self.operator.direct(self.d)
- #norm = (Ad*Ad).sum()
- #if isinstance(norm, Iterable):
- # norm = sum(norm)
- norm = Ad.squared_norm()
-
- alpha = self.normr2/norm
- self.x += (self.d * alpha)
- self.r -= (Ad * alpha)
- s = self.operator.adjoint(self.r)
-
- normr2_new = s.squared_norm()
- #if isinstance(normr2_new, Iterable):
- # normr2_new = sum(normr2_new)
- #normr2_new = numpy.sqrt(normr2_new)
- #print (normr2_new)
-
- beta = normr2_new/self.normr2
- self.normr2 = normr2_new
- self.d = s + beta*self.d
-
- def update_objective(self):
- self.loss.append(self.r.squared_norm()) \ No newline at end of file
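
A small self-contained sketch of driving the CGLS class above; the DiagonalOperator below is a made-up stand-in with the direct/adjoint interface CGLS expects, used only so the example runs without a real projector.

# Illustrative only: a made-up elementwise operator replaces a real projector.
import numpy
from ccpi.framework import DataContainer
from ccpi.optimisation.algorithms import CGLS

class DiagonalOperator(object):
    '''Placeholder operator with the direct/adjoint interface CGLS expects.'''
    def __init__(self, weights):
        self.weights = weights
    def direct(self, x, out=None):
        return x * self.weights
    def adjoint(self, x, out=None):
        return x * self.weights

w = DataContainer(1.0 + numpy.random.rand(4, 5).astype(numpy.float32))
A = DiagonalOperator(w)
x_true = DataContainer(numpy.random.rand(4, 5).astype(numpy.float32))
b = A.direct(x_true)

cgls = CGLS(x_init=b * 0, operator=A, data=b)
cgls.max_iteration = 5
cgls.run(5, verbose=False)
print(cgls.get_output().as_array())   # approaches x_true
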
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py
deleted file mode 100644
index 445ba7a..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Created on Thu Feb 21 11:09:03 2019
-
-@author: ofn77899
-"""
-
-from ccpi.optimisation.algorithms import Algorithm
-from ccpi.optimisation.functions import ZeroFun
-
-class FBPD(Algorithm):
- '''FBPD Algorithm
-
- Parameters:
- x_init: initial guess
- operator: forward/adjoint operator
- constraint: constraint function (f)
- data_fidelity: data fidelity function (g)
- regulariser: regularisation function (h)
- opt: additional algorithm options
- '''
- constraint = None
- data_fidelity = None
- regulariser = None
- def __init__(self, **kwargs):
- super(FBPD, self).__init__()
- def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\
- regulariser=None, opt=None):
-
- # default inputs
- if constraint is None:
- self.constraint = ZeroFun()
- else:
- self.constraint = constraint
- if data_fidelity is None:
- self.data_fidelity = ZeroFun()
- else:
- self.data_fidelity = data_fidelity
- if regulariser is None:
- self.regulariser = ZeroFun()
- else:
- self.regulariser = regulariser
-
- # algorithmic parameters
-
-
- # step-sizes
- self.tau = 2 / (self.data_fidelity.L + 2)
- self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L
-
- self.inv_sigma = 1/self.sigma
-
- # initialization
- self.operator = operator
- self.x = x_init
- self.y = operator.direct(self.x)
-
-
- def update(self):
-
- # primal forward-backward step
- x_old = self.x
- self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) )
- self.x = self.constraint.prox(self.x, self.tau);
-
- # dual forward-backward step
- self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old);
- self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma);
-
- # time and criterion
- self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x))
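
The step sizes computed in set_up above depend only on the Lipschitz constants of the data fidelity and the regulariser; a small worked example with made-up constants follows.

# Worked example of the FBPD step-size formulas with made-up Lipschitz constants.
L_f = 8.0      # Lipschitz constant of the data fidelity gradient (assumed)
L_h = 4.0      # Lipschitz constant attribute of the regulariser (assumed)

tau = 2.0 / (L_f + 2.0)                    # = 0.2
sigma = (1.0 / tau - L_f / 2.0) / L_h      # = (5 - 4) / 4 = 0.25
inv_sigma = 1.0 / sigma                    # = 4.0
print(tau, sigma, inv_sigma)
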
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py
deleted file mode 100644
index 93ba178..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Feb 21 11:07:30 2019
-
-@author: ofn77899
-"""
-
-from ccpi.optimisation.algorithms import Algorithm
-from ccpi.optimisation.functions import ZeroFun
-import numpy
-
-class FISTA(Algorithm):
- '''Fast Iterative Shrinkage-Thresholding Algorithm
-
- Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding
- algorithm for linear inverse problems.
- SIAM journal on imaging sciences,2(1), pp.183-202.
-
- Parameters:
- x_init: initial guess
- f: data fidelity
- g: regularizer
- opt: additional algorithm options
- '''
-
- def __init__(self, **kwargs):
- '''initialisation can be done at creation time if all
- proper variables are passed or later with set_up'''
- super(FISTA, self).__init__()
- self.f = None
- self.g = None
- self.invL = None
- self.t_old = 1
- args = ['x_init', 'f', 'g', 'opt']
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(kwargs['x_init'],
- f=kwargs['f'],
- g=kwargs['g'],
- opt=kwargs['opt'])
-
- def set_up(self, x_init, f=None, g=None, opt=None):
-
- # default inputs
- if f is None:
- self.f = ZeroFun()
- else:
- self.f = f
- if g is None:
- g = ZeroFun()
- self.g = g
- else:
- self.g = g
-
- # algorithmic parameters
- if opt is None:
- opt = {'tol': 1e-4, 'memopt':False}
-
- self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
- memopt = opt['memopt'] if 'memopt' in opt.keys() else False
- self.memopt = memopt
-
- # initialization
- if memopt:
- self.y = x_init.clone()
- self.x_old = x_init.clone()
- self.x = x_init.clone()
- self.u = x_init.clone()
- else:
- self.x_old = x_init.copy()
- self.y = x_init.copy()
-
- #timing = numpy.zeros(max_iter)
- #criter = numpy.zeros(max_iter)
-
-
- self.invL = 1/f.L
-
- self.t_old = 1
-
- def update(self):
- # algorithm loop
- #for it in range(0, max_iter):
-
- if self.memopt:
- # u = y - invL*f.grad(y)
- # store the result in x_old
- self.f.gradient(self.y, out=self.u)
- self.u.__imul__( -self.invL )
- self.u.__iadd__( self.y )
- # x = g.prox(u,invL)
- self.g.proximal(self.u, self.invL, out=self.x)
-
- self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
-
- # y = x + (t_old-1)/t*(x-x_old)
- self.x.subtract(self.x_old, out=self.y)
- self.y.__imul__ ((self.t_old-1)/self.t)
- self.y.__iadd__( self.x )
-
- self.x_old.fill(self.x)
- self.t_old = self.t
-
-
- else:
- u = self.y - self.invL*self.f.grad(self.y)
-
- self.x = self.g.prox(u,self.invL)
-
- self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
-
- self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old)
-
- self.x_old = self.x.copy()
- self.t_old = self.t
-
- def update_objective(self):
- self.loss.append( self.f(self.x) + self.g(self.x) ) \ No newline at end of file
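
A small self-contained sketch of driving the FISTA class above on a toy l2/l1 problem; SmoothData and L1Reg are placeholders written against the f.grad/f.L and g.prox calls used in the non-memopt branch above, not classes provided by this package.

# Illustrative only; placeholder f and g written against the grad/L and prox interface used above.
import numpy
from ccpi.framework import DataContainer
from ccpi.optimisation.algorithms import FISTA

class SmoothData(object):
    '''f(x) = ||x - b||_2^2, grad f(x) = 2(x - b), Lipschitz constant L = 2.'''
    def __init__(self, b):
        self.b = b
        self.L = 2.0
    def __call__(self, x):
        return (x - self.b).squared_norm()
    def grad(self, x):
        return 2 * (x - self.b)

class L1Reg(object):
    '''g(x) = lam*||x||_1 with the usual soft-thresholding proximal map.'''
    def __init__(self, lam):
        self.lam = lam
    def __call__(self, x):
        return self.lam * numpy.abs(x.as_array()).sum()
    def prox(self, x, tau):
        arr = x.as_array()
        out = numpy.sign(arr) * numpy.maximum(numpy.abs(arr) - tau * self.lam, 0)
        return type(x)(out, True, dimension_labels=x.dimension_labels)

b = DataContainer(numpy.linspace(-1, 1, 12, dtype=numpy.float32).reshape(3, 4))
fista = FISTA(x_init=b * 0, f=SmoothData(b), g=L1Reg(0.1),
              opt={'tol': 1e-4, 'memopt': False})
fista.max_iteration = 50
fista.run(50, verbose=False)
print(fista.get_output().as_array())
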
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py
deleted file mode 100644
index f1e4132..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Created on Thu Feb 21 11:05:09 2019
-
-@author: ofn77899
-"""
-from ccpi.optimisation.algorithms import Algorithm
-
-class GradientDescent(Algorithm):
- '''Implementation of Gradient Descent algorithm
- '''
-
- def __init__(self, **kwargs):
- '''initialisation can be done at creation time if all
- proper variables are passed or later with set_up'''
- super(GradientDescent, self).__init__()
- self.x = None
- self.rate = 0
- self.objective_function = None
- self.regulariser = None
- args = ['x_init', 'objective_function', 'rate']
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(x_init=kwargs['x_init'],
- objective_function=kwargs['objective_function'],
- rate=kwargs['rate'])
-
- def should_stop(self):
- '''stopping criterion, currently only based on number of iterations'''
- return self.iteration >= self.max_iteration
-
- def set_up(self, x_init, objective_function, rate):
- '''initialisation of the algorithm'''
- self.x = x_init.copy()
- self.objective_function = objective_function
- self.rate = rate
- self.loss.append(objective_function(x_init))
- self.iteration = 0
- try:
- self.memopt = self.objective_function.memopt
- except AttributeError as ae:
- self.memopt = False
- if self.memopt:
- self.x_update = x_init.copy()
-
- def update(self):
- '''Single iteration'''
- if self.memopt:
- self.objective_function.gradient(self.x, out=self.x_update)
- self.x_update *= -self.rate
- self.x += self.x_update
- else:
- self.x += -self.rate * self.objective_function.gradient(self.x)
-
- def update_objective(self):
- self.loss.append(self.objective_function(self.x))
- \ No newline at end of file
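
To show how the class above is typically wired up, here is a short self-contained sketch; LeastSquaresObjective is a placeholder providing the __call__/gradient interface that set_up and update rely on, not a library function.

# Illustrative only: placeholder objective with the __call__/gradient interface used above.
import numpy
from ccpi.framework import DataContainer
from ccpi.optimisation.algorithms import GradientDescent

class LeastSquaresObjective(object):
    '''f(x) = ||x - b||_2^2 with gradient 2*(x - b).'''
    def __init__(self, b):
        self.b = b
    def __call__(self, x):
        return (x - self.b).squared_norm()
    def gradient(self, x, out=None):
        return 2 * (x - self.b)

b = DataContainer(numpy.arange(12, dtype=numpy.float32).reshape(3, 4))
gd = GradientDescent(x_init=b * 0,
                     objective_function=LeastSquaresObjective(b),
                     rate=0.4)
gd.max_iteration = 25
gd.run(25, verbose=False)
print(gd.get_output().as_array())   # approaches b
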
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py
deleted file mode 100644
index d0e27ae..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Feb 4 16:18:06 2019
-
-@author: evangelos
-"""
-from ccpi.optimisation.algorithms import Algorithm
-from ccpi.framework import ImageData
-import numpy as np
-import matplotlib.pyplot as plt
-import time
-from ccpi.optimisation.operators import BlockOperator
-from ccpi.framework import BlockDataContainer
-
-
-
-class PDHG(Algorithm):
- '''Primal Dual Hybrid Gradient'''
-
- def __init__(self, **kwargs):
- super(PDHG, self).__init__()
- self.f = kwargs.get('f', None)
- self.operator = kwargs.get('operator', None)
- self.g = kwargs.get('g', None)
- self.tau = kwargs.get('tau', None)
- self.sigma = kwargs.get('sigma', None)
-
- if self.f is not None and self.operator is not None and \
- self.g is not None:
- print ("Calling from creator")
- self.set_up(self.f,
- self.g,
- self.operator,
- self.tau,
- self.sigma)
-
- def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
- # algorithmic parameters
-
- if sigma is None and tau is None:
- raise ValueError('Need sigma*tau||K||^2<1')
-
-
- self.x_old = self.operator.domain_geometry().allocate()
- self.y_old = self.operator.range_geometry().allocate()
-
- self.xbar = self.x_old.copy()
- #x_tmp = x_old
- self.x = self.x_old.copy()
- self.y = self.y_old.copy()
- #y_tmp = y_old
- #y = y_tmp
-
- # relaxation parameter
- self.theta = 1
-
- def update(self):
- # Gradient descent, Dual problem solution
- self.y_old += self.sigma * self.operator.direct(self.xbar)
- self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
-
- # Gradient ascent, Primal problem solution
- self.x_old -= self.tau * self.operator.adjoint(self.y)
- self.x = self.g.proximal(self.x_old, self.tau)
-
- #Update
- #xbar = x + theta * (x - x_old)
- self.xbar.fill(self.x)
- self.xbar -= self.x_old
- self.xbar *= self.theta
- self.xbar += self.x
-
-# self.x_old.fill(self.x)
-# self.y_old.fill(self.y)
- self.y_old = self.y.copy()
- self.x_old = self.x.copy()
- #self.y = self.y_old
-
- def update_objective(self):
- self.loss.append([self.f(self.operator.direct(self.x)) + self.g(self.x),
- -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y)))
- ])
-
-
-
-def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
-
- # algorithmic parameters
- if opt is None:
- opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \
- 'memopt': False}
-
- if sigma is None and tau is None:
- raise ValueError('Need sigma*tau||K||^2<1')
-
- niter = opt['niter'] if 'niter' in opt.keys() else 1000
- tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
- memopt = opt['memopt'] if 'memopt' in opt.keys() else False
- show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False
- stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False
-
-
- x_old = operator.domain_geometry().allocate()
- y_old = operator.range_geometry().allocate()
-
-
- xbar = x_old
- x_tmp = x_old
- x = x_old
-
- y_tmp = y_old
- y = y_tmp
-
- # relaxation parameter
- theta = 1
-
- t = time.time()
-
- objective = []
-
-
- for i in range(niter):
-
- # Gradient descent, Dual problem solution
- y_tmp = y_old + sigma * operator.direct(xbar)
- y = f.proximal_conjugate(y_tmp, sigma)
-
- # Gradient ascent, Primal problem solution
- x_tmp = x_old - tau * operator.adjoint(y)
- x = g.proximal(x_tmp, tau)
-
- #Update
- xbar = x + theta * (x - x_old)
-
- x_old = x
- y_old = y
-
- if i%100==0:
-
- primal = f(operator.direct(x)) + g(x)
- dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y)))
- print( i, primal, dual, primal-dual)
-
-# plt.imshow(x.as_array())
-# plt.show()
-# print(f(operator.direct(x)) + g(x), i)
-
- t_end = time.time()
-
- return x, t_end - t, objective
-
-
-
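
The only numerical requirement stated in set_up above is the step-size condition sigma*tau*||K||^2 < 1; one common way to satisfy it, sketched below with a hypothetical helper and a plain numpy power iteration, is to estimate the operator norm and scale the steps accordingly. This is an assumption about usage, not something this module prescribes.

# Hypothetical helpers, not part of this module: choose PDHG step sizes from an
# estimate of the operator norm so that sigma*tau*||K||^2 < 1.
import numpy

def estimate_norm(K, n_iter=20):
    '''Power-method estimate of the largest singular value of a numpy matrix K.'''
    x = numpy.random.rand(K.shape[1])
    for _ in range(n_iter):
        x = K.T.dot(K.dot(x))
        x /= numpy.linalg.norm(x)
    return numpy.sqrt(numpy.linalg.norm(K.T.dot(K.dot(x))))

def pdhg_step_sizes(norm_K, safety=0.99):
    '''Return (sigma, tau) with sigma*tau*norm_K**2 = safety < 1.'''
    return 1.0 / norm_K, safety / norm_K

K = numpy.array([[1.0, 2.0], [0.0, 3.0]])
sigma, tau = pdhg_step_sizes(estimate_norm(K))
print(sigma, tau)
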
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py
deleted file mode 100644
index f562973..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Created on Thu Feb 21 11:03:13 2019
-
-@author: ofn77899
-"""
-
-from .Algorithm import Algorithm
-from .CGLS import CGLS
-from .GradientDescent import GradientDescent
-from .FISTA import FISTA
-from .FBPD import FBPD
-from .PDHG import PDHG
-from .PDHG import PDHG_old
-
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py b/Wrappers/Python/build/lib/ccpi/optimisation/algs.py
deleted file mode 100644
index 6b6ae2c..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy
-import time
-
-from ccpi.optimisation.functions import Function
-from ccpi.optimisation.functions import ZeroFun
-from ccpi.framework import ImageData
-from ccpi.framework import AcquisitionData
-from ccpi.optimisation.spdhg import spdhg
-from ccpi.optimisation.spdhg import KullbackLeibler
-from ccpi.optimisation.spdhg import KullbackLeiblerConvexConjugate
-
-def FISTA(x_init, f=None, g=None, opt=None):
- '''Fast Iterative Shrinkage-Thresholding Algorithm
-
- Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding
- algorithm for linear inverse problems.
- SIAM journal on imaging sciences,2(1), pp.183-202.
-
- Parameters:
- x_init: initial guess
- f: data fidelity
- g: regularizer
- opt: additional algorithm options
- '''
- # default inputs
- if f is None: f = ZeroFun()
- if g is None: g = ZeroFun()
-
- # algorithmic parameters
- if opt is None:
- opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False}
-
- max_iter = opt['iter'] if 'iter' in opt.keys() else 1000
- tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
- memopt = opt['memopt'] if 'memopt' in opt.keys() else False
-
-
- # initialization
- if memopt:
- y = x_init.clone()
- x_old = x_init.clone()
- x = x_init.clone()
- u = x_init.clone()
- else:
- x_old = x_init
- y = x_init;
-
- timing = numpy.zeros(max_iter)
- criter = numpy.zeros(max_iter)
-
- invL = 1/f.L
-
- t_old = 1
-
- c = f(x_init) + g(x_init)
-
- # algorithm loop
- for it in range(0, max_iter):
-
- time0 = time.time()
- if memopt:
- # u = y - invL*f.grad(y)
- # store the result in x_old
- f.gradient(y, out=u)
- u.__imul__( -invL )
- u.__iadd__( y )
- # x = g.prox(u,invL)
- g.proximal(u, invL, out=x)
-
- t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2)))
-
- # y = x + (t_old-1)/t*(x-x_old)
- x.subtract(x_old, out=y)
- y.__imul__ ((t_old-1)/t)
- y.__iadd__( x )
-
- x_old.fill(x)
- t_old = t
-
-
- else:
- u = y - invL*f.grad(y)
-
- x = g.prox(u,invL)
-
- t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2)))
-
- y = x + (t_old-1)/t*(x-x_old)
-
- x_old = x.copy()
- t_old = t
-
- # time and criterion
- timing[it] = time.time() - time0
- criter[it] = f(x) + g(x);
-
- # stopping rule
- #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10:
- # break
-
- #print(it, 'out of', 10, 'iterations', end='\r');
-
- #criter = criter[0:it+1];
- timing = numpy.cumsum(timing[0:it+1]);
-
- return x, it, timing, criter
-
-def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\
- regulariser=None, opt=None):
- '''FBPD Algorithm
-
- Parameters:
- x_init: initial guess
- operator: forward/adjoint operator
- constraint: constraint function (f)
- data_fidelity: data fidelity function (g)
- regulariser: regularisation function (h)
- opt: additional algorithm options
- '''
- # default inputs
- if constraint is None: constraint = ZeroFun()
- if data_fidelity is None: data_fidelity = ZeroFun()
- if regulariser is None: regulariser = ZeroFun()
-
- # algorithmic parameters
- if opt is None:
- opt = {'tol': 1e-4, 'iter': 1000}
- else:
- try:
- max_iter = opt['iter']
- except KeyError:
- opt['iter'] = 1000
- try:
- tol = opt['tol']
- except KeyError:
- opt['tol'] = 1e-4
- tol = opt['tol']
- max_iter = opt['iter']
- memopt = opt['memopt'] if 'memopt' in opt.keys() else False
-
- # step-sizes
- tau = 2 / (data_fidelity.L + 2)
- sigma = (1/tau - data_fidelity.L/2) / regulariser.L
- inv_sigma = 1/sigma
-
- # initialization
- x = x_init
- y = operator.direct(x);
-
- timing = numpy.zeros(max_iter)
- criter = numpy.zeros(max_iter)
-
-
-
-
- # algorithm loop
- for it in range(0, max_iter):
-
- t = time.time()
-
- # primal forward-backward step
- x_old = x;
- x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) );
- x = constraint.prox(x, tau);
-
- # dual forward-backward step
- y = y + sigma * operator.direct(2*x - x_old);
- y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma);
-
- # time and criterion
- timing[it] = time.time() - t
- criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x))
-
- # stopping rule
- #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10:
- # break
-
- criter = criter[0:it+1]
- timing = numpy.cumsum(timing[0:it+1])
-
- return x, it, timing, criter
-
-def CGLS(x_init, operator , data , opt=None):
- '''Conjugate Gradient Least Squares algorithm
-
- Parameters:
- x_init: initial guess
- operator: operator for forward/backward projections
- data: data to operate on
- opt: additional algorithm options
- '''
-
- if opt is None:
- opt = {'tol': 1e-4, 'iter': 1000}
- else:
- try:
- max_iter = opt['iter']
- except KeyError:
- opt['iter'] = 1000
- try:
- tol = opt['tol']
- except KeyError:
- opt['tol'] = 1e-4
- tol = opt['tol']
- max_iter = opt['iter']
-
- r = data.copy()
- x = x_init.copy()
-
- d = operator.adjoint(r)
-
- normr2 = (d**2).sum()
-
- timing = numpy.zeros(max_iter)
- criter = numpy.zeros(max_iter)
-
- # algorithm loop
- for it in range(0, max_iter):
-
- t = time.time()
-
- Ad = operator.direct(d)
- alpha = normr2/( (Ad**2).sum() )
- x = x + alpha*d
- r = r - alpha*Ad
- s = operator.adjoint(r)
-
- normr2_new = (s**2).sum()
- beta = normr2_new/normr2
- normr2 = normr2_new
- d = s + beta*d
-
- # time and criterion
- timing[it] = time.time() - t
- criter[it] = (r**2).sum()
-
- return x, it, timing, criter
-
-def SIRT(x_init, operator , data , opt=None, constraint=None):
- '''Simultaneous Iterative Reconstruction Technique
-
- Parameters:
- x_init: initial guess
- operator: operator for forward/backward projections
- data: data to operate on
- opt: additional algorithm options
- constraint: func of Indicator type specifying convex constraint.
- '''
-
- if opt is None:
- opt = {'tol': 1e-4, 'iter': 1000}
- else:
- try:
- max_iter = opt['iter']
- except KeyError:
- opt['iter'] = 1000
- try:
- tol = opt['tol']
- except KeyError:
- opt['tol'] = 1e-4
- tol = opt['tol']
- max_iter = opt['iter']
-
- # Set default constraint to unconstrained
- if constraint==None:
- constraint = Function()
-
- x = x_init.clone()
-
- timing = numpy.zeros(max_iter)
- criter = numpy.zeros(max_iter)
-
- # Relaxation parameter must be strictly between 0 and 2. For now fix at 1.0
- relax_par = 1.0
-
- # Set up scaling matrices D and M.
- im1 = ImageData(geometry=x_init.geometry)
- im1.array[:] = 1.0
- M = 1/operator.direct(im1)
- del im1
- aq1 = AcquisitionData(geometry=M.geometry)
- aq1.array[:] = 1.0
- D = 1/operator.adjoint(aq1)
- del aq1
-
- # algorithm loop
- for it in range(0, max_iter):
- t = time.time()
- r = data - operator.direct(x)
-
- x = constraint.prox(x + relax_par * (D*operator.adjoint(M*r)),None)
-
- timing[it] = time.time() - t
- if it > 0:
- criter[it-1] = (r**2).sum()
-
- r = data - operator.direct(x)
- criter[it] = (r**2).sum()
- return x, it, timing, criter
-
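
Unlike the class-based algorithms, the functions in this module run the whole loop and return an (x, it, timing, criter) tuple; the sketch below drives the functional CGLS with a made-up elementwise operator (DiagonalOperator is a placeholder, not a library class).

# Illustrative only: made-up diagonal operator so the functional CGLS can run stand-alone.
import numpy
from ccpi.framework import DataContainer
from ccpi.optimisation.algs import CGLS

class DiagonalOperator(object):
    '''Placeholder operator exposing the direct/adjoint calls CGLS makes.'''
    def __init__(self, weights):
        self.weights = weights
    def direct(self, x):
        return x * self.weights
    def adjoint(self, x):
        return x * self.weights

w = DataContainer(1.0 + numpy.random.rand(4, 5).astype(numpy.float32))
b = DataContainer(numpy.random.rand(4, 5).astype(numpy.float32))
x, it, timing, criter = CGLS(b * 0, DiagonalOperator(w), b,
                             opt={'iter': 10, 'tol': 1e-4})
print(it, criter[-1])   # residual norm after the final iteration
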
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py b/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py
deleted file mode 100644
index efc465c..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.optimisation.ops import Identity, FiniteDiff2D
-import numpy
-from ccpi.framework import DataContainer
-import warnings
-from ccpi.optimisation.functions import Function
-def isSizeCorrect(data1 ,data2):
- if issubclass(type(data1), DataContainer) and \
- issubclass(type(data2), DataContainer):
- # check dimensionality
- if data1.check_dimensions(data2):
- return True
- elif issubclass(type(data1) , numpy.ndarray) and \
- issubclass(type(data2) , numpy.ndarray):
- return data1.shape == data2.shape
- else:
- raise ValueError("{0}: getting two incompatible types: {1} {2}"\
- .format('Function', type(data1), type(data2)))
- return False
-class Norm2(Function):
-
- def __init__(self,
- gamma=1.0,
- direction=None):
- super(Norm2, self).__init__()
- self.gamma = gamma
- self.direction = direction
-
- def __call__(self, x, out=None):
-
- if out is None:
- xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction,
- keepdims=True))
- else:
- if isSizeCorrect(out, x):
- # check dimensionality
- if issubclass(type(out), DataContainer):
- arr = out.as_array()
- numpy.square(x.as_array(), out=arr)
- xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True))
-
- elif issubclass(type(out) , numpy.ndarray):
- numpy.square(x.as_array(), out=out)
- xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True))
- else:
- raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
-
- p = numpy.sum(self.gamma*xx)
-
- return p
-
- def prox(self, x, tau):
-
- xx = numpy.sqrt(numpy.sum( numpy.square(x.as_array()), self.direction,
- keepdims=True ))
- xx = numpy.maximum(0, 1 - tau*self.gamma / xx)
- p = x.as_array() * xx
-
- return type(x)(p,geometry=x.geometry)
- def proximal(self, x, tau, out=None):
- if out is None:
- return self.prox(x,tau)
- else:
- if isSizeCorrect(out, x):
- # check dimensionality
- if issubclass(type(out), DataContainer):
- numpy.square(x.as_array(), out = out.as_array())
- xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction,
- keepdims=True ))
- xx = numpy.maximum(0, 1 - tau*self.gamma / xx)
- x.multiply(xx, out= out.as_array())
-
-
- elif issubclass(type(out) , numpy.ndarray):
- numpy.square(x.as_array(), out=out)
- xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True))
-
- xx = numpy.maximum(0, 1 - tau*self.gamma / xx)
- x.multiply(xx, out= out)
- else:
- raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
-
-
-class TV2D(Norm2):
-
- def __init__(self, gamma):
- super(TV2D,self).__init__(gamma, 0)
- self.op = FiniteDiff2D()
- self.L = self.op.get_max_sing_val()
-
-
-# Define a class for squared 2-norm
-class Norm2sq(Function):
- '''
- f(x) = c*||A*x-b||_2^2
-
- which has
-
- grad[f](x) = 2*c*A^T*(A*x-b)
-
- and Lipschitz constant
-
- L = 2*c*||A||_2^2 = 2*s1(A)^2
-
- where s1(A) is the largest singular value of A.
-
- '''
-
- def __init__(self,A,b,c=1.0,memopt=False):
- super(Norm2sq, self).__init__()
-
- self.A = A # Should be an operator, default identity
- self.b = b # Default zero DataSet?
- self.c = c # Default 1.
- if memopt:
- try:
- self.range_tmp = A.range_geometry().allocate()
- self.domain_tmp = A.domain_geometry().allocate()
- self.memopt = True
- except NameError as ne:
- warnings.warn(str(ne))
- self.memopt = False
- except NotImplementedError as nie:
- print (nie)
- warnings.warn(str(nie))
- self.memopt = False
- else:
- self.memopt = False
-
- # Compute the Lipschitz parameter from the operator if possible
- # Leave it initialised to None otherwise
- try:
- self.L = 2.0*self.c*(self.A.norm()**2)
- except AttributeError as ae:
- pass
- except NotImplementedError as noe:
- pass
-
- #def grad(self,x):
- # return self.gradient(x, out=None)
-
- def __call__(self,x):
- #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
- #if out is None:
- # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() )
- #else:
- y = self.A.direct(x)
- y.__isub__(self.b)
- #y.__imul__(y)
- #return y.sum() * self.c
- try:
- return y.squared_norm() * self.c
- except AttributeError as ae:
- # added for compatibility with SIRF
- return (y.norm()**2) * self.c
-
- def gradient(self, x, out = None):
- if self.memopt:
- #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b )
-
- self.A.direct(x, out=self.range_tmp)
- self.range_tmp -= self.b
- self.A.adjoint(self.range_tmp, out=out)
- #self.direct_placehold.multiply(2.0*self.c, out=out)
- out *= (self.c * 2.0)
- else:
- return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
-
-
-
-# Box constraints indicator function. Calling returns 0 if argument is within
-# the box. The prox operator is projection onto the box. Only implements one
-# scalar lower and one upper as constraint on all elements. Should generalise
-# to vectors to allow different constraints on individual elements.
-class IndicatorBox(Function):
-
- def __init__(self,lower=-numpy.inf,upper=numpy.inf):
- # Do nothing
- super(IndicatorBox, self).__init__()
- self.lower = lower
- self.upper = upper
-
-
- def __call__(self,x):
-
- if (numpy.all(x.array>=self.lower) and
- numpy.all(x.array <= self.upper) ):
- val = 0
- else:
- val = numpy.inf
- return val
-
- def prox(self,x,tau=None):
- return (x.maximum(self.lower)).minimum(self.upper)
-
- def proximal(self, x, tau, out=None):
- if out is None:
- return self.prox(x, tau)
- else:
- if not x.shape == out.shape:
- raise ValueError('IndicatorBox: Incompatible size:',
- x.shape, out.shape)
- # the proximal map of the box indicator is projection onto the box
- out.fill(self.prox(x, tau))
-
-# A more interesting example, least squares plus 1-norm minimization.
-# Define class to represent 1-norm including prox function
-class Norm1(Function):
-
- def __init__(self,gamma):
- super(Norm1, self).__init__()
- self.gamma = gamma
- self.L = 1
- self.sign_x = None
-
- def __call__(self,x,out=None):
- if out is None:
- return self.gamma*(x.abs().sum())
- else:
- if not x.shape == out.shape:
- raise ValueError('Norm1 Incompatible size:',
- x.shape, out.shape)
- x.abs(out=out)
- return out.sum() * self.gamma
-
- def prox(self,x,tau):
- return (x.abs() - tau*self.gamma).maximum(0) * x.sign()
-
- def proximal(self, x, tau, out=None):
- if out is None:
- return self.prox(x, tau)
- else:
- if isSizeCorrect(x,out):
- # check dimensionality
- if issubclass(type(out), DataContainer):
- v = (x.abs() - tau*self.gamma).maximum(0)
- x.sign(out=out)
- out *= v
- #out.fill(self.prox(x,tau))
- elif issubclass(type(out) , numpy.ndarray):
- v = (x.abs() - tau*self.gamma).maximum(0)
- out[:] = x.sign()
- out *= v
- #out[:] = self.prox(x,tau)
- else:
- raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
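
The Norm1.prox removed above is the classical soft-thresholding operator. A standalone NumPy check of the same formula (the helper name soft_threshold is assumed for illustration):

import numpy as np

def soft_threshold(x, t):
    # prox of t*||.||_1 : shrink every entry towards zero by t
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

x = np.array([-3.0, -0.2, 0.0, 0.5, 2.0])
print(soft_threshold(x, 1.0))   # [-2.  0.  0.  0.  1.]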
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py
deleted file mode 100644
index 81c16cd..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Mar 8 10:01:31 2019
-
-@author: evangelos
-"""
-
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.framework import BlockDataContainer
-from numbers import Number
-
-class BlockFunction(Function):
- '''A Block vector of Functions
-
- .. math::
-
- f = [f_1,f_2,f_3]
- f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3)
-
- '''
- def __init__(self, *functions):
- '''Creator'''
- self.functions = functions
- self.length = len(self.functions)
-
- super(BlockFunction, self).__init__()
-
- def __call__(self, x):
- '''evaluates the BlockFunction on the BlockDataContainer
-
- :param: x (BlockDataContainer): must have as many rows as self.length
-
- returns sum(f_i(x_i))
- '''
- if self.length != x.shape[0]:
- raise ValueError('BlockFunction and BlockDataContainer have incompatible size')
- t = 0
- for i in range(x.shape[0]):
- t += self.functions[i](x.get_item(i))
- return t
-
- def convex_conjugate(self, x):
- '''Convex_conjugate does not take into account the BlockOperator'''
- t = 0
- for i in range(x.shape[0]):
- t += self.functions[i].convex_conjugate(x.get_item(i))
- return t
-
-
- def proximal_conjugate(self, x, tau, out = None):
- '''proximal_conjugate does not take into account the BlockOperator'''
- out = [None]*self.length
- if isinstance(tau, Number):
- for i in range(self.length):
- out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
- else:
- for i in range(self.length):
- out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i))
-
- return BlockDataContainer(*out)
-
- def proximal(self, x, tau, out = None):
- '''proximal does not take into account the BlockOperator'''
- out = [None]*self.length
- if isinstance(tau, Number):
- for i in range(self.length):
- out[i] = self.functions[i].proximal(x.get_item(i), tau)
- else:
- for i in range(self.length):
- out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i))
-
- return BlockDataContainer(*out)
-
- def gradient(self,x, out=None):
- '''FIXME: gradient returns pass'''
- pass \ No newline at end of file
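
The deleted BlockFunction evaluates the sum of component functions on the matching blocks. A plain-Python sketch of that rule, using two NumPy callables in place of Function objects (all names assumed):

import numpy as np

f1 = lambda x: np.sum(x**2)        # squared 2-norm on block 0
f2 = lambda x: np.sum(np.abs(x))   # 1-norm on block 1

def block_call(functions, blocks):
    # f([x_1, ..., x_n]) = sum_i f_i(x_i), as in BlockFunction.__call__
    return sum(f(x) for f, x in zip(functions, blocks))

x1, x2 = np.array([1.0, 2.0]), np.array([-3.0, 0.5])
print(block_call([f1, f2], [x1, x2]))   # 5.0 + 3.5 = 8.5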
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py
deleted file mode 100644
index 82f24a6..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import warnings
-from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
-
-class Function(object):
- '''Abstract class representing a function
-
- Members:
- L is the Lipschitz constant of the gradient of the Function
- '''
- def __init__(self):
- self.L = None
-
- def __call__(self,x, out=None):
- '''Evaluates the function at x '''
- raise NotImplementedError
-
- def gradient(self, x, out=None):
- '''Returns the gradient of the function at x, if the function is differentiable'''
- raise NotImplementedError
-
- def proximal(self, x, tau, out=None):
- '''This returns the proximal operator for the function at x, tau'''
- raise NotImplementedError
-
- def convex_conjugate(self, x, out=None):
- '''This evaluates the convex conjugate of the function at x'''
- raise NotImplementedError
-
- def proximal_conjugate(self, x, tau, out = None):
- '''This returns the proximal operator for the convex conjugate of the function at x, tau'''
- raise NotImplementedError
-
- def grad(self, x):
- '''Alias of gradient(x,None)'''
- warnings.warn('''This method will disappear in following
- versions of the CIL. Use gradient instead''', DeprecationWarning)
- return self.gradient(x, out=None)
-
- def prox(self, x, tau):
- '''Alias of proximal(x, tau, None)'''
- warnings.warn('''This method will disappear in following
- versions of the CIL. Use proximal instead''', DeprecationWarning)
- return self.proximal(x, tau, out=None)
-
- def __rmul__(self, scalar):
- '''Defines the multiplication by a scalar on the left
-
- returns a ScaledFunction'''
- return ScaledFunction(self, scalar)
-
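
The abstract interface removed above expects subclasses to provide __call__, gradient and proximal. A self-contained sketch of such a subclass for f(x) = ||x||_2^2, operating on NumPy arrays rather than DataContainers (illustrative only, not part of the library):

import numpy as np

class SquaredNorm:
    # Follows the same three-method interface; Lipschitz constant of the gradient is L = 2.
    def __init__(self):
        self.L = 2.0

    def __call__(self, x):
        return float(np.dot(x, x))

    def gradient(self, x, out=None):
        g = 2.0 * x
        if out is None:
            return g
        out[:] = g

    def proximal(self, x, tau, out=None):
        # argmin_u { 0.5||u - x||^2 + tau ||u||^2 } = x / (1 + 2 tau)
        p = x / (1.0 + 2.0 * tau)
        if out is None:
            return p
        out[:] = p

f = SquaredNorm()
x = np.array([3.0, 4.0])
print(f(x), f.gradient(x), f.proximal(x, tau=0.5))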
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py
deleted file mode 100644
index 34b7e35..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Mar 8 09:55:36 2019
-
-@author: evangelos
-"""
-
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.optimisation.functions import ScaledFunction
-
-
-class FunctionOperatorComposition(Function):
-
- def __init__(self, operator, function):
- super(FunctionOperatorComposition, self).__init__()
- self.function = function
- self.operator = operator
- alpha = 1
- if isinstance (function, ScaledFunction):
- alpha = function.scalar
- self.L = 2 * alpha * operator.norm()**2
-
-
- def __call__(self, x):
-
- return self.function(self.operator.direct(x))
-
- def call_adjoint(self, x):
-
- return self.function(self.operator.adjoint(x))
-
- def convex_conjugate(self, x):
-
- ''' convex_conjugate does not take into account the Operator'''
- return self.function.convex_conjugate(x)
-
- def proximal(self, x, tau, out=None):
-
- '''proximal does not take into account the Operator'''
-
- return self.function.proximal(x, tau, out=out)
-
- def proximal_conjugate(self, x, tau, out=None):
-
- ''' proximal conjugate does not take into account the Operator'''
-
- return self.function.proximal_conjugate(x, tau, out=out)
-
- def gradient(self, x, out=None):
-
- ''' Gradient takes into account the Operator'''
- if out is None:
- return self.operator.adjoint(
- self.function.gradient(self.operator.direct(x))
- )
- else:
- self.operator.adjoint(
- self.function.gradient(self.operator.direct(x)),
- out=out)
-
- \ No newline at end of file
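
FunctionOperatorComposition applies the chain rule: the gradient of x -> f(Ax) is A^T grad_f(Ax). A NumPy check of that identity against a central finite difference (all names are illustrative):

import numpy as np

A = np.array([[1.0, 2.0],
              [0.0, 1.0],
              [3.0, 0.0]])
b = np.array([1.0, 0.0, 2.0])

f = lambda y: np.sum((y - b)**2)     # inner function
grad_f = lambda y: 2.0 * (y - b)     # its gradient

def grad_composition(x):
    # chain rule: gradient of f(A x) is A^T grad_f(A x)
    return A.T @ grad_f(A @ x)

x = np.array([0.3, -0.7])
eps, e0 = 1e-6, np.array([1.0, 0.0])
fd = (f(A @ (x + eps * e0)) - f(A @ (x - eps * e0))) / (2 * eps)
print(np.isclose(grad_composition(x)[0], fd))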
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py
deleted file mode 100644
index df8dc89..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from ccpi.optimisation.functions import Function
-import numpy
-
-class IndicatorBox(Function):
- '''Box constraints indicator function.
-
- Calling returns 0 if argument is within the box. The prox operator is projection onto the box.
- Only implements one scalar lower and one upper as constraint on all elements. Should generalise
- to vectors to allow different constraints on individual elements.
-'''
-
- def __init__(self,lower=-numpy.inf,upper=numpy.inf):
- # Do nothing
- super(IndicatorBox, self).__init__()
- self.lower = lower
- self.upper = upper
-
-
- def __call__(self,x):
-
- if (numpy.all(x.array>=self.lower) and
- numpy.all(x.array <= self.upper) ):
- val = 0
- else:
- val = numpy.inf
- return val
-
- def prox(self,x,tau=None):
- return (x.maximum(self.lower)).minimum(self.upper)
-
- def proximal(self, x, tau, out=None):
- if out is None:
- return self.prox(x, tau)
- else:
- if not x.shape == out.shape:
- raise ValueError('IndicatorBox: Incompatible size:',
- x.shape, out.shape)
- # the proximal map of the box indicator is projection onto the box
- out.fill(self.prox(x, tau))
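
The proximal map of the box indicator above is a plain clip of every element into [lower, upper]. A one-line NumPy equivalent (function name assumed):

import numpy as np

def project_box(x, lower, upper):
    # prox of the box indicator: clip every entry into [lower, upper]
    return np.clip(x, lower, upper)

x = np.array([-2.0, 0.3, 5.0])
print(project_box(x, 0.0, 1.0))   # [0.  0.3 1. ]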
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py
deleted file mode 100644
index 5a47edd..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Created on Wed Mar 6 19:42:34 2019
-
-@author: evangelos
-"""
-
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.framework import DataContainer, ImageData, ImageGeometry
-
-
-############################ L1NORM FUNCTIONS #############################
-class SimpleL1Norm(Function):
-
- def __init__(self, alpha=1):
-
- super(SimpleL1Norm, self).__init__()
- self.alpha = alpha
-
- def __call__(self, x):
- return self.alpha * x.abs().sum()
-
- def gradient(self,x):
- raise ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
- return 0
-
- def proximal(self, x, tau):
- ''' Soft Threshold'''
- return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
-
- def proximal_conjugate(self, x, tau):
- return x.divide((x.abs()/self.alpha).maximum(1.0))
-
-class L1Norm(SimpleL1Norm):
-
- def __init__(self, alpha=1, **kwargs):
-
- super(L1Norm, self).__init__()
- self.alpha = alpha
- self.b = kwargs.get('b',None)
-
- def __call__(self, x):
-
- if self.b is None:
- return SimpleL1Norm.__call__(self, x)
- else:
- return SimpleL1Norm.__call__(self, x - self.b)
-
- def gradient(self, x):
- raise ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
- if self.b is None:
- return SimpleL1Norm.convex_conjugate(self, x)
- else:
- return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
-
- def proximal(self, x, tau):
-
- if self.b is None:
- return SimpleL1Norm.proximal(self, x, tau)
- else:
- return self.b + SimpleL1Norm.proximal(self, x - self.b , tau)
-
- def proximal_conjugate(self, x, tau):
-
- if self.b is None:
- return SimpleL1Norm.proximal_conjugate(self, x, tau)
- else:
- return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
- \ No newline at end of file
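
L1Norm.proximal above shifts by b, soft-thresholds, then shifts back. The same rule in standalone NumPy (helper names assumed for illustration):

import numpy as np

def soft_threshold(x, t):
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

def prox_translated_l1(x, b, alpha, tau):
    # prox of tau * alpha * ||x - b||_1, mirroring L1Norm.proximal
    return b + soft_threshold(x - b, tau * alpha)

x = np.array([3.0, -1.0, 0.5])
b = np.array([1.0, 1.0, 1.0])
print(prox_translated_l1(x, b, alpha=1.0, tau=0.3))   # [2.7 -0.7 0.8]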
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py
deleted file mode 100644
index 889d703..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy
-from ccpi.optimisation.functions import Function
-from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
-from ccpi.framework import DataContainer, ImageData, ImageGeometry
-
-############################ L2NORM FUNCTION #############################
-class L2NormSquared(Function):
-
- def __init__(self, **kwargs):
-
- ''' L2NormSquared class
- f : ImageGeometry --> R
-
- Cases: f(x) = ||x||^{2}_{2}
- f(x) = || x - b ||^{2}_{2}
-
- '''
-
- #TODO need x, b to live in the same geometry if b is not None
-
- super(L2NormSquared, self).__init__()
- self.b = kwargs.get('b',None)
-
- def __call__(self, x):
- ''' Evaluates L2NormSq at point x'''
-
- y = x
- if self.b is not None:
-# x.subtract(self.b, out = x)
- y = x - self.b
-# else:
-# y
-# if out is None:
-# return x.squared_norm()
-# else:
- try:
- return y.squared_norm()
- except AttributeError as ae:
- # added for compatibility with SIRF
- return (y.norm()**2)
-
-
-
- def gradient(self, x, out=None):
- ''' Evaluates gradient of L2NormSq at point x'''
- if out is not None:
- out.fill(x)
- if self.b is not None:
- out -= self.b
- out *= 2
- else:
- y = x
- if self.b is not None:
-# x.subtract(self.b, out=x)
- y = x - self.b
- return 2*y
-
-
- def convex_conjugate(self, x, out=None):
- ''' Evaluate convex conjugate of L2NormSq'''
-
- tmp = 0
- if self.b is not None:
-# tmp = (self.b * x).sum()
- tmp = (x * self.b).sum()
-
- if out is None:
- # FIXME: this is a number
- return (1./4.) * x.squared_norm() + tmp
- else:
- # FIXME: this is a DataContainer
- out.fill((1./4.) * x.squared_norm() + tmp)
-
-
- def proximal(self, x, tau, out = None):
-
- ''' The proximal operator prox_{tau f}(x), i.e.
- argmin_u { 0.5||u - x||^{2} + tau f(u) }
- '''
-
- if out is None:
- if self.b is not None:
- return (x - self.b)/(1+2*tau) + self.b
- else:
- return x/(1+2*tau)
- else:
- out.fill(x)
- if self.b is not None:
- out -= self.b
- out /= (1+2*tau)
- if self.b is not None:
- out += self.b
- #out.fill((x - self.b)/(1+2*tau) + self.b)
- #else:
- # out.fill(x/(1+2*tau))
-
-
- def proximal_conjugate(self, x, tau, out=None):
-
- if out is None:
- if self.b is not None:
- # change the order cannot add ImageData + NestedBlock
- return (-1* tau*self.b + x)/(1 + tau/2)
- else:
- return x/(1 + tau/2 )
- else:
- if self.b is not None:
- out.fill((x - tau*self.b)/(1 + tau/2))
- else:
- out.fill(x/(1 + tau/2 ))
-
- def __rmul__(self, scalar):
- return ScaledFunction(self, scalar)
-
-
-if __name__ == '__main__':
-
-
- # TESTS for L2 and scalar * L2
-
- M, N, K = 2,3,5
- ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
- u = ig.allocate('random_int')
- b = ig.allocate('random_int')
-
- # check grad/call no data
- f = L2NormSquared()
- a1 = f.gradient(u)
- a2 = 2 * u
- numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4)
- numpy.testing.assert_equal(f(u), u.squared_norm())
-
- # check grad/call with data
- f1 = L2NormSquared(b=b)
- b1 = f1.gradient(u)
- b2 = 2 * (u-b)
-
- numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4)
- numpy.testing.assert_equal(f1(u), (u-b).squared_norm())
-
- #check convex conjuagate no data
- c1 = f.convex_conjugate(u)
- c2 = 1/4 * u.squared_norm()
- numpy.testing.assert_equal(c1, c2)
-
- #check convex conjuagate with data
- d1 = f1.convex_conjugate(u)
- d2 = (1/4) * u.squared_norm() + (u*b).sum()
- numpy.testing.assert_equal(d1, d2)
-
- # check proximal no data
- tau = 5
- e1 = f.proximal(u, tau)
- e2 = u/(1+2*tau)
- numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4)
-
- # check proximal with data
- tau = 5
- h1 = f1.proximal(u, tau)
- h2 = (u-b)/(1+2*tau) + b
- numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4)
-
- # check proximal conjugate no data
- tau = 0.2
- k1 = f.proximal_conjugate(u, tau)
- k2 = u/(1 + tau/2 )
- numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4)
-
- # check proximal conjugate with data
- l1 = f1.proximal_conjugate(u, tau)
- l2 = (u - tau * b)/(1 + tau/2 )
- numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4)
-
-
- # check scaled function properties
-
- # scalar
- scalar = 100
- f_scaled_no_data = scalar * L2NormSquared()
- f_scaled_data = scalar * L2NormSquared(b=b)
-
- # call
- numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u))
- numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u))
-
- # grad
- numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4)
- numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4)
-
- # conj
- numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \
- f.convex_conjugate(u/scalar) * scalar, decimal=4)
-
- numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \
- scalar * f1.convex_conjugate(u/scalar), decimal=4)
-
- # proximal
- numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \
- f.proximal(u, tau*scalar).as_array())
-
-
- numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \
- f1.proximal(u, tau*scalar).as_array())
-
-
- # proximal conjugate
- numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \
- (u/(1 + tau/(2*scalar) )).as_array(), decimal=4)
-
- numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \
- ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4)
-
-
-
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py
deleted file mode 100644
index 1c51236..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-from ccpi.optimisation.functions import Function, ScaledFunction
-from ccpi.framework import DataContainer, ImageData, \
- ImageGeometry, BlockDataContainer
-
-############################ mixed_L1,2NORM FUNCTIONS #####################
-class MixedL21Norm(Function):
-
- def __init__(self, **kwargs):
-
- super(MixedL21Norm, self).__init__()
- self.SymTensor = kwargs.get('SymTensor',False)
-
- def __call__(self, x, out=None):
-
- ''' Evaluates L1,2Norm at point x
-
- :param: x is a BlockDataContainer
-
- '''
- if self.SymTensor:
-
- param = [1]*x.shape[0]
- param[-1] = 2
- tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])]
- res = sum(tmp).sqrt().sum()
- else:
-
-# tmp = [ x[i]**2 for i in range(x.shape[0])]
- tmp = [ el**2 for el in x.containers ]
-
-# print(x.containers)
-# print(tmp)
-# print(type(sum(tmp)))
-# print(type(tmp))
- res = sum(tmp).sqrt().sum()
-# print(res)
- return res
-
- def gradient(self, x, out=None):
- raise ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
-
- ''' This is the indicator function of the unit ball of ||\cdot||_{2, \infty},
- which is 0 if ||x||_{2, \infty} <= 1 and \infty otherwise
- '''
- return 0.0
-
- def proximal(self, x, tau, out=None):
-
- '''
- For this we need to define a MixedL2,2 norm acting on a BlockDataContainer,
- different from L2NormSquared which acts on a DataContainer
-
- '''
-
- pass
-
- def proximal_conjugate(self, x, tau, out=None):
-
- if self.SymTensor:
-
- param = [1]*x.shape[0]
- param[-1] = 2
- tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])]
- frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])]
- res = BlockDataContainer(*frac)
-
- return res
-
-# tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
-# res = x.divide(ImageData(tmp2).maximum(1.0))
- else:
-
- tmp = [ el*el for el in x]
- res = (sum(tmp).sqrt()).maximum(1.0)
- frac = [x[i]/res for i in range(x.shape[0])]
- res = BlockDataContainer(*frac)
-
- return res
-
- def __rmul__(self, scalar):
- return ScaledFunction(self, scalar)
-
-#class MixedL21Norm_tensor(Function):
-#
-# def __init__(self):
-# print("feerf")
-#
-#
-if __name__ == '__main__':
-
- M, N, K = 2,3,5
- ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N)
- u1 = ig.allocate('random_int')
- u2 = ig.allocate('random_int')
-
- U = BlockDataContainer(u1, u2, shape=(2,1))
-
- # Define no scale and scaled
- f_no_scaled = MixedL21Norm()
- f_scaled = 0.5 * MixedL21Norm()
-
- # call
-
- a1 = f_no_scaled(U)
- a2 = f_scaled(U)
-
- z = f_no_scaled.proximal_conjugate(U, 1)
-
- f_no_scaled = MixedL21Norm()
-
- tmp = [el*el for el in U]
-
-
-
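
MixedL21Norm.proximal_conjugate above divides each component by the pointwise Euclidean norm capped at 1, i.e. projects onto the unit ball of the (2, infinity) norm. A NumPy sketch of that projection for a two-component field (names assumed):

import numpy as np

def project_l2inf_unit_ball(y1, y2):
    # pointwise projection onto {(y1, y2): sqrt(y1^2 + y2^2) <= 1}
    norm = np.maximum(np.sqrt(y1**2 + y2**2), 1.0)
    return y1 / norm, y2 / norm

y1 = np.array([3.0, 0.1])
y2 = np.array([4.0, 0.2])
p1, p2 = project_l2inf_unit_ball(y1, y2)
print(np.sqrt(p1**2 + p2**2))   # every entry is <= 1 after projection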
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py
deleted file mode 100644
index b553d7c..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from ccpi.optimisation.functions import Function
-import numpy
-import warnings
-
-# Define a class for squared 2-norm
-class Norm2sq(Function):
- '''
- f(x) = c*||A*x-b||_2^2
-
- which has
-
- grad[f](x) = 2*c*A^T*(A*x-b)
-
- and Lipschitz constant
-
- L = 2*c*||A||_2^2 = 2*s1(A)^2
-
- where s1(A) is the largest singular value of A.
-
- '''
-
- def __init__(self,A,b,c=1.0,memopt=False):
- super(Norm2sq, self).__init__()
-
- self.A = A # Should be an operator, default identity
- self.b = b # Default zero DataSet?
- self.c = c # Default 1.
- if memopt:
- try:
- self.range_tmp = A.range_geometry().allocate()
- self.domain_tmp = A.domain_geometry().allocate()
- self.memopt = True
- except NameError as ne:
- warnings.warn(str(ne))
- self.memopt = False
- except NotImplementedError as nie:
- print (nie)
- warnings.warn(str(nie))
- self.memopt = False
- else:
- self.memopt = False
-
- # Compute the Lipschitz parameter from the operator if possible
- # Leave it initialised to None otherwise
- try:
- self.L = 2.0*self.c*(self.A.norm()**2)
- except AttributeError as ae:
- pass
- except NotImplementedError as noe:
- pass
-
- #def grad(self,x):
- # return self.gradient(x, out=None)
-
- def __call__(self,x):
- #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
- #if out is None:
- # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() )
- #else:
- y = self.A.direct(x)
- y.__isub__(self.b)
- #y.__imul__(y)
- #return y.sum() * self.c
- try:
- return y.squared_norm() * self.c
- except AttributeError as ae:
- # added for compatibility with SIRF
- return (y.norm()**2) * self.c
-
- def gradient(self, x, out = None):
- if self.memopt:
- #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b )
-
- self.A.direct(x, out=self.range_tmp)
- self.range_tmp -= self.b
- self.A.adjoint(self.range_tmp, out=out)
- #self.direct_placehold.multiply(2.0*self.c, out=out)
- out *= (self.c * 2.0)
- else:
- return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
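
Norm2sq documents both the gradient 2c A^T (Ax - b) and the Lipschitz constant L = 2c||A||_2^2. A standalone gradient-descent sketch using exactly those two quantities with a dense matrix (illustrative only, not the library API):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(15, 5))
b = A @ np.arange(1.0, 6.0)
c = 1.0

L = 2.0 * c * np.linalg.norm(A, 2)**2       # Lipschitz constant, as in Norm2sq.L
x = np.zeros(5)
for _ in range(500):
    grad = 2.0 * c * A.T @ (A @ x - b)      # gradient as documented above
    x -= grad / L                           # fixed step size 1/L
print(np.round(x, 3))                       # converges towards [1. 2. 3. 4. 5.]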
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py
deleted file mode 100644
index 046a4a6..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from numbers import Number
-import numpy
-import warnings
-
-class ScaledFunction(object):
- '''ScaledFunction
-
- A class to represent the multiplication of a Function by a scalar.
- It holds a function and a scalar, and returns the scalar multiple of the
- results of the function's __call__, convex_conjugate and gradient.
- For the rest it behaves like the function it holds.
-
- Args:
- function (Function): a Function or BlockOperator
- scalar (Number): a scalar multiplier
- Example:
- The scaled operator behaves like the following:
-
- '''
- def __init__(self, function, scalar):
- super(ScaledFunction, self).__init__()
- self.L = None
- if not isinstance (scalar, Number):
- raise TypeError('expected scalar: got {}'.format(type(scalar)))
- self.scalar = scalar
- self.function = function
-
- def __call__(self,x, out=None):
- '''Evaluates the function at x '''
- return self.scalar * self.function(x)
-
- def convex_conjugate(self, x):
- '''returns the convex_conjugate of the scaled function '''
- # if out is None:
- # return self.scalar * self.function.convex_conjugate(x/self.scalar)
- # else:
- # out.fill(self.function.convex_conjugate(x/self.scalar))
- # out *= self.scalar
- return self.scalar * self.function.convex_conjugate(x/self.scalar)
-
- def proximal_conjugate(self, x, tau, out = None):
- '''This returns the proximal operator for the function at x, tau
- '''
- if out is None:
- return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)
- else:
- out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar))
-
- def grad(self, x):
- '''Alias of gradient(x,None)'''
- warnings.warn('''This method will disappear in following
- versions of the CIL. Use gradient instead''', DeprecationWarning)
- return self.gradient(x, out=None)
-
- def prox(self, x, tau):
- '''Alias of proximal(x, tau, None)'''
- warnings.warn('''This method will disappear in following
- versions of the CIL. Use proximal instead''', DeprecationWarning)
- return self.proximal(x, tau, out=None)
-
- def gradient(self, x, out=None):
- '''Returns the gradient of the function at x, if the function is differentiable'''
- if out is None:
- return self.scalar * self.function.gradient(x)
- else:
- out.fill( self.scalar * self.function.gradient(x) )
-
- def proximal(self, x, tau, out=None):
- '''This returns the proximal operator for the function at x, tau
- '''
- if out is None:
- return self.function.proximal(x, tau*self.scalar)
- else:
- out.fill( self.function.proximal(x, tau*self.scalar) )
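
ScaledFunction.convex_conjugate relies on the identity (alpha*f)^*(y) = alpha * f^*(y/alpha). A brute-force numerical check of that identity for the 1D function f(x) = x^2 (grid size and names are illustrative assumptions):

import numpy as np

alpha, y = 3.0, 2.0
xs = np.linspace(-10, 10, 200001)

f = lambda x: x**2
conj = lambda g, v: np.max(v * xs - g(xs))     # brute-force convex conjugate on a grid

lhs = conj(lambda x: alpha * f(x), y)          # (alpha*f)^*(y)
rhs = alpha * conj(f, y / alpha)               # alpha * f^*(y/alpha)
print(round(lhs, 4), round(rhs, 4))            # both approximately 1/3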
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py
deleted file mode 100644
index 88d9b64..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.framework import DataContainer, ImageData
-from ccpi.framework import BlockDataContainer
-
-class ZeroFun(Function):
-
- def __init__(self):
- super(ZeroFun, self).__init__()
-
- def __call__(self,x):
- return 0
-
- def convex_conjugate(self, x):
- ''' The convex conjugate of the zero function is the support function
- sup <x, x^{*}>, i.e. the indicator function of the set {0}:
- 0 if x = 0 and inf otherwise
- '''
-
- if x.shape[0]==1:
- return x.maximum(0).sum()
- else:
- if isinstance(x, BlockDataContainer):
- return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum()
- else:
- return x.maximum(0).sum() + x.maximum(0).sum()
-
- def proximal(self,x,tau, out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def proximal_conjugate(self, x, tau):
- return 0
-
- def domain_geometry(self):
- pass
- def range_geometry(self):
- pass \ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py
deleted file mode 100644
index 2ed36f5..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from .Function import Function
-from .ZeroFun import ZeroFun
-from .L1Norm import SimpleL1Norm, L1Norm
-#from .L2NormSquared import L2NormSq, SimpleL2NormSq
-from .L2NormSquared import L2NormSquared
-from .BlockFunction import BlockFunction
-from .ScaledFunction import ScaledFunction
-from .FunctionOperatorComposition import FunctionOperatorComposition
-from .MixedL21Norm import MixedL21Norm
-from .IndicatorBox import IndicatorBox
-from .Norm2Sq import Norm2sq
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py
deleted file mode 100644
index 8632920..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Feb 7 13:10:56 2019
-
-@author: evangelos
-"""
-
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.framework import DataContainer, ImageData, ImageGeometry
-from operators import CompositeDataContainer, Identity, CompositeOperator
-from numbers import Number
-
-
-############################ L2NORM FUNCTIONS #############################
-class SimpleL2NormSq(Function):
-
- def __init__(self, alpha=1):
-
- super(SimpleL2NormSq, self).__init__()
- self.alpha = alpha
-
- def __call__(self, x):
- return self.alpha * x.power(2).sum()
-
- def gradient(self,x):
- return 2 * self.alpha * x
-
- def convex_conjugate(self,x):
- return (1/4*self.alpha) * x.power(2).sum()
-
- def proximal(self, x, tau):
- return x.divide(1+2*tau*self.alpha)
-
- def proximal_conjugate(self, x, tau):
- return x.divide(1 + tau/2*self.alpha )
-
-
-class L2NormSq(SimpleL2NormSq):
-
- def __init__(self, A, b = None, alpha=1, **kwargs):
-
- super(L2NormSq, self).__init__(alpha=alpha)
- self.alpha = alpha
- self.A = A
- self.b = b
-
- def __call__(self, x):
-
- if self.b is None:
- return SimpleL2NormSq.__call__(self, self.A.direct(x))
- else:
- return SimpleL2NormSq.__call__(self, self.A.direct(x) - self.b)
-
- def convex_conjugate(self, x):
-
- ''' The convex conjugate corresponds to the simple functional i.e.,
- f(x) = alpha * ||x - b||_{2}^{2}
- '''
-
- if self.b is None:
- return SimpleL2NormSq.convex_conjugate(self, x)
- else:
- return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum()
-
- def gradient(self, x):
-
- if self.b is None:
- return 2*self.alpha * self.A.adjoint(self.A.direct(x))
- else:
- return 2*self.alpha * self.A.adjoint(self.A.direct(x) - self.b)
-
- def proximal(self, x, tau):
-
- ''' The proximal operator corresponds to the simple functional i.e.,
- f(x) = alpha * ||x - b||_{2}^{2}
-
- argmin_u { 0.5||u - x||^{2} + tau f(u) }
- '''
-
- if self.b is None:
- return SimpleL2NormSq.proximal(self, x, tau)
- else:
- return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau)
-
-
- def proximal_conjugate(self, x, tau):
-
- ''' The proximal operator corresponds to the simple convex conjugate
- functional, i.e. f^{*}(x^{*}):
- argmin_u { 0.5||u - x||^{2} + tau f^{*}(u) }
- '''
- if self.b is None:
- return SimpleL2NormSq.proximal_conjugate(self, x, tau)
- else:
- return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau)
-
-
-############################ L1NORM FUNCTIONS #############################
-class SimpleL1Norm(Function):
-
- def __init__(self, alpha=1):
-
- super(SimpleL1Norm, self).__init__()
- self.alpha = alpha
-
- def __call__(self, x):
- return self.alpha * x.abs().sum()
-
- def gradient(self,x):
- raise ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
- return 0
-
- def proximal(self, x, tau):
- ''' Soft Threshold'''
- return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
-
- def proximal_conjugate(self, x, tau):
- return x.divide((x.abs()/self.alpha).maximum(1.0))
-
-class L1Norm(SimpleL1Norm):
-
- def __init__(self, A, b = None, alpha=1, **kwargs):
-
- super(L1Norm, self).__init__()
- self.alpha = alpha
- self.A = A
- self.b = b
-
- def __call__(self, x):
-
- if self.b is None:
- return SimpleL1Norm.__call__(self, self.A.direct(x))
- else:
- return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b)
-
- def gradient(self, x):
- raise ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
- if self.b is None:
- return SimpleL1Norm.convex_conjugate(self, x)
- else:
- return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
-
- def proximal(self, x, tau):
-
- if self.b is None:
- return SimpleL1Norm.proximal(self, x, tau)
- else:
- return self.b + SimpleL1Norm.proximal(self, x - self.b , tau)
-
- def proximal_conjugate(self, x, tau):
-
- if self.b is None:
- return SimpleL1Norm.proximal_conjugate(self, x, tau)
- else:
- return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
-
-
-############################ mixed_L1,2NORM FUNCTIONS #############################
-class mixed_L12Norm(Function):
-
- def __init__(self, A, b=None, alpha=1, **kwargs):
-
- super(mixed_L12Norm, self).__init__()
- self.alpha = alpha
- self.A = A
- self.b = b
-
- self.sym_grad = kwargs.get('sym_grad',False)
-
-
-
- def gradient(self,x):
- raise ValueError('Not Differentiable')
-
-
- def __call__(self,x):
-
- y = self.A.direct(x)
- eucl_norm = ImageData(y.power(2).sum(axis=0)).sqrt()
- eucl_norm.__isub__(self.b)
- return eucl_norm.sum() * self.alpha
-
- def convex_conjugate(self,x):
- return 0
-
- def proximal_conjugate(self, x, tau):
-
- if self.b is None:
-
- if self.sym_grad:
- tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
- res = x.divide(ImageData(tmp2).maximum(1.0))
- else:
- res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0))
-
- else:
- res = (x - tau*self.b)/ ((x - tau*self.b)).abs().maximum(1.0)
-
- return res
-
-
-#%%
-
-class ZeroFun(Function):
-
- def __init__(self):
- super(ZeroFun, self).__init__()
-
- def __call__(self,x):
- return 0
-
- def convex_conjugate(self, x):
- ''' The convex conjugate of the zero function is the support function
- sup <x, x^{*}>, i.e. the indicator function of the set {0}:
- 0 if x = 0 and inf otherwise
- '''
- return x.maximum(0).sum()
-
- def proximal(self,x,tau):
- return x.copy()
-
- def proximal_conjugate(self, x, tau):
- return 0
-
-
-class CompositeFunction(Function):
-
- def __init__(self, *args):
- self.functions = args
- self.length = len(self.functions)
-
- def get_item(self, ind):
- return self.functions[ind]
-
- def __call__(self,x):
-
- t = 0
- for i in range(self.length):
- for j in range(x.shape[0]):
- t +=self.functions[i](x.get_item(j))
- return t
-
- def convex_conjugate(self, x):
-
- z = 0
- t = 0
- for i in range(x.shape[0]):
- t += self.functions[z].convex_conjugate(x.get_item(i))
- z += 1
-
- return t
-
- def proximal(self, x, tau, out = None):
-
- if isinstance(tau, Number):
- tau = CompositeDataContainer(tau)
- out = [None]*self.length
- for i in range(self.length):
- out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0))
- return CompositeDataContainer(*out)
-
-
- def proximal_conjugate(self, x, tau, out = None):
-
- if isinstance(tau, Number):
- tau = CompositeDataContainer(tau)
- out = [None]*self.length
- for i in range(self.length):
- out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0))
- return CompositeDataContainer(*out)
-
-
-
-
-if __name__ == '__main__':
-
- N = 3
- ig = (N,N)
- ag = ig
- op1 = Gradient(ig)
- op2 = Identity(ig, ag)
-
- # Form Composite Operator
- operator = CompositeOperator((2,1), op1, op2 )
-
- # Create functions
- alpha = 1
- noisy_data = ImageData(np.random.randint(10, size=ag))
- f = CompositeFunction(L1Norm(op1,alpha), \
- L2NormSq(op2, noisy_data, c = 0.5, memopt = False) )
-
- u = ImageData(np.random.randint(10, size=ig))
- uComp = CompositeDataContainer(u)
-
- print(f(uComp)) # This is f(Kx) = f1(K1*u) + f2(K2*u)
-
- f1 = L1Norm(op1,alpha)
- f2 = L2NormSq(op2, noisy_data, c = 0.5, memopt = False)
-
- print(f1(u) + f2(u))
-
-
-
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py
deleted file mode 100644
index ffeb32e..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Mar 6 19:43:12 2019
-
-@author: evangelos
-"""
-
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.framework import DataContainer, ImageData, ImageGeometry
-
-############################ mixed_L1,2NORM FUNCTIONS #############################
-class mixed_L12Norm(Function):
-
- def __init__(self, alpha, **kwargs):
-
- super(mixed_L12Norm, self).__init__()
-
- self.alpha = alpha
- self.b = kwargs.get('b',None)
- self.sym_grad = kwargs.get('sym_grad',False)
-
- def __call__(self,x):
-
- if self.b is None:
- tmp1 = x
- else:
- tmp1 = x - self.b
-#
- if self.sym_grad:
- tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2)
- else:
- tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt()
-
- return self.alpha*tmp.sum()
-
- def gradient(self,x):
- raise ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
- return 0
-
- def proximal(self, x, tau):
- pass
-
- def proximal_conjugate(self, x, tau):
-
- if self.sym_grad:
- tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
- res = x.divide(ImageData(tmp2).maximum(1.0))
- else:
- res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0))
-
- return res
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py
deleted file mode 100644
index ee8f609..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Feb 14 12:36:40 2019
-
-@author: ofn77899
-"""
-#from ccpi.optimisation.ops import Operator
-import numpy
-from numbers import Number
-import functools
-from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer
-from ccpi.optimisation.operators import Operator, LinearOperator
-from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator
-from ccpi.framework import BlockGeometry
-
-class BlockOperator(Operator):
- '''A Block matrix containing Operators
-
- The Block Framework is a generic strategy to treat variational problems in the
- following form:
-
- .. math::
-
- min Regulariser + Fidelity
-
-
- BlockOperators have a generic shape M x N, and when applied on an
- Nx1 BlockDataContainer, will yield an Mx1 BlockDataContainer.
- Notice: BlockDataContainers are only allowed to have the shape of N x 1, with
- N rows and 1 column.
-
- The user may specify the shape of the block; by default it is (len(args), 1).
-
- Operators in a Block are required to have the same domain column-wise and the
- same range row-wise.
- '''
- __array_priority__ = 1
- def __init__(self, *args, **kwargs):
- '''
- Class creator
-
- Note:
- Do not include the `self` parameter in the ``Args`` section.
-
- Args:
- :param: vararg (Operator): Operators in the block.
- :param: shape (:obj:`tuple`, optional): If shape is passed the Operators in
- vararg are considered input in a row-by-row fashion.
- Shape and number of Operators must match.
-
- Example:
- BlockOperator(op0,op1) results in a row block
- BlockOperator(op0,op1,shape=(1,2)) results in a column block
- '''
- self.operators = args
- shape = kwargs.get('shape', None)
- if shape is None:
- shape = (len(args),1)
- self.shape = shape
- n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
- if len(args) != n_elements:
- raise ValueError(
- 'Dimension and size do not match: expected {} got {}'
- .format(n_elements,len(args)))
- # test if operators are compatible
- if not self.column_wise_compatible():
- raise ValueError('Operators in each column must have the same domain')
- if not self.row_wise_compatible():
- raise ValueError('Operators in each row must have the same range')
-
- def column_wise_compatible(self):
- '''Operators in a Block should have the same domain per column'''
- rows, cols = self.shape
- compatible = True
- for col in range(cols):
- column_compatible = True
- for row in range(1,rows):
- dg0 = self.get_item(row-1,col).domain_geometry()
- dg1 = self.get_item(row,col).domain_geometry()
- column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible
- compatible = compatible and column_compatible
- return compatible
-
- def row_wise_compatible(self):
- '''Operators in a Block should have the same range per row'''
- rows, cols = self.shape
- compatible = True
- for row in range(rows):
- row_compatible = True
- for col in range(1,cols):
- dg0 = self.get_item(row,col-1).range_geometry()
- dg1 = self.get_item(row,col).range_geometry()
- row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible
- compatible = compatible and row_compatible
- return compatible
-
- def get_item(self, row, col):
- '''returns the Operator at specified row and col'''
- if row > self.shape[0]:
- raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
- if col > self.shape[1]:
- raise ValueError('Requested col {} > max {}'.format(col, self.shape[1]))
-
- index = row*self.shape[1]+col
- return self.operators[index]
-
- def norm(self):
- norm = [op.norm()**2 for op in self.operators]
- return numpy.sqrt(sum(norm))
-
- def direct(self, x, out=None):
- '''Direct operation for the BlockOperator
-
- BlockOperators work on BlockDataContainers, but they will also work on DataContainers
- and inherited classes by simply wrapping the input in a BlockDataContainer of shape (1,1)
- '''
- if not isinstance (x, BlockDataContainer):
- x_b = BlockDataContainer(x)
- else:
- x_b = x
- shape = self.get_output_shape(x_b.shape)
- res = []
- for row in range(self.shape[0]):
- for col in range(self.shape[1]):
- if col == 0:
- prod = self.get_item(row,col).direct(x_b.get_item(col))
- else:
- prod += self.get_item(row,col).direct(x_b.get_item(col))
- res.append(prod)
- return BlockDataContainer(*res, shape=shape)
-
- def adjoint(self, x, out=None):
- '''Adjoint operation for the BlockOperator
-
- BlockOperator may contain both LinearOperator and Operator
- This method exists in BlockOperator as it is not known what type of
- Operator it will contain.
-
- BlockOperators work on BlockDataContainers, but they will also work on DataContainers
- and inherited classes by simply wrapping the input in a BlockDataContainer of shape (1,1)
-
- Raises: ValueError if the contained Operators are not linear
- '''
- if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True):
- raise ValueError('Not all operators in Block are linear.')
- if not isinstance (x, BlockDataContainer):
- x_b = BlockDataContainer(x)
- else:
- x_b = x
- shape = self.get_output_shape(x_b.shape, adjoint=True)
- res = []
- for row in range(self.shape[1]):
- for col in range(self.shape[0]):
- if col == 0:
- prod = self.get_item(row, col).adjoint(x_b.get_item(col))
- else:
- prod += self.get_item(row, col).adjoint(x_b.get_item(col))
- res.append(prod)
- if self.shape[1]==1:
- return ImageData(*res)
- else:
- return BlockDataContainer(*res, shape=shape)
-
- def get_output_shape(self, xshape, adjoint=False):
- sshape = self.shape[1]
- oshape = self.shape[0]
- if adjoint:
- sshape = self.shape[0]
- oshape = self.shape[1]
- if sshape != xshape[0]:
- raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
- return (oshape, xshape[-1])
-
- def __rmul__(self, scalar):
- '''Defines the left multiplication with a scalar
-
- Args: scalar (number or iterable containing numbers):
-
- Returns: a block operator with Scaled Operators inside'''
- if isinstance (scalar, list) or isinstance(scalar, tuple) or \
- isinstance(scalar, numpy.ndarray):
- if len(scalar) != len(self.operators):
- raise ValueError('dimensions of scalars and operators do not match')
- scalars = scalar
- else:
- scalars = [scalar for _ in self.operators]
- # create a list of ScaledOperator-s
- ops = [ v * op for v,op in zip(scalars, self.operators)]
- #return BlockScaledOperator(self, scalars ,shape=self.shape)
- return type(self)(*ops, shape=self.shape)
- @property
- def T(self):
- '''Return the transpose of self
-
- The operators are re-read in a row-by-row fashion'''
- newshape = (self.shape[1], self.shape[0])
- oplist = []
- for col in range(newshape[1]):
- for row in range(newshape[0]):
- oplist.append(self.get_item(col,row))
- return type(self)(*oplist, shape=newshape)
-
- def domain_geometry(self):
- '''returns the domain of the BlockOperator
-
- If the shape of the BlockOperator is (N,1) the domain is a ImageGeometry or AcquisitionGeometry.
- Otherwise it is a BlockGeometry.
- '''
- if self.shape[1] == 1:
- # column BlockOperator
- return self.get_item(0,0).domain_geometry()
- else:
- shape = (self.shape[0], 1)
- return BlockGeometry(*[el.domain_geometry() for el in self.operators],
- shape=shape)
-
- def range_geometry(self):
- '''returns the range of the BlockOperator'''
- shape = (self.shape[1], 1)
- return BlockGeometry(*[el.range_geometry() for el in self.operators],
- shape=shape)
-if __name__ == '__main__':
- pass
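
The deleted BlockOperator stacks the per-row results in direct and sums transposed applications in adjoint. A plain NumPy sketch of a 2x1 block together with an adjoint (inner-product) test, mirroring the test added by this commit; all names are assumed for illustration:

import numpy as np

A1 = np.array([[1.0, 0.0], [0.0, 1.0]])   # identity block
A2 = np.array([[-1.0, 1.0]])              # finite-difference block

def block_direct(x):
    # K x = [A1 x, A2 x]
    return [A1 @ x, A2 @ x]

def block_adjoint(y):
    # K^T y = A1^T y_0 + A2^T y_1
    return A1.T @ y[0] + A2.T @ y[1]

x = np.array([2.0, 5.0])
y = [np.array([1.0, -1.0]), np.array([0.5])]

# adjoint test: <K x, y> == <x, K^T y>
lhs = sum(np.dot(a, b) for a, b in zip(block_direct(x), y))
rhs = np.dot(x, block_adjoint(y))
print(np.isclose(lhs, rhs))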
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py
deleted file mode 100644
index aeb6c53..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from numbers import Number
-import numpy
-from ccpi.optimisation.operators import ScaledOperator
-import functools
-
-class BlockScaledOperator(ScaledOperator):
-    '''BlockScaledOperator
-
- A class to represent the scalar multiplication of an Operator with a scalar.
-    It holds an operator and a scalar, and returns the result of the operator's
-    direct and adjoint methods multiplied by the scalar.
-    For the rest it behaves like the operator it holds.
-
- Args:
-        operator (Operator): an Operator or LinearOperator
- scalar (Number): a scalar multiplier
- Example:
- The scaled operator behaves like the following:
- sop = ScaledOperator(operator, scalar)
- sop.direct(x) = scalar * operator.direct(x)
- sop.adjoint(x) = scalar * operator.adjoint(x)
-        sop.norm() = abs(scalar) * operator.norm()
- sop.range_geometry() = operator.range_geometry()
- sop.domain_geometry() = operator.domain_geometry()
- '''
- def __init__(self, operator, scalar, shape=None):
- if shape is None:
- shape = operator.shape
-
- if isinstance(scalar, (list, tuple, numpy.ndarray)):
- size = functools.reduce(lambda x,y:x*y, shape, 1)
- if len(scalar) != size:
- raise ValueError('Scalar and operators size do not match: {}!={}'
- .format(len(scalar), len(operator)))
- self.scalar = scalar[:]
- print ("BlockScaledOperator ", self.scalar)
- elif isinstance (scalar, Number):
- self.scalar = scalar
- else:
-            raise TypeError('expected scalar to be a number or an iterable: got {}'.format(type(scalar)))
- self.operator = operator
- self.shape = shape
- def direct(self, x, out=None):
- print ("BlockScaledOperator self.scalar", self.scalar)
- #print ("self.scalar", self.scalar[0]* x.get_item(0).as_array())
- return self.scalar * (self.operator.direct(x, out=out))
- def adjoint(self, x, out=None):
- if self.operator.is_linear():
- return self.scalar * self.operator.adjoint(x, out=out)
- else:
- raise TypeError('Operator is not linear')
- def norm(self):
- return numpy.abs(self.scalar) * self.operator.norm()
- def range_geometry(self):
- return self.operator.range_geometry()
- def domain_geometry(self):
- return self.operator.domain_geometry()
- @property
- def T(self):
- '''Return the transposed of self'''
- #print ("transpose before" , self.shape)
- #shape = (self.shape[1], self.shape[0])
- ##self.shape = shape
- ##self.operator.shape = shape
- #print ("transpose" , shape)
- #return self
- return type(self)(self.operator.T, self.scalar) \ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py
deleted file mode 100644
index 24c4e4b..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ /dev/null
@@ -1,322 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Mar 1 22:51:17 2019
-
-@author: evangelos
-"""
-
-from ccpi.optimisation.operators import Operator
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.framework import ImageData, BlockDataContainer
-import numpy as np
-
-class FiniteDiff(Operator):
-
-    # Works for Neumann/Symmetric & periodic boundary conditions
- # TODO add central differences???
- # TODO not very well optimised, too many conditions
- # TODO add discretisation step, should get that from imageGeometry
-
- # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x']
- # Grad_order = ['channels', 'direction_y', 'direction_x']
- # Grad_order = ['direction_z', 'direction_y', 'direction_x']
- # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x']
-
- def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'):
- ''''''
- super(FiniteDiff, self).__init__()
- '''FIXME: domain and range should be geometries'''
- self.gm_domain = gm_domain
- self.gm_range = gm_range
- self.direction = direction
- self.bnd_cond = bnd_cond
-
- # Domain Geometry = Range Geometry if not stated
- if self.gm_range is None:
- self.gm_range = self.gm_domain
- # check direction and "length" of geometry
- if self.direction + 1 > len(self.gm_domain.shape):
- raise ValueError('Gradient directions more than geometry domain')
-
- #self.voxel_size = kwargs.get('voxel_size',1)
- # this wrongly assumes a homogeneous voxel size
- self.voxel_size = self.gm_domain.voxel_size_x
-
-
- def direct(self, x, out=None):
-
- x_asarr = x.as_array()
- x_sz = len(x.shape)
-
- if out is None:
- out = np.zeros(x.shape)
-
- fd_arr = out
-
- ######################## Direct for 2D ###############################
- if x_sz == 2:
-
- if self.direction == 1:
-
- np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 0:
-
- np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- ######################## Direct for 3D ###############################
- elif x_sz == 3:
-
- if self.direction == 0:
-
- np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 1:
-
- np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
-
- if self.direction == 2:
-
- np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] )
- else:
- raise ValueError('No valid boundary conditions')
-
- ######################## Direct for 4D ###############################
- elif x_sz == 4:
-
- if self.direction == 0:
- np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 1:
- np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 2:
- np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,-1,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 3:
- np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] )
-
- if self.bnd_cond == 'Neumann':
- pass
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] )
- else:
- raise ValueError('No valid boundary conditions')
-
- else:
- raise NotImplementedError
-
- res = out/self.voxel_size
- return type(x)(res)
-
- def adjoint(self, x, out=None):
-
- x_asarr = x.as_array()
- #x_asarr = x
- x_sz = len(x.shape)
-
- if out is None:
- out = np.zeros(x.shape)
-
- fd_arr = out
-
- ######################## Adjoint for 2D ###############################
- if x_sz == 2:
-
- if self.direction == 1:
-
- np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] )
- np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] )
-
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] )
-
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 0:
-
- np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] )
- np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] )
-
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] )
-
- else:
- raise ValueError('No valid boundary conditions')
-
- ######################## Adjoint for 3D ###############################
- elif x_sz == 3:
-
- if self.direction == 0:
-
- np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] )
- np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] )
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 1:
- np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] )
- np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] )
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 2:
- np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] )
- np.subtract( -x_asarr[:,:,-2], 0, out = fd_arr[:,:,-1] )
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] )
- else:
- raise ValueError('No valid boundary conditions')
-
- ######################## Adjoint for 4D ###############################
- elif x_sz == 4:
-
- if self.direction == 0:
- np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] )
- np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] )
-
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 1:
- np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] )
- np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] )
-
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
-
- if self.direction == 2:
- np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] )
- np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] )
-
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] )
- else:
- raise ValueError('No valid boundary conditions')
-
- if self.direction == 3:
- np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] )
-
- if self.bnd_cond == 'Neumann':
- np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] )
- np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] )
-
- elif self.bnd_cond == 'Periodic':
- np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] )
- else:
- raise ValueError('No valid boundary conditions')
-
- else:
- raise NotImplementedError
-
- res = out/self.voxel_size
- return type(x)(-res)
-
- def range_geometry(self):
- '''Returns the range geometry'''
- return self.gm_range
-
- def domain_geometry(self):
- '''Returns the domain geometry'''
- return self.gm_domain
-
- def norm(self):
- x0 = self.gm_domain.allocate()
- x0.fill( np.random.random_sample(x0.shape) )
- self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
- return self.s1
-
-
-
-
- \ No newline at end of file
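
As a minimal numpy sketch of the boundary handling described above (assumed values, not the CIL code): the forward difference leaves the last entry at zero under Neumann conditions and wraps around under periodic conditions; the adjoint in the class above is the corresponding transpose, scaled by the voxel size.

import numpy as np

u = np.array([1.0, 3.0, 6.0, 10.0])

# forward difference, Neumann BC: last entry stays zero
d_neumann = np.zeros_like(u)
d_neumann[:-1] = u[1:] - u[:-1]          # -> [2., 3., 4., 0.]

# forward difference, periodic BC: wrap around for the last entry
d_periodic = d_neumann.copy()
d_periodic[-1] = u[0] - u[-1]            # -> [2., 3., 4., -9.]
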
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py
deleted file mode 100644
index e00de0c..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Mar 1 22:50:04 2019
-
-@author: evangelos
-"""
-
-from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer
-import numpy
-from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff
-
-#%%
-
-class Gradient(LinearOperator):
-
- def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs):
-
- super(Gradient, self).__init__()
-
- self.gm_domain = gm_domain # Domain of Grad Operator
-
- self.correlation = kwargs.get('correlation','Space')
-
- if self.correlation=='Space':
- if self.gm_domain.channels>1:
- self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length-1)] )
- self.ind = numpy.arange(1,self.gm_domain.length)
- else:
- self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length) ] )
- self.ind = numpy.arange(self.gm_domain.length)
- elif self.correlation=='SpaceChannels':
- if self.gm_domain.channels>1:
- self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length)])
- self.ind = range(self.gm_domain.length)
- else:
- raise ValueError('No channels to correlate')
-
- self.bnd_cond = bnd_cond
-
-
- def direct(self, x, out=None):
-
- tmp = self.gm_range.allocate()
-
- for i in range(tmp.shape[0]):
- tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x))
- return tmp
-
- def adjoint(self, x, out=None):
-
- tmp = self.gm_domain.allocate()
- for i in range(x.shape[0]):
- tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i))
- return tmp
-
-
- def domain_geometry(self):
- return self.gm_domain
-
- def range_geometry(self):
- return self.gm_range
-
- def norm(self):
-
- x0 = self.gm_domain.allocate('random')
- self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0)
- return self.s1
-
- def __rmul__(self, scalar):
- return ScaledOperator(self, scalar)
-
-
- def matrix(self):
-
- tmp = self.gm_range.allocate()
-
- mat = []
- for i in range(tmp.shape[0]):
-
- spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond)
- mat.append(spMat.matrix())
-
- return BlockDataContainer(*mat)
-
-
- def sum_abs_row(self):
-
- tmp = self.gm_range.allocate()
- res = self.gm_domain.allocate()
- for i in range(tmp.shape[0]):
- spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond)
- res += spMat.sum_abs_row()
- return res
-
- def sum_abs_col(self):
-
- tmp = self.gm_range.allocate()
- res = []
- for i in range(tmp.shape[0]):
- spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond)
- res.append(spMat.sum_abs_col())
- return BlockDataContainer(*res)
-
-
-if __name__ == '__main__':
-
-
- from ccpi.optimisation.operators import Identity, BlockOperator
-
- M, N = 2, 3
- ig = ImageGeometry(M, N)
- arr = ig.allocate('random_int' )
-
- # check direct of Gradient and sparse matrix
- G = Gradient(ig)
- G_sp = G.matrix()
-
- res1 = G.direct(arr)
- res1y = numpy.reshape(G_sp[0].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F')
-
- print(res1[0].as_array())
- print(res1y)
-
- res1x = numpy.reshape(G_sp[1].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F')
-
- print(res1[1].as_array())
- print(res1x)
-
- #check sum abs row
- conc_spmat = numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() )))
- print(numpy.reshape(conc_spmat.sum(axis=0), ig.shape, 'F'))
- print(G.sum_abs_row().as_array())
-
- print(numpy.reshape(conc_spmat.sum(axis=1), ((2,) + ig.shape), 'F'))
-
- print(G.sum_abs_col()[0].as_array())
- print(G.sum_abs_col()[1].as_array())
-
- # Check Blockoperator sum abs col and row
-
- op1 = Gradient(ig)
- op2 = Identity(ig)
-
- B = BlockOperator( op1, op2)
-
- Brow = B.sum_abs_row()
- Bcol = B.sum_abs_col()
-
- concB = numpy.concatenate( (numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))), op2.matrix().toarray()))
-
- print(numpy.reshape(concB.sum(axis=0), ig.shape, 'F'))
- print(Brow.as_array())
-
- print(numpy.reshape(concB.sum(axis=1)[0:12], ((2,) + ig.shape), 'F'))
- print(Bcol[1].as_array())
-
-
-# print(numpy.concatene(G_sp[0].toarray()+ ))
-# print(G_sp[1].toarray())
-#
-# d1 = G.sum_abs_row()
-# print(d1.as_array())
-#
-# d2 = G_neum.sum_abs_col()
-## print(d2)
-#
-#
-# ###########################################################
- a = BlockDataContainer( BlockDataContainer(arr, arr), arr)
- b = BlockDataContainer( BlockDataContainer(arr+5, arr+3), arr+2)
- c = a/b
-
- print(c[0][0].as_array(), (arr/(arr+5)).as_array())
- print(c[0][1].as_array(), (arr/(arr+3)).as_array())
- print(c[1].as_array(), (arr/(arr+2)).as_array())
-
-
- a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr))
-#
-# c1 = arr + a
-# c2 = arr + a
-# c2 = a1 + arr
-#
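
The Gradient above stacks one FiniteDiff per direction into a BlockDataContainer, and its adjoint accumulates the per-direction adjoints (a discrete negative divergence). Below is a small self-contained numpy sketch of that structure with an adjointness check; it is an illustration under Neumann boundary assumptions, not the CIL implementation.

import numpy as np

def forward_diff(u, axis):
    # forward difference with Neumann BC along `axis`
    u = np.moveaxis(u, axis, 0)
    d = np.zeros_like(u)
    d[:-1] = u[1:] - u[:-1]
    return np.moveaxis(d, 0, axis)

def forward_diff_adjoint(v, axis):
    # transpose of forward_diff along `axis`
    v = np.moveaxis(v, axis, 0)
    a = np.zeros_like(v)
    a[0] = -v[0]
    a[1:-1] = v[:-2] - v[1:-1]
    a[-1] = v[-2]
    return np.moveaxis(a, 0, axis)

def gradient(u):
    # one component per axis, as Gradient stacks FiniteDiffs
    return [forward_diff(u, ax) for ax in range(u.ndim)]

def gradient_adjoint(p):
    # sum of per-axis adjoints: a discrete negative divergence
    return sum(forward_diff_adjoint(comp, ax) for ax, comp in enumerate(p))

rng = np.random.default_rng(0)
u = rng.standard_normal((4, 5))
p = [rng.standard_normal((4, 5)) for _ in range(2)]

lhs = sum(np.vdot(g, q) for g, q in zip(gradient(u), p))
rhs = np.vdot(u, gradient_adjoint(p))
assert np.isclose(lhs, rhs)      # <grad u, p> == <u, grad^T p>
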
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py
deleted file mode 100644
index a58a296..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Mar 6 19:30:51 2019
-
-@author: evangelos
-"""
-
-from ccpi.optimisation.operators import LinearOperator
-import scipy.sparse as sp
-import numpy as np
-from ccpi.framework import ImageData
-
-
-class Identity(LinearOperator):
-
- def __init__(self, gm_domain, gm_range=None):
-
- self.gm_domain = gm_domain
- self.gm_range = gm_range
- if self.gm_range is None:
- self.gm_range = self.gm_domain
-
- super(Identity, self).__init__()
-
- def direct(self,x,out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def adjoint(self,x, out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def norm(self):
- return 1.0
-
- def domain_geometry(self):
- return self.gm_domain
-
- def range_geometry(self):
- return self.gm_range
-
- def matrix(self):
-
- return sp.eye(np.prod(self.gm_domain.shape))
-
- def sum_abs_row(self):
-
- return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F')))
-
- def sum_abs_col(self):
-
- return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F')))
-
-
-if __name__ == '__main__':
-
- from ccpi.framework import ImageGeometry
-
- M, N = 2, 3
- ig = ImageGeometry(M, N)
- arr = ig.allocate('random_int')
-
- Id = Identity(ig)
- d = Id.matrix()
- print(d.toarray())
-
- d1 = Id.sum_abs_col()
- print(d1.as_array())
-
-
-
-
-
- \ No newline at end of file
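
A short numpy/scipy check of the matrix view used above: the identity's matrix is a sparse eye of size prod(shape), so its absolute row and column sums are all ones, which is what allocate(1) represents. A sketch with an assumed 2x3 geometry:

import numpy as np
import scipy.sparse as sp

shape = (2, 3)                                  # hypothetical image shape
I = sp.eye(np.prod(shape))

row_sums = np.asarray(abs(I).sum(axis=0)).reshape(shape, order='F')
col_sums = np.asarray(abs(I).sum(axis=1)).reshape(shape, order='F')
assert np.all(row_sums == 1) and np.all(col_sums == 1)
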
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py
deleted file mode 100644
index e19304f..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Mar 5 15:57:52 2019
-
-@author: ofn77899
-"""
-
-from ccpi.optimisation.operators import Operator
-
-
-class LinearOperator(Operator):
- '''A Linear Operator that maps from a space X <-> Y'''
- def __init__(self):
- super(LinearOperator, self).__init__()
- def is_linear(self):
-        '''Returns whether the operator is linear'''
- return True
- def adjoint(self,x, out=None):
-        '''returns the adjoint operation
-
- only available to linear operators'''
- raise NotImplementedError
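
The defining property of the adjoint declared above is <A x, y> = <x, A^T y> for every x and y; for a linear operator represented by a matrix, the adjoint is simply the transpose. A one-line numpy check (illustration only):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 5))
x, y = rng.standard_normal(5), rng.standard_normal(3)

assert np.isclose((A @ x) @ y, x @ (A.T @ y))   # <Ax, y> == <x, A^T y>
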
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py
deleted file mode 100644
index 2d2089b..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Mar 5 15:55:56 2019
-
-@author: ofn77899
-"""
-from ccpi.optimisation.operators.ScaledOperator import ScaledOperator
-
-class Operator(object):
- '''Operator that maps from a space X -> Y'''
- def is_linear(self):
-        '''Returns whether the operator is linear'''
- return False
- def direct(self,x, out=None):
- '''Returns the application of the Operator on x'''
- raise NotImplementedError
- def norm(self):
- '''Returns the norm of the Operator'''
- raise NotImplementedError
- def range_geometry(self):
- '''Returns the range of the Operator: Y space'''
- raise NotImplementedError
- def domain_geometry(self):
- '''Returns the domain of the Operator: X space'''
- raise NotImplementedError
- def __rmul__(self, scalar):
- '''Defines the multiplication by a scalar on the left
-
- returns a ScaledOperator'''
- return ScaledOperator(self, scalar)
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py
deleted file mode 100644
index adcc6d9..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from numbers import Number
-import numpy
-
-class ScaledOperator(object):
- '''ScaledOperator
-
- A class to represent the scalar multiplication of an Operator with a scalar.
-    It holds an operator and a scalar, and returns the result of the operator's
-    direct and adjoint methods multiplied by the scalar.
-    For the rest it behaves like the operator it holds.
-
- Args:
-        operator (Operator): an Operator or LinearOperator
- scalar (Number): a scalar multiplier
- Example:
- The scaled operator behaves like the following:
- sop = ScaledOperator(operator, scalar)
- sop.direct(x) = scalar * operator.direct(x)
- sop.adjoint(x) = scalar * operator.adjoint(x)
-        sop.norm() = abs(scalar) * operator.norm()
- sop.range_geometry() = operator.range_geometry()
- sop.domain_geometry() = operator.domain_geometry()
- '''
- def __init__(self, operator, scalar):
- super(ScaledOperator, self).__init__()
- if not isinstance (scalar, Number):
- raise TypeError('expected scalar: got {}'.format(type(scalar)))
- self.scalar = scalar
- self.operator = operator
- def direct(self, x, out=None):
- return self.scalar * self.operator.direct(x, out=out)
- def adjoint(self, x, out=None):
- if self.operator.is_linear():
- return self.scalar * self.operator.adjoint(x, out=out)
- else:
- raise TypeError('Operator is not linear')
- def norm(self):
- return numpy.abs(self.scalar) * self.operator.norm()
- def range_geometry(self):
- return self.operator.range_geometry()
- def domain_geometry(self):
- return self.operator.domain_geometry()
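
The scaling semantics above reduce to elementary linear algebra: (scalar * op).direct(x) equals scalar * op.direct(x), and the operator norm scales by |scalar|. A numpy sketch with a matrix standing in for the operator (assumed example, not CIL code):

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])    # stand-in for operator.direct
x = np.array([1.0, -1.0])
scalar = -3.0

assert np.allclose(scalar * (A @ x), (scalar * A) @ x)
assert np.isclose(abs(scalar) * np.linalg.norm(A, 2), np.linalg.norm(scalar * A, 2))
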
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py
deleted file mode 100644
index 1b88cba..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Apr 2 14:06:15 2019
-
-@author: vaggelis
-"""
-
-import scipy.sparse as sp
-import numpy as np
-from ccpi.framework import ImageData
-
-class SparseFiniteDiff():
-
- def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'):
-
- super(SparseFiniteDiff, self).__init__()
- self.gm_domain = gm_domain
- self.gm_range = gm_range
- self.direction = direction
- self.bnd_cond = bnd_cond
-
- if self.gm_range is None:
- self.gm_range = self.gm_domain
-
- self.get_dims = [i for i in gm_domain.shape]
-
- if self.direction + 1 > len(self.gm_domain.shape):
- raise ValueError('Gradient directions more than geometry domain')
-
- def matrix(self):
-
- i = self.direction
-
- mat = sp.spdiags(np.vstack([-np.ones((1,self.get_dims[i])),np.ones((1,self.get_dims[i]))]), [0,1], self.get_dims[i], self.get_dims[i], format = 'lil')
-
- if self.bnd_cond == 'Neumann':
- mat[-1,:] = 0
- elif self.bnd_cond == 'Periodic':
- mat[-1,0] = 1
-
- tmpGrad = mat if i == 0 else sp.eye(self.get_dims[0])
-
- for j in range(1, self.gm_domain.length):
-
- tmpGrad = sp.kron(mat, tmpGrad ) if j == i else sp.kron(sp.eye(self.get_dims[j]), tmpGrad )
-
- return tmpGrad
-
- def T(self):
- return self.matrix().T
-
- def direct(self, x):
-
- x_asarr = x.as_array()
- res = np.reshape( self.matrix() * x_asarr.flatten('F'), self.gm_domain.shape, 'F')
- return type(x)(res)
-
- def adjoint(self, x):
-
- x_asarr = x.as_array()
- res = np.reshape( self.matrix().T * x_asarr.flatten('F'), self.gm_domain.shape, 'F')
- return type(x)(res)
-
- def sum_abs_row(self):
-
- res = np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))
- res[res==0]=1
- return ImageData(res)
-
- def sum_abs_col(self):
-
- res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C'))
- res[res==0]=1
- return ImageData(res)
-
-if __name__ == '__main__':
-
- from ccpi.framework import ImageGeometry
- from ccpi.optimisation.operators import FiniteDiff
-
- # 2D
- M, N= 2, 3
- ig = ImageGeometry(M, N)
- arr = ig.allocate('random_int')
-
- for i in [0,1]:
-
- # Neumann
- sFD_neum = SparseFiniteDiff(ig, direction=i, bnd_cond='Neumann')
- G_neum = FiniteDiff(ig, direction=i, bnd_cond='Neumann')
-
- # Periodic
- sFD_per = SparseFiniteDiff(ig, direction=i, bnd_cond='Periodic')
- G_per = FiniteDiff(ig, direction=i, bnd_cond='Periodic')
-
- u_neum_direct = G_neum.direct(arr)
- u_neum_sp_direct = sFD_neum.direct(arr)
- np.testing.assert_array_almost_equal(u_neum_direct.as_array(), u_neum_sp_direct.as_array(), decimal=4)
-
- u_neum_adjoint = G_neum.adjoint(arr)
- u_neum_sp_adjoint = sFD_neum.adjoint(arr)
- np.testing.assert_array_almost_equal(u_neum_adjoint.as_array(), u_neum_sp_adjoint.as_array(), decimal=4)
-
- u_per_direct = G_neum.direct(arr)
- u_per_sp_direct = sFD_neum.direct(arr)
- np.testing.assert_array_almost_equal(u_per_direct.as_array(), u_per_sp_direct.as_array(), decimal=4)
-
- u_per_adjoint = G_per.adjoint(arr)
- u_per_sp_adjoint = sFD_per.adjoint(arr)
- np.testing.assert_array_almost_equal(u_per_adjoint.as_array(), u_per_sp_adjoint.as_array(), decimal=4)
-
- # 3D
- M, N, K = 2, 3, 4
- ig3D = ImageGeometry(M, N, K)
- arr3D = ig3D.allocate('random_int')
-
- for i in [0,1,2]:
-
- # Neumann
- sFD_neum3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Neumann')
- G_neum3D = FiniteDiff(ig3D, direction=i, bnd_cond='Neumann')
-
- # Periodic
- sFD_per3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Periodic')
- G_per3D = FiniteDiff(ig3D, direction=i, bnd_cond='Periodic')
-
- u_neum_direct3D = G_neum3D.direct(arr3D)
- u_neum_sp_direct3D = sFD_neum3D.direct(arr3D)
- np.testing.assert_array_almost_equal(u_neum_direct3D.as_array(), u_neum_sp_direct3D.as_array(), decimal=4)
-
- u_neum_adjoint3D = G_neum3D.adjoint(arr3D)
- u_neum_sp_adjoint3D = sFD_neum3D.adjoint(arr3D)
- np.testing.assert_array_almost_equal(u_neum_adjoint3D.as_array(), u_neum_sp_adjoint3D.as_array(), decimal=4)
-
- u_per_direct3D = G_neum3D.direct(arr3D)
- u_per_sp_direct3D = sFD_neum3D.direct(arr3D)
- np.testing.assert_array_almost_equal(u_per_direct3D.as_array(), u_per_sp_direct3D.as_array(), decimal=4)
-
- u_per_adjoint3D = G_per3D.adjoint(arr3D)
- u_per_sp_adjoint3D = sFD_per3D.adjoint(arr3D)
- np.testing.assert_array_almost_equal(u_per_adjoint3D.as_array(), u_per_sp_adjoint3D.as_array(), decimal=4)
-
- \ No newline at end of file
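
matrix() above assembles the finite-difference matrix for one direction as a Kronecker product of a 1D difference stencil with identities, consistent with the column-major ('F') flattening used in direct(). A small scipy sketch for direction 0 of an assumed 2x3 image, checked against the dense forward difference:

import numpy as np
import scipy.sparse as sp

M, N = 2, 3
u = np.arange(M * N, dtype=float).reshape(M, N)

def fd1d(n):
    # 1D forward-difference stencil with Neumann BC (last row zeroed)
    mat = sp.spdiags(np.vstack([-np.ones((1, n)), np.ones((1, n))]), [0, 1], n, n).tolil()
    mat[-1, :] = 0
    return mat.tocsr()

# derivative along axis 0 acts on each column of the 'F'-flattened image
D0 = sp.kron(sp.eye(N), fd1d(M))
res = (D0 @ u.flatten('F')).reshape(M, N, order='F')

expected = np.zeros_like(u)
expected[:-1, :] = u[1:, :] - u[:-1, :]
assert np.allclose(res, expected)
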
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
deleted file mode 100644
index d908e49..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Mar 1 22:53:55 2019
-
-@author: evangelos
-"""
-
-from ccpi.optimisation.operators import Operator
-from ccpi.optimisation.operators import FiniteDiff
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.framework import ImageData, DataContainer
-import numpy as np
-
-
-class SymmetrizedGradient(Operator):
-
- def __init__(self, gm_domain, gm_range, bnd_cond = 'Neumann', **kwargs):
-
- super(SymmetrizedGradient, self).__init__()
-
- self.gm_domain = gm_domain # Domain of Grad Operator
- self.gm_range = gm_range # Range of Grad Operator
- self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences
-
- # Kwargs Default options
- self.memopt = kwargs.get('memopt',False)
- self.correlation = kwargs.get('correlation','Space')
-
- #TODO not tested yet, operator norm???
- self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain))
-
-
- def direct(self, x, out=None):
-
- tmp = np.zeros(self.gm_range)
- tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0])
- tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1])
- tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) +
- FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) )
-
- return type(x)(tmp)
-
-
- def adjoint(self, x, out=None):
-
- tmp = np.zeros(self.gm_domain)
-
- tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \
- FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2])
-
- tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \
- FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1])
-
- return type(x)(tmp)
-
- def alloc_domain_dim(self):
- return ImageData(np.zeros(self.gm_domain))
-
- def alloc_range_dim(self):
- return ImageData(np.zeros(self.range_dim))
-
- def domain_dim(self):
- return self.gm_domain
-
- def range_dim(self):
- return self.gm_range
-
- def norm(self):
-# return np.sqrt(4*len(self.domainDim()))
- #TODO this takes time for big ImageData
- # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)
- x0 = ImageData(np.random.random_sample(self.domain_dim()))
- self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
- return self.s1
-
-
-
-if __name__ == '__main__':
-
- ###########################################################################
- ## Symmetrized Gradient
-
- N, M = 2, 3
- ig = (N,M)
- ig2 = (2,) + ig
- ig3 = (3,) + ig
- u1 = DataContainer(np.random.randint(10, size=ig2))
- w1 = DataContainer(np.random.randint(10, size=ig3))
-
- E = SymmetrizedGradient(ig2,ig3)
-
- d1 = E.direct(u1)
- d2 = E.adjoint(w1)
-
- LHS = (d1.as_array()[0]*w1.as_array()[0] + \
- d1.as_array()[1]*w1.as_array()[1] + \
- 2*d1.as_array()[2]*w1.as_array()[2]).sum()
-
- RHS = (u1.as_array()[0]*d2.as_array()[0] + \
- u1.as_array()[1]*d2.as_array()[1]).sum()
-
-
- print(LHS, RHS, E.norm())
-
-
-#
-
-
-
-
-
-
-
-
-
-
- \ No newline at end of file
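
For reference, the standard symmetrized gradient of a 2D vector field u = (u0, u1) is E(u) = 0.5 * (Du + Du^T), stored as the three unique components (d0 u0, d1 u1, 0.5*(d1 u0 + d0 u1)). A plain-numpy sketch of those components with Neumann forward differences (an illustration of the definition, not the deleted implementation above):

import numpy as np

def fwd(u, axis):
    # forward difference with Neumann BC
    u = np.moveaxis(u, axis, 0)
    d = np.zeros_like(u)
    d[:-1] = u[1:] - u[:-1]
    return np.moveaxis(d, 0, axis)

rng = np.random.default_rng(0)
u0 = rng.standard_normal((3, 4))
u1 = rng.standard_normal((3, 4))

E = [fwd(u0, 0),                         # d0 u0
     fwd(u1, 1),                         # d1 u1
     0.5 * (fwd(u0, 1) + fwd(u1, 0))]    # shared off-diagonal component
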
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py
deleted file mode 100644
index a7c5f09..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Mar 6 19:25:53 2019
-
-@author: evangelos
-"""
-
-import numpy as np
-from ccpi.framework import ImageData
-from ccpi.optimisation.operators import Operator
-
-class ZeroOp(Operator):
-
- def __init__(self, gm_domain, gm_range):
- self.gm_domain = gm_domain
- self.gm_range = gm_range
- super(ZeroOp, self).__init__()
-
- def direct(self,x,out=None):
- if out is None:
- return ImageData(np.zeros(self.gm_range))
- else:
- return ImageData(np.zeros(self.gm_range))
-
- def adjoint(self,x, out=None):
- if out is None:
- return ImageData(np.zeros(self.gm_domain))
- else:
- return ImageData(np.zeros(self.gm_domain))
-
- def norm(self):
- return 0
-
- def domain_dim(self):
- return self.gm_domain
-
- def range_dim(self):
- return self.gm_range \ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py
deleted file mode 100644
index 1c09faf..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Mar 5 15:56:27 2019
-
-@author: ofn77899
-"""
-
-from .Operator import Operator
-from .LinearOperator import LinearOperator
-from .ScaledOperator import ScaledOperator
-from .BlockOperator import BlockOperator
-from .BlockScaledOperator import BlockScaledOperator
-
-
-from .FiniteDifferenceOperator import FiniteDiff
-from .GradientOperator import Gradient
-from .SymmetrizedGradientOperator import SymmetrizedGradient
-from .IdentityOperator import Identity
-from .ZeroOperator import ZeroOp
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/ops.py b/Wrappers/Python/build/lib/ccpi/optimisation/ops.py
deleted file mode 100644
index 6afb97a..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/ops.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy
-from scipy.sparse.linalg import svds
-from ccpi.framework import DataContainer
-from ccpi.framework import AcquisitionData
-from ccpi.framework import ImageData
-from ccpi.framework import ImageGeometry
-from ccpi.framework import AcquisitionGeometry
-from numbers import Number
-# Maybe operators need to know what types they take as inputs/outputs
-# to not just use generic DataContainer
-
-
-class Operator(object):
- '''Operator that maps from a space X -> Y'''
- def __init__(self, **kwargs):
- self.scalar = 1
- def is_linear(self):
-        '''Returns whether the operator is linear'''
- return False
- def direct(self,x, out=None):
- raise NotImplementedError
- def size(self):
- # To be defined for specific class
- raise NotImplementedError
- def norm(self):
- raise NotImplementedError
- def allocate_direct(self):
- '''Allocates memory on the Y space'''
- raise NotImplementedError
- def allocate_adjoint(self):
- '''Allocates memory on the X space'''
- raise NotImplementedError
- def range_geometry(self):
- raise NotImplementedError
- def domain_geometry(self):
- raise NotImplementedError
- def __rmul__(self, other):
- '''reverse multiplication of Operator with number sets the variable scalar in the Operator'''
- assert isinstance(other, Number)
- self.scalar = other
- return self
-
-class LinearOperator(Operator):
- '''Operator that maps from a space X -> Y'''
- def is_linear(self):
-        '''Returns whether the operator is linear'''
- return True
- def adjoint(self,x, out=None):
- raise NotImplementedError
-
-class Identity(Operator):
- def __init__(self):
- self.s1 = 1.0
- self.L = 1
- super(Identity, self).__init__()
-
- def direct(self,x,out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def adjoint(self,x, out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def size(self):
- return NotImplemented
-
- def get_max_sing_val(self):
- return self.s1
-
-class TomoIdentity(Operator):
- def __init__(self, geometry, **kwargs):
- super(TomoIdentity, self).__init__()
- self.s1 = 1.0
- self.geometry = geometry
-
- def is_linear(self):
- return True
- def direct(self,x,out=None):
-
- if out is None:
- if self.scalar != 1:
- return x * self.scalar
- return x.copy()
- else:
- if self.scalar != 1:
- out.fill(x * self.scalar)
- return
- out.fill(x)
- return
-
- def adjoint(self,x, out=None):
- return self.direct(x, out)
-
- def size(self):
- return NotImplemented
-
- def get_max_sing_val(self):
- return self.s1
- def allocate_direct(self):
- if issubclass(type(self.geometry), ImageGeometry):
- return ImageData(geometry=self.geometry)
- elif issubclass(type(self.geometry), AcquisitionGeometry):
- return AcquisitionData(geometry=self.geometry)
- else:
-            raise ValueError("Wrong geometry type: expected ImageGeometry or AcquisitionGeometry, got ", type(self.geometry))
- def allocate_adjoint(self):
- return self.allocate_direct()
- def range_geometry(self):
- return self.geometry
- def domain_geometry(self):
- return self.geometry
-
-
-
-class FiniteDiff2D(Operator):
- def __init__(self):
- self.s1 = 8.0
- super(FiniteDiff2D, self).__init__()
-
- def direct(self,x, out=None):
- '''Forward differences with Neumann BC.'''
- # FIXME this seems to be working only with numpy arrays
-
- d1 = numpy.zeros_like(x.as_array())
- d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1]
- d2 = numpy.zeros_like(x.as_array())
- d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:]
- d = numpy.stack((d1,d2),0)
- #x.geometry.voxel_num_z = 2
- return type(x)(d,False,geometry=x.geometry)
-
- def adjoint(self,x, out=None):
- '''Backward differences, Neumann BC.'''
- Nrows = x.get_dimension_size('horizontal_x')
- Ncols = x.get_dimension_size('horizontal_y')
- Nchannels = 1
- if len(x.shape) == 4:
- Nchannels = x.get_dimension_size('channel')
- zer = numpy.zeros((Nrows,1))
- xxx = x.as_array()[0,:,:-1]
- #
- h = numpy.concatenate((zer,xxx), 1)
- h -= numpy.concatenate((xxx,zer), 1)
-
- zer = numpy.zeros((1,Ncols))
- xxx = x.as_array()[1,:-1,:]
- #
- v = numpy.concatenate((zer,xxx), 0)
- v -= numpy.concatenate((xxx,zer), 0)
- return type(x)(h + v, False, geometry=x.geometry)
-
- def size(self):
- return NotImplemented
-
- def get_max_sing_val(self):
- return self.s1
-
-def PowerMethodNonsquareOld(op,numiters):
- # Initialise random
- # Jakob's
- #inputsize = op.size()[1]
- #x0 = ImageContainer(numpy.random.randn(*inputsize)
- # Edo's
- #vg = ImageGeometry(voxel_num_x=inputsize[0],
- # voxel_num_y=inputsize[1],
- # voxel_num_z=inputsize[2])
- #
- #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
- #print (x0)
- #x0.fill(numpy.random.randn(*x0.shape))
-
- x0 = op.create_image_data()
-
- s = numpy.zeros(numiters)
- # Loop
- for it in numpy.arange(numiters):
- x1 = op.adjoint(op.direct(x0))
- x1norm = numpy.sqrt((x1**2).sum())
- #print ("x0 **********" ,x0)
- #print ("x1 **********" ,x1)
- s[it] = (x1*x0).sum() / (x0*x0).sum()
- x0 = (1.0/x1norm)*x1
- return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
-
-#def PowerMethod(op,numiters):
-# # Initialise random
-# x0 = np.random.randn(400)
-# s = np.zeros(numiters)
-# # Loop
-# for it in np.arange(numiters):
-# x1 = np.dot(op.transpose(),np.dot(op,x0))
-# x1norm = np.sqrt(np.sum(np.dot(x1,x1)))
-# s[it] = np.dot(x1,x0) / np.dot(x1,x0)
-# x0 = (1.0/x1norm)*x1
-# return s, x0
-
-
-def PowerMethodNonsquare(op,numiters , x0=None):
- # Initialise random
- # Jakob's
- # inputsize , outputsize = op.size()
- #x0 = ImageContainer(numpy.random.randn(*inputsize)
- # Edo's
- #vg = ImageGeometry(voxel_num_x=inputsize[0],
- # voxel_num_y=inputsize[1],
- # voxel_num_z=inputsize[2])
- #
- #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
- #print (x0)
- #x0.fill(numpy.random.randn(*x0.shape))
-
- if x0 is None:
- #x0 = op.create_image_data()
- x0 = op.allocate_direct()
- x0.fill(numpy.random.randn(*x0.shape))
-
- s = numpy.zeros(numiters)
- # Loop
- for it in numpy.arange(numiters):
- x1 = op.adjoint(op.direct(x0))
- #x1norm = numpy.sqrt((x1*x1).sum())
- x1norm = x1.norm()
- #print ("x0 **********" ,x0)
- #print ("x1 **********" ,x1)
- s[it] = (x1*x0).sum() / (x0.squared_norm())
- x0 = (1.0/x1norm)*x1
- return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
-
-class LinearOperatorMatrix(Operator):
- def __init__(self,A):
- self.A = A
- self.s1 = None # Largest singular value, initially unknown
- super(LinearOperatorMatrix, self).__init__()
-
- def direct(self,x, out=None):
- if out is None:
- return type(x)(numpy.dot(self.A,x.as_array()))
- else:
- numpy.dot(self.A, x.as_array(), out=out.as_array())
-
-
- def adjoint(self,x, out=None):
- if out is None:
- return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
- else:
- numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array())
-
-
- def size(self):
- return self.A.shape
-
- def get_max_sing_val(self):
- # If unknown, compute and store. If known, simply return it.
- if self.s1 is None:
- self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
- return self.s1
- else:
- return self.s1
- def allocate_direct(self):
- '''allocates the memory to hold the result of adjoint'''
- #numpy.dot(self.A.transpose(),x.as_array())
- M_A, N_A = self.A.shape
- out = numpy.zeros((N_A,1))
- return DataContainer(out)
- def allocate_adjoint(self):
- '''allocate the memory to hold the result of direct'''
- #numpy.dot(self.A.transpose(),x.as_array())
- M_A, N_A = self.A.shape
- out = numpy.zeros((M_A,1))
- return DataContainer(out)
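
PowerMethodNonsquare above runs power iteration on op.adjoint(op.direct(x)), i.e. on A^T A, so the square root of the converged Rayleigh quotient estimates the largest singular value ||A||_2. A dense numpy sketch of the same iteration (illustrative, with an assumed random matrix):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 12))

x = rng.standard_normal(12)
for _ in range(500):
    y = A.T @ (A @ x)                 # op.adjoint(op.direct(x))
    s = (y @ x) / (x @ x)             # Rayleigh quotient for A^T A
    x = y / np.linalg.norm(y)

assert np.isclose(np.sqrt(s), np.linalg.norm(A, 2), rtol=1e-2)
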
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py b/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py
deleted file mode 100644
index 263a7cd..0000000
--- a/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# Copyright 2018 Matthias Ehrhardt, Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import numpy
-
-from ccpi.optimisation.funcs import Function
-from ccpi.framework import ImageData
-from ccpi.framework import AcquisitionData
-
-
-class spdhg():
- """Computes a saddle point with a stochastic PDHG.
-
- This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that
-
-    (x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i x> - f*[i](y_i) + g(x)
-
- where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
- proper functionals. For this algorithm, they all may be non-smooth and no
- strong convexity is assumed.
-
- Parameters
- ----------
- f : list of functions
- Functionals Y[i] -> IR_infty that all have a convex conjugate with a
- proximal operator, i.e.
- f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i].
- g : function
- Functional X -> IR_infty that has a proximal operator, i.e.
- g.prox(tau) : X -> X.
- A : list of functions
- Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
- x : primal variable, optional
- By default equals 0.
- y : dual variable, optional
- Part of a product space. By default equals 0.
- z : variable, optional
- Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
- tau : scalar / vector / matrix, optional
- Step size for primal variable. Note that the proximal operator of g
- has to be well-defined for this input.
- sigma : scalar, optional
- Scalar / vector / matrix used as step size for dual variable. Note that
- the proximal operator related to f (see above) has to be well-defined
- for this input.
- prob : list of scalars, optional
- Probabilities prob[i] that a subset i is selected in each iteration.
- If fun_select is not given, then the sum of all probabilities must
- equal 1.
- A_norms : list of scalars, optional
- Norms of the operators in A. Can be used to determine the step sizes
- tau and sigma and the probabilities prob.
- fun_select : function, optional
- Function that selects blocks at every iteration IN -> {1,...,n}. By
- default this is serial sampling, fun_select(k) selects an index
- i \in {1,...,n} with probability prob[i].
-
- References
- ----------
- [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
- *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
- and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808
- (2018) http://doi.org/10.1007/s10851-010-0251-1
-
- [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
- A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
- stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
- 58 (2017) http://doi.org/10.1117/12.2272946.
-
- [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster
- PET Reconstruction with Non-Smooth Priors by Randomization and
- Preconditioning*. (2018) ArXiv: http://arxiv.org/abs/1808.07150
- """
-
- def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None,
- prob=None, A_norms=None, fun_select=None):
- # fun_select is optional and by default performs serial sampling
-
- if x is None:
- x = A[0].allocate_direct(0)
-
- if y is None:
- if z is not None:
- raise ValueError('y and z have to be defaulted together')
-
- y = [Ai.allocate_adjoint(0) for Ai in A]
- z = 0 * x.copy()
-
- else:
- if z is None:
- raise ValueError('y and z have to be defaulted together')
-
- if A_norms is not None:
- if tau is not None or sigma is not None or prob is not None:
- raise ValueError('Either A_norms or (tau, sigma, prob) must '
- 'be given')
-
- tau = 1 / sum(A_norms)
- sigma = [1 / nA for nA in A_norms]
- prob = [nA / sum(A_norms) for nA in A_norms]
-
- #uniform prob, needs different sigma and tau
- #n = len(A)
- #prob = [1./n] * n
-
- if fun_select is None:
- if prob is None:
- raise ValueError('prob was not determined')
-
- def fun_select(k):
- return [int(numpy.random.choice(len(A), 1, p=prob))]
-
- self.iter = 0
- self.x = x
-
- self.y = y
- self.z = z
-
- self.f = f
- self.g = g
- self.A = A
- self.tau = tau
- self.sigma = sigma
- self.prob = prob
- self.fun_select = fun_select
-
- # Initialize variables
- self.z_relax = z.copy()
- self.tmp = self.x.copy()
-
- def update(self):
- # select block
- selected = self.fun_select(self.iter)
-
- # update primal variable
- #tmp = (self.x - self.tau * self.z_relax).as_array()
- #self.x.fill(self.g.prox(tmp, self.tau))
- self.tmp = - self.tau * self.z_relax
- self.tmp += self.x
- self.x = self.g.prox(self.tmp, self.tau)
-
- # update dual variable and z, z_relax
- self.z_relax = self.z.copy()
- for i in selected:
- # save old yi
- y_old = self.y[i].copy()
-
- # y[i]= prox(tmp)
- tmp = y_old + self.sigma[i] * self.A[i].direct(self.x)
- self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i])
-
- # update adjoint of dual variable
- dz = self.A[i].adjoint(self.y[i] - y_old)
- self.z += dz
-
- # compute extrapolation
- self.z_relax += (1 + 1 / self.prob[i]) * dz
-
- self.iter += 1
-
-
-## Functions
-
-class KullbackLeibler(Function):
- def __init__(self, data, background):
- self.data = data
- self.background = background
- self.__offset = None
-
- def __call__(self, x):
-        """Return the KL-divergence at the point ``x``.
-
-        If any component of ``x`` is non-positive, the value is positive
- infinity.
-
- Needs one extra array of memory of the size of `prior`.
- """
-
- # define short variable names
- y = self.data
- r = self.background
-
- # Compute
- # sum(x + r - y + y * log(y / (x + r)))
- # = sum(x - y * log(x + r)) + self.offset
- # Assume that
- # x + r > 0
-
- # sum the result up
- obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset()
-
- if numpy.isnan(obj):
- # In this case, some element was less than or equal to zero
- return numpy.inf
- else:
- return obj
-
- @property
- def convex_conj(self):
- """The convex conjugate functional of the KL-functional."""
- return KullbackLeiblerConvexConjugate(self.data, self.background)
-
- def offset(self):
- """The offset which is independent of the unknown."""
-
- if self.__offset is None:
- tmp = self.domain.element()
-
- # define short variable names
- y = self.data
- r = self.background
-
- tmp = self.domain.element(numpy.maximum(y, 1))
- tmp = r - y + y * numpy.log(tmp)
-
- # sum the result up
- self.__offset = numpy.sum(tmp)
-
- return self.__offset
-
-# def __repr__(self):
-# """to be added???"""
-# """Return ``repr(self)``."""
- # return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,
- ## self.domain, self.data,
- # self.background)
-
-
-class KullbackLeiblerConvexConjugate(Function):
- """The convex conjugate of Kullback-Leibler divergence functional.
-
- Notes
- -----
- The functional :math:`F^*` with prior :math:`g>0` is given by:
-
- .. math::
- F^*(x)
- =
- \\begin{cases}
- \\sum_{i} \left( -g_i \ln(1 - x_i) \\right)
- & \\text{if } x_i < 1 \\forall i
- \\\\
- +\\infty & \\text{else}
- \\end{cases}
-
- See Also
- --------
- KullbackLeibler : convex conjugate functional
- """
-
- def __init__(self, data, background):
- self.data = data
- self.background = background
-
- def __call__(self, x):
- y = self.data
- r = self.background
-
- tmp = numpy.sum(- x * r - y * numpy.log(1 - x))
-
- if numpy.isnan(tmp):
- # In this case, some element was larger than or equal to one
- return numpy.inf
- else:
- return tmp
-
-
- def prox(self, x, tau, out=None):
- # Let y = data, r = background, z = x + tau * r
- # Compute 0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y))
- # Currently it needs 3 extra copies of memory.
-
- if out is None:
- out = x.copy()
-
- # define short variable names
- try: # this should be standard SIRF/CIL mode
- y = self.data.as_array()
- r = self.background.as_array()
- x = x.as_array()
-
- try:
- taua = tau.as_array()
- except:
- taua = tau
-
- z = x + tau * r
-
- out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)))
-
- return out
-
- except: # e.g. for NumPy
- y = self.data
- r = self.background
-
- try:
- taua = tau.as_array()
- except:
- taua = tau
-
- z = x + tau * r
-
- out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))
-
- return out
-
- @property
- def convex_conj(self):
- return KullbackLeibler(self.data, self.background)
-
-
-def mult(x, y):
- try:
- xa = x.as_array()
- except:
- xa = x
-
- out = y.clone()
- out.fill(xa * y.as_array())
-
- return out
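
The closed-form prox used in KullbackLeiblerConvexConjugate.prox above, v = 0.5*(z + 1 - sqrt((z-1)^2 + 4*tau*y)) with z = x + tau*r, solves the stationarity condition of f*(v) + |v - x|^2/(2*tau), where f*(v) = -v*r - y*log(1 - v). A small numpy check of that optimality condition with hypothetical values:

import numpy as np

rng = np.random.default_rng(0)
y = rng.uniform(0.5, 2.0, size=5)      # data
r = rng.uniform(0.1, 0.5, size=5)      # background
x = rng.uniform(-1.0, 0.5, size=5)     # point at which the prox is evaluated
tau = 0.7

z = x + tau * r
v = 0.5 * (z + 1 - np.sqrt((z - 1) ** 2 + 4 * tau * y))

grad = -r + y / (1 - v) + (v - x) / tau   # d/dv [ f*(v) + (v - x)^2 / (2 tau) ]
assert np.allclose(grad, 0, atol=1e-9)
assert np.all(v < 1)                      # stays inside the domain of f*
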
diff --git a/Wrappers/Python/build/lib/ccpi/processors.py b/Wrappers/Python/build/lib/ccpi/processors.py
deleted file mode 100644
index ccef410..0000000
--- a/Wrappers/Python/build/lib/ccpi/processors.py
+++ /dev/null
@@ -1,514 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\
- AcquisitionGeometry, ImageGeometry, ImageData
-from ccpi.reconstruction.parallelbeam import alg as pbalg
-import numpy
-from scipy import ndimage
-
-import matplotlib.pyplot as plt
-
-
-class Normalizer(DataProcessor):
- '''Normalization based on flat and dark
-
-    This processor reads in an AcquisitionData and normalises it based on
- the instrument reading with and without incident photons or neutrons.
-
- Input: AcquisitionData
- Parameter: 2D projection with flat field (or stack)
- 2D projection with dark field (or stack)
-    Output: AcquisitionData
- '''
-
- def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5):
- kwargs = {
- 'flat_field' : flat_field,
- 'dark_field' : dark_field,
- # very small number. Used when there is a division by zero
- 'tolerance' : tolerance
- }
-
- #DataProcessor.__init__(self, **kwargs)
- super(Normalizer, self).__init__(**kwargs)
- if not flat_field is None:
- self.set_flat_field(flat_field)
- if not dark_field is None:
- self.set_dark_field(dark_field)
-
- def check_input(self, dataset):
- if dataset.number_of_dimensions == 3 or\
- dataset.number_of_dimensions == 2:
- return True
- else:
- raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
- def set_dark_field(self, df):
- if type(df) is numpy.ndarray:
- if len(numpy.shape(df)) == 3:
- raise ValueError('Dark Field should be 2D')
- elif len(numpy.shape(df)) == 2:
- self.dark_field = df
- elif issubclass(type(df), DataContainer):
- self.dark_field = self.set_dark_field(df.as_array())
-
- def set_flat_field(self, df):
- if type(df) is numpy.ndarray:
- if len(numpy.shape(df)) == 3:
- raise ValueError('Flat Field should be 2D')
- elif len(numpy.shape(df)) == 2:
- self.flat_field = df
- elif issubclass(type(df), DataContainer):
- self.flat_field = self.set_flat_field(df.as_array())
-
- @staticmethod
- def normalize_projection(projection, flat, dark, tolerance):
- a = (projection - dark)
- b = (flat-dark)
- with numpy.errstate(divide='ignore', invalid='ignore'):
- c = numpy.true_divide( a, b )
- c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0
- return c
-
- @staticmethod
- def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark):
- '''returns the estimated relative error of the normalised projection
-
- n = (projection - dark) / (flat - dark)
- Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d)
- '''
- a = (projection - dark)
- b = (flat-dark)
- df = delta_flat / flat
- dd = delta_dark / dark
- rel_norm_error = (b + a) / (b * a) * (df + dd)
- return rel_norm_error
-
- def process(self, out=None):
-
- projections = self.get_input()
- dark = self.dark_field
- flat = self.flat_field
-
- if projections.number_of_dimensions == 3:
- if not (projections.shape[1:] == dark.shape and \
- projections.shape[1:] == flat.shape):
- raise ValueError('Flats/Dark and projections size do not match.')
-
-
- a = numpy.asarray(
- [ Normalizer.normalize_projection(
- projection, flat, dark, self.tolerance) \
- for projection in projections.as_array() ]
- )
- elif projections.number_of_dimensions == 2:
- a = Normalizer.normalize_projection(projections.as_array(),
- flat, dark, self.tolerance)
- y = type(projections)( a , True,
- dimension_labels=projections.dimension_labels,
- geometry=projections.geometry)
- return y
-
-
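
A compact numpy illustration of normalize_projection above, with assumed values: the projection is scaled as (projection - dark) / (flat - dark) and any non-finite pixel (for example where flat equals dark) is replaced by the tolerance.

import numpy as np

projection = np.array([[12.0, 9.0], [7.0, 5.0]])
flat = np.array([[10.0, 10.0], [10.0, 5.0]])
dark = np.array([[2.0, 1.0], [3.0, 5.0]])
tolerance = 1e-5

with np.errstate(divide='ignore', invalid='ignore'):
    c = np.true_divide(projection - dark, flat - dark)
c[~np.isfinite(c)] = tolerance        # last pixel: 0/0 -> tolerance
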
-class CenterOfRotationFinder(DataProcessor):
- '''Processor to find the center of rotation in a parallel beam experiment
-
-    This processor reads in an AcquisitionDataSet and finds the center of rotation
- based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078
-
- Input: AcquisitionDataSet
-
- Output: float. center of rotation in pixel coordinate
- '''
-
- def __init__(self):
- kwargs = {
-
- }
-
- #DataProcessor.__init__(self, **kwargs)
- super(CenterOfRotationFinder, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- if dataset.number_of_dimensions == 3:
- if dataset.geometry.geom_type == 'parallel':
- return True
- else:
- raise ValueError('{0} is suitable only for parallel beam geometry'\
- .format(self.__class__.__name__))
- else:
- raise ValueError("Expected input dimensions is 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
-
- # #########################################################################
- # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #
- # #
- # Copyright 2015. UChicago Argonne, LLC. This software was produced #
- # under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
- # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
- # U.S. Department of Energy. The U.S. Government has rights to use, #
- # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
- # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
- # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
- # modified to produce derivative works, such modified software should #
- # be clearly marked, so as not to confuse it with the version available #
- # from ANL. #
- # #
- # Additionally, redistribution and use in source and binary forms, with #
- # or without modification, are permitted provided that the following #
- # conditions are met: #
- # #
- # * Redistributions of source code must retain the above copyright #
- # notice, this list of conditions and the following disclaimer. #
- # #
- # * Redistributions in binary form must reproduce the above copyright #
- # notice, this list of conditions and the following disclaimer in #
- # the documentation and/or other materials provided with the #
- # distribution. #
- # #
- # * Neither the name of UChicago Argonne, LLC, Argonne National #
- # Laboratory, ANL, the U.S. Government, nor the names of its #
- # contributors may be used to endorse or promote products derived #
- # from this software without specific prior written permission. #
- # #
- # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
- # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
- # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
- # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
- # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
- # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
- # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
- # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
- # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
- # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
- # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
- # POSSIBILITY OF SUCH DAMAGE. #
- # #########################################################################
-
- @staticmethod
- def as_ndarray(arr, dtype=None, copy=False):
- if not isinstance(arr, numpy.ndarray):
- arr = numpy.array(arr, dtype=dtype, copy=copy)
- return arr
-
- @staticmethod
- def as_dtype(arr, dtype, copy=False):
- if not arr.dtype == dtype:
- arr = numpy.array(arr, dtype=dtype, copy=copy)
- return arr
-
- @staticmethod
- def as_float32(arr):
- arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32)
- return CenterOfRotationFinder.as_dtype(arr, numpy.float32)
-
-
-
-
- @staticmethod
- def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5,
- ratio=2., drop=20):
- """
- Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`.
-
- Parameters
- ----------
- tomo : ndarray
- 3D tomographic data.
- ind : int, optional
- Index of the slice to be used for reconstruction.
- smin, smax : int, optional
-            Coarse search range, in pixels, relative to the horizontal center of the sinogram.
- srad : float, optional
- Fine search radius.
- step : float, optional
- Step of fine searching.
- ratio : float, optional
-            The ratio between the FOV of the camera and the size of the object.
- It's used to generate the mask.
- drop : int, optional
- Drop lines around vertical center of the mask.
-
- Returns
- -------
- float
- Rotation axis location.
-
- Notes
- -----
-        The function may not yield a correct estimate if:
-
- - the sample size is bigger than the field of view of the camera.
-          In this case the ``ratio`` argument needs to be set larger
- than the default of 2.0.
-
- - there is distortion in the imaging hardware. If there's
- no correction applied, the center of the projection image may
- yield a better estimate.
-
-        - the sample contrast is weak. Paganin's filter needs to be applied
- to overcome this.
-
- - the sample was changed during the scan.
- """
- tomo = CenterOfRotationFinder.as_float32(tomo)
-
- if ind is None:
- ind = tomo.shape[1] // 2
- _tomo = tomo[:, ind, :]
-
-
-
-        # Reduce noise with smoothing filters; use different filters for the coarse and fine searches
- _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1))
- _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2))
-
- # Coarse and fine searches for finding the rotation center.
- if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k)
- #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :]
- #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop)
- #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop)
- init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin,
- smax, ratio, drop)
- fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,
- step, init_cen,
- ratio, drop)
- else:
- init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs,
- smin, smax,
- ratio, drop)
- fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,
- step, init_cen,
- ratio, drop)
-
- #logger.debug('Rotation center search finished: %i', fine_cen)
- return fine_cen
-
-
- @staticmethod
- def _search_coarse(sino, smin, smax, ratio, drop):
- """
- Coarse search for finding the rotation center.
- """
- (Nrow, Ncol) = sino.shape
- centerfliplr = (Ncol - 1.0) / 2.0
-
-        # Copy the sinogram and flip it left-right in order to
-        # make a full [0; 2Pi] sinogram
- _copy_sino = numpy.fliplr(sino[1:])
-
- # This image is used for compensating the shift of sinogram 2
- temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32')
- temp_img[:] = sino[-1]
-
- # Start coarse search in which the shift step is 1
- listshift = numpy.arange(smin, smax + 1)
- listmetric = numpy.zeros(len(listshift), dtype='float32')
- mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol,
- 0.5 * ratio * Ncol, drop)
- for i in listshift:
- _sino = numpy.roll(_copy_sino, i, axis=1)
- if i >= 0:
- _sino[:, 0:i] = temp_img[:, 0:i]
- else:
- _sino[:, i:] = temp_img[:, i:]
- listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift(
- #pyfftw.interfaces.numpy_fft.fft2(
- # numpy.vstack((sino, _sino)))
- numpy.fft.fft2(numpy.vstack((sino, _sino)))
- )) * mask)
- minpos = numpy.argmin(listmetric)
- return centerfliplr + listshift[minpos] / 2.0
-
- @staticmethod
- def _search_fine(sino, srad, step, init_cen, ratio, drop):
- """
- Fine search for finding the rotation center.
- """
- Nrow, Ncol = sino.shape
- centerfliplr = (Ncol + 1.0) / 2.0 - 1.0
-        # Used to shift sinogram 2 to the raw CoR.
- shiftsino = numpy.int16(2 * (init_cen - centerfliplr))
- _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1)
- if init_cen <= centerfliplr:
- lefttake = numpy.int16(numpy.ceil(srad + 1))
- righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1))
- else:
- lefttake = numpy.int16(numpy.ceil(
- init_cen - (Ncol - 1 - init_cen) + srad + 1))
- righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1))
- Ncol1 = righttake - lefttake + 1
- mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1,
- 0.5 * ratio * Ncol, drop)
- numshift = numpy.int16((2 * srad) / step) + 1
- listshift = numpy.linspace(-srad, srad, num=numshift)
- listmetric = numpy.zeros(len(listshift), dtype='float32')
- factor1 = numpy.mean(sino[-1, lefttake:righttake])
- num1 = 0
- for i in listshift:
- _sino = ndimage.interpolation.shift(
- _copy_sino, (0, i), prefilter=False)
- factor2 = numpy.mean(_sino[0,lefttake:righttake])
- _sino = _sino * factor1 / factor2
- sinojoin = numpy.vstack((sino, _sino))
- listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift(
- #pyfftw.interfaces.numpy_fft.fft2(
- # sinojoin[:, lefttake:righttake + 1])
- numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1])
- )) * mask)
- num1 = num1 + 1
- minpos = numpy.argmin(listmetric)
- return init_cen + listshift[minpos] / 2.0
-
- @staticmethod
- def _create_mask(nrow, ncol, radius, drop):
- du = 1.0 / ncol
- dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi)
- centerrow = numpy.ceil(nrow / 2) - 1
- centercol = numpy.ceil(ncol / 2) - 1
- # added by Edoardo Pasca
- centerrow = int(centerrow)
- centercol = int(centercol)
- mask = numpy.zeros((nrow, ncol), dtype='float32')
- for i in range(nrow):
- num1 = numpy.round(((i - centerrow) * dv / radius) / du)
- (p1, p2) = numpy.int16(numpy.clip(numpy.sort(
- (-num1 + centercol, num1 + centercol)), 0, ncol - 1))
- mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32')
- if drop < centerrow:
- mask[centerrow - drop:centerrow + drop + 1,
- :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32')
- mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32')
- return mask
-
- def process(self, out=None):
-
- projections = self.get_input()
-
- cor = CenterOfRotationFinder.find_center_vo(projections.as_array())
-
- return cor
-
-
-class AcquisitionDataPadder(DataProcessor):
-    '''Pad acquisition data around the center of rotation
-
-    This processor reads in an AcquisitionData and pads the projections
-    horizontally so that the center of rotation lies at the middle of the
-    padded width.
-
-    Input: AcquisitionData
-    Parameter: center of rotation, acquisition geometry, pad value
-    Output: padded AcquisitionData
- '''
-
- def __init__(self,
- center_of_rotation = None,
- acquisition_geometry = None,
- pad_value = 1e-5):
- kwargs = {
- 'acquisition_geometry' : acquisition_geometry,
- 'center_of_rotation' : center_of_rotation,
- 'pad_value' : pad_value
- }
-
- super(AcquisitionDataPadder, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- if self.acquisition_geometry is None:
- self.acquisition_geometry = dataset.geometry
- if dataset.number_of_dimensions == 3:
- return True
- else:
- raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
- def process(self, out=None):
- projections = self.get_input()
- w = projections.get_dimension_size('horizontal')
- delta = w - 2 * self.center_of_rotation
-
- padded_width = int (
- numpy.ceil(abs(delta)) + w
- )
- delta_pix = padded_width - w
-
- voxel_per_pixel = 1
- geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(),
- self.acquisition_geometry.angles,
- self.center_of_rotation,
- voxel_per_pixel )
-
- padded_geometry = self.acquisition_geometry.clone()
-
- padded_geometry.pixel_num_h = geom['n_h']
- padded_geometry.pixel_num_v = geom['n_v']
-
- delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h
- delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v
-
- if delta_pix_h == 0:
- delta_pix_h = delta_pix
- padded_geometry.pixel_num_h = padded_width
- #initialize a new AcquisitionData with values close to 0
- out = AcquisitionData(geometry=padded_geometry)
- out = out + self.pad_value
-
-
- #pad in the horizontal-vertical plane -> slice on angles
- if delta > 0:
- #pad left of middle
- command = "out.array["
- for i in range(out.number_of_dimensions):
- if out.dimension_labels[i] == 'horizontal':
- value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w)
- command = command + str(value)
- else:
- if out.dimension_labels[i] == 'vertical' :
- value = '{0}:'.format(delta_pix_v)
- command = command + str(value)
- else:
- command = command + ":"
- if i < out.number_of_dimensions -1:
- command = command + ','
- command = command + '] = projections.array'
- #print (command)
- else:
- #pad right of middle
- command = "out.array["
- for i in range(out.number_of_dimensions):
- if out.dimension_labels[i] == 'horizontal':
- value = '{0}:{1}'.format(0, w)
- command = command + str(value)
- else:
- if out.dimension_labels[i] == 'vertical' :
- value = '{0}:'.format(delta_pix_v)
- command = command + str(value)
- else:
- command = command + ":"
- if i < out.number_of_dimensions -1:
- command = command + ','
- command = command + '] = projections.array'
- #print (command)
- #cleaned = eval(command)
- exec(command)
- return out \ No newline at end of file
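The Normalizer removed above reduces each projection to the element-wise ratio (projection - dark) / (flat - dark), replacing non-finite results with a small tolerance. A minimal NumPy sketch of that step, assuming plain 2D arrays; the function name and the stack comprehension are illustrative, not the framework API:

import numpy

def normalise_projection(projection, flat, dark, tolerance=1e-5):
    # element-wise (P - D) / (F - D), guarding against division by zero
    with numpy.errstate(divide='ignore', invalid='ignore'):
        c = numpy.true_divide(projection - dark, flat - dark)
    # 0/0 and x/0 give nan/inf; replace them with the small tolerance value
    c[~numpy.isfinite(c)] = tolerance
    return c

# for a 3D stack (angle, vertical, horizontal) the same function is applied per projection:
# normalised = numpy.asarray([normalise_projection(p, flat, dark) for p in projections])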
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index e9bd801..5bf96cc 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -8,15 +8,11 @@ Created on Mon Feb 4 16:18:06 2019
from ccpi.optimisation.algorithms import Algorithm
from ccpi.framework import ImageData
import numpy as np
-import matplotlib.pyplot as plt
import time
from ccpi.optimisation.operators import BlockOperator
from ccpi.framework import BlockDataContainer
from ccpi.optimisation.functions import FunctionOperatorComposition
-
-import matplotlib.pyplot as plt
-
class PDHG(Algorithm):
'''Primal Dual Hybrid Gradient'''
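The PDHG class touched above implements the primal-dual hybrid gradient (Chambolle-Pock) scheme, which alternates a dual proximal step, a primal proximal step and an over-relaxation step. A generic sketch of one iteration under those assumptions; K is assumed matrix-like (supporting .dot and .T), and prox_tau_f, prox_sigma_g_conj are supplied by the caller with purely illustrative names:

def pdhg_iteration(x, y, xbar, K, sigma, tau, prox_tau_f, prox_sigma_g_conj, theta=1.0):
    # dual ascent step on the convex-conjugate term
    y = prox_sigma_g_conj(y + sigma * K.dot(xbar), sigma)
    # primal descent step
    x_new = prox_tau_f(x - tau * K.T.dot(y), tau)
    # over-relaxation of the primal variable
    xbar = x_new + theta * (x_new - x)
    return x_new, y, xbar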
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index c6a7f95..1d77510 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -139,11 +139,16 @@ class BlockOperator(Operator):
for row in range(self.shape[0]):
for col in range(self.shape[1]):
if col == 0:
- self.get_item(row,col).direct(x_b.get_item(col), out=tmp.get_item(col))
+ self.get_item(row,col).direct(
+ x_b.get_item(col),
+ out=out.get_item(row))
else:
- self.get_item(row,col).direct(x_b.get_item(col), out=out)
- out+=tmp
-
+ a = out.get_item(row)
+ self.get_item(row,col).direct(
+ x_b.get_item(col),
+ out=tmp.get_item(row))
+ a += tmp.get_item(row)
+
def adjoint(self, x, out=None):
'''Adjoint operation for the BlockOperator
@@ -156,36 +161,72 @@ class BlockOperator(Operator):
Raises: ValueError if the contained Operators are not linear
'''
- if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True):
+ if not self.is_linear():
raise ValueError('Not all operators in Block are linear.')
if not isinstance (x, BlockDataContainer):
x_b = BlockDataContainer(x)
else:
x_b = x
shape = self.get_output_shape(x_b.shape, adjoint=True)
- res = []
- for row in range(self.shape[1]):
- for col in range(self.shape[0]):
- if col == 0:
- prod = self.get_item(row, col).adjoint(x_b.get_item(col))
- else:
- prod += self.get_item(row, col).adjoint(x_b.get_item(col))
- res.append(prod)
- if self.shape[1]==1:
- return ImageData(*res)
+ if out is None:
+ res = []
+ for col in range(self.shape[1]):
+ for row in range(self.shape[0]):
+ if row == 0:
+ prod = self.get_item(row, col).adjoint(x_b.get_item(row))
+ else:
+ prod += self.get_item(row, col).adjoint(x_b.get_item(row))
+ res.append(prod)
+ if self.shape[1]==1:
+ return ImageData(*res)
+ else:
+ return BlockDataContainer(*res, shape=shape)
else:
- return BlockDataContainer(*res, shape=shape)
-
+ #tmp = self.domain_geometry().allocate()
+
+ for col in range(self.shape[1]):
+ for row in range(self.shape[0]):
+ if row == 0:
+ if issubclass(out.__class__, DataContainer):
+ self.get_item(row, col).adjoint(
+ x_b.get_item(row),
+ out=out)
+ else:
+ op = self.get_item(row,col)
+ self.get_item(row, col).adjoint(
+ x_b.get_item(row),
+ out=out.get_item(col))
+ else:
+ if issubclass(out.__class__, DataContainer):
+ out += self.get_item(row,col).adjoint(
+ x_b.get_item(row))
+ else:
+ a = out.get_item(col)
+ a += self.get_item(row,col).adjoint(
+ x_b.get_item(row),
+ )
+ def is_linear(self):
+ '''returns whether all the elements of the BlockOperator are linear'''
+ return functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True)
+
def get_output_shape(self, xshape, adjoint=False):
- sshape = self.shape[1]
- oshape = self.shape[0]
+ '''returns the shape of the output BlockDataContainer
+
+ A(N,M) direct u(M,1) -> N,1
+ A(N,M)^T adjoint u(N,1) -> M,1
+ '''
+ rows , cols = self.shape
+ xrows, xcols = xshape
+ if xcols != 1:
+ raise ValueError('BlockDataContainer cannot have more than 1 column')
if adjoint:
- sshape = self.shape[0]
- oshape = self.shape[1]
- if sshape != xshape[0]:
- raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
- return (oshape, xshape[-1])
-
+ if rows != xrows:
+ raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
+ return (cols,xcols)
+ if cols != xrows:
+ raise ValueError('Incompatible shapes {} {}'.format((rows,cols), xshape))
+ return (rows,xcols)
+
def __rmul__(self, scalar):
'''Defines the left multiplication with a scalar
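The new get_output_shape docstring encodes the usual block-matrix shape rule: a (rows, cols) BlockOperator maps a (cols, 1) BlockDataContainer to a (rows, 1) one under direct, and the other way round under adjoint. A standalone sketch of the same rule on plain tuples, illustrative only and not the library call:

def block_output_shape(op_shape, x_shape, adjoint=False):
    # op_shape: (rows, cols) of the BlockOperator
    # x_shape:  (rows, cols) of the BlockDataContainer; only single-column containers are allowed
    rows, cols = op_shape
    xrows, xcols = x_shape
    if xcols != 1:
        raise ValueError('BlockDataContainer cannot have more than 1 column')
    if adjoint:
        if rows != xrows:
            raise ValueError('Incompatible shapes {} {}'.format(op_shape, x_shape))
        return (cols, xcols)   # A(N,M)^T adjoint u(N,1) -> (M,1)
    if cols != xrows:
        raise ValueError('Incompatible shapes {} {}'.format(op_shape, x_shape))
    return (rows, xcols)       # A(N,M) direct u(M,1) -> (N,1)

# e.g. for a 2x1 block operator such as BlockOperator(G, Id) in the tests below:
# block_output_shape((2, 1), (1, 1))               == (2, 1)
# block_output_shape((2, 1), (2, 1), adjoint=True) == (1, 1)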
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 54456cc..9c639df 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -71,7 +71,7 @@ class Gradient(LinearOperator):
self.FD.direction=self.ind[i]
self.FD.adjoint(x.get_item(i), out = tmp)
# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp)
- out-=tmp
+ out+=tmp
else:
tmp = self.gm_domain.allocate()
for i in range(x.shape[0]):
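The sign fix above reflects that the adjoint of a stacked operator is the sum of the adjoints of its blocks: for G = (D_1, ..., D_k) stacked row-wise, G^T y = D_1^T y_1 + ... + D_k^T y_k, so the per-direction contributions must be accumulated with += rather than -=. A small NumPy check of that identity with two arbitrary matrices standing in for the finite-difference blocks, illustrative only:

import numpy

rng = numpy.random.RandomState(0)
D1 = rng.rand(6, 4)          # stands in for the finite difference along direction 0
D2 = rng.rand(6, 4)          # stands in for the finite difference along direction 1
x = rng.rand(4)
y1, y2 = rng.rand(6), rng.rand(6)

# adjoint of the stack: accumulate the per-block adjoints with +=
out = D1.T.dot(y1)
out += D2.T.dot(y2)

# the adjoint identity <Gx, y> == <x, G^T y> only holds with the += accumulation
lhs = D1.dot(x).dot(y1) + D2.dot(x).dot(y2)
numpy.testing.assert_almost_equal(lhs, x.dot(out))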
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
index adcc6d9..3203dde 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
@@ -28,10 +28,18 @@ class ScaledOperator(object):
self.scalar = scalar
self.operator = operator
def direct(self, x, out=None):
- return self.scalar * self.operator.direct(x, out=out)
+ if out is None:
+ return self.scalar * self.operator.direct(x, out=out)
+ else:
+ self.operator.direct(x, out=out)
+ out *= self.scalar
def adjoint(self, x, out=None):
if self.operator.is_linear():
- return self.scalar * self.operator.adjoint(x, out=out)
+ if out is None:
+ return self.scalar * self.operator.adjoint(x, out=out)
+ else:
+ self.operator.adjoint(x, out=out)
+ out *= self.scalar
else:
raise TypeError('Operator is not linear')
def norm(self):
@@ -40,3 +48,5 @@ class ScaledOperator(object):
return self.operator.range_geometry()
def domain_geometry(self):
return self.operator.domain_geometry()
+ def is_linear(self):
+ return self.operator.is_linear()
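The ScaledOperator change above follows the out-parameter convention used throughout this commit: when out is provided, the wrapped operator writes into it and the scalar is then applied in place, so no intermediate container is allocated. A rough sketch of the pattern with a NumPy matrix standing in for the wrapped operator; this is a toy class, not the framework's ScaledOperator:

import numpy

class ScaledMatrix(object):
    '''Toy stand-in: scalar * (matrix . x), with an optional in-place out path.'''
    def __init__(self, matrix, scalar):
        self.matrix = matrix
        self.scalar = scalar
    def direct(self, x, out=None):
        if out is None:
            return self.scalar * self.matrix.dot(x)
        # write into the caller-provided buffer, then scale it in place
        numpy.dot(self.matrix, x, out=out)
        out *= self.scalar

A = numpy.arange(6.0).reshape(2, 3)
op = ScaledMatrix(A, 0.5)
x = numpy.ones(3)
buf = numpy.empty(2)
op.direct(x, out=buf)                                       # in-place path
numpy.testing.assert_array_almost_equal(buf, op.direct(x))  # matches the allocating path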
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
index 46e8c7c..6656d34 100644
--- a/Wrappers/Python/test/test_Operator.py
+++ b/Wrappers/Python/test/test_Operator.py
@@ -1,8 +1,15 @@
import unittest
#from ccpi.optimisation.operators import Operator
from ccpi.optimisation.ops import TomoIdentity
-from ccpi.framework import ImageGeometry, ImageData
+from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer, DataContainer
+from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator
import numpy
+from timeit import default_timer as timer
+from ccpi.framework import ImageGeometry
+from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff
+
+def dt(steps):
+ return steps[-1] - steps[-2]
class TestOperator(unittest.TestCase):
def test_ScaledOperator(self):
@@ -22,3 +29,312 @@ class TestOperator(unittest.TestCase):
y = Id.direct(img)
numpy.testing.assert_array_equal(y.as_array(), img.as_array())
+
+
+class TestBlockOperator(unittest.TestCase):
+ def assertBlockDataContainerEqual(self, container1, container2):
+ print ("assert Block Data Container Equal")
+ self.assertTrue(issubclass(container1.__class__, container2.__class__))
+ for col in range(container1.shape[0]):
+ if issubclass(container1.get_item(col).__class__, DataContainer):
+ print ("Checking col ", col)
+ self.assertNumpyArrayEqual(
+ container1.get_item(col).as_array(),
+ container2.get_item(col).as_array()
+ )
+ else:
+ self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col))
+
+ def assertNumpyArrayEqual(self, first, second):
+ res = True
+ try:
+ numpy.testing.assert_array_equal(first, second)
+ except AssertionError as err:
+ res = False
+ print(err)
+ self.assertTrue(res)
+
+ def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
+ res = True
+ try:
+ numpy.testing.assert_array_almost_equal(first, second, decimal)
+ except AssertionError as err:
+ res = False
+ print(err)
+ print("expected " , second)
+ print("actual " , first)
+
+ self.assertTrue(res)
+
+ def test_BlockOperator(self):
+
+
+ M, N = 3, 4
+ ig = ImageGeometry(M, N)
+ arr = ig.allocate('random_int')
+
+ G = Gradient(ig)
+ Id = Identity(ig)
+
+ B = BlockOperator(G, Id)
+ # Nx1 case
+ u = ig.allocate('random_int')
+ z1 = B.direct(u)
+
+ res = B.range_geometry().allocate()
+ #res = z1.copy()
+ B.direct(u, out=res)
+
+
+ print (type(z1), type(res))
+ print (z1.shape)
+ print(z1[0][0].as_array())
+ print(res[0][0].as_array())
+
+ for col in range(z1.shape[0]):
+ a = z1.get_item(col)
+ b = res.get_item(col)
+ if isinstance(a, BlockDataContainer):
+ for col2 in range(a.shape[0]):
+ self.assertNumpyArrayEqual(
+ a.get_item(col2).as_array(),
+ b.get_item(col2).as_array()
+ )
+ else:
+ self.assertNumpyArrayEqual(
+ a.as_array(),
+ b.as_array()
+ )
+ z1 = B.direct(u)
+ res1 = B.adjoint(z1)
+ res2 = B.domain_geometry().allocate()
+ B.adjoint(z1, out=res2)
+
+ self.assertNumpyArrayEqual(res1.as_array(), res2.as_array())
+
+ BB = BlockOperator( Id, 2 * Id)
+ B = BlockOperator( BB, Id )
+ v = B.domain_geometry().allocate()
+ B.adjoint(res,out=v)
+ vv = B.adjoint(res)
+ el1 = B.get_item(0,0).adjoint(z1.get_item(0)) +\
+ B.get_item(1,0).adjoint(z1.get_item(1))
+ print ("el1" , el1.as_array())
+ print ("vv" , vv.as_array())
+ print ("v" , v.as_array())
+
+ self.assertNumpyArrayEqual(v.as_array(),vv.as_array())
+ # test adjoint
+ print ("############ 2x1 #############")
+
+ BB = BlockOperator( Id, 2 * Id)
+ u = ig.allocate(1)
+ z1 = BB.direct(u)
+ print ("z1 shape {} one\n{} two\n{}".format(z1.shape,
+ z1.get_item(0).as_array(),
+ z1.get_item(1).as_array()))
+ res = BB.range_geometry().allocate(0)
+ BB.direct(u, out=res)
+ print ("res shape {} one\n{} two\n{}".format(res.shape,
+ res.get_item(0).as_array(),
+ res.get_item(1).as_array()))
+
+
+ self.assertNumpyArrayEqual(z1.get_item(0).as_array(),
+ u.as_array())
+ self.assertNumpyArrayEqual(z1.get_item(1).as_array(),
+ 2 * u.as_array())
+ self.assertNumpyArrayEqual(res.get_item(0).as_array(),
+ u.as_array())
+ self.assertNumpyArrayEqual(res.get_item(1).as_array(),
+ 2 * u.as_array())
+
+ x1 = BB.adjoint(z1)
+ print("adjoint x1\n",x1.as_array())
+
+ res1 = BB.domain_geometry().allocate()
+ BB.adjoint(z1, out=res1)
+ print("res1\n",res1.as_array())
+ self.assertNumpyArrayEqual(x1.as_array(),
+ res1.as_array())
+
+ self.assertNumpyArrayEqual(x1.as_array(),
+ 5 * u.as_array())
+ self.assertNumpyArrayEqual(res1.as_array(),
+ 5 * u.as_array())
+ #################################################
+
+ print ("############ 2x2 #############")
+ BB = BlockOperator( Id, 2 * Id, 3 * Id, Id, shape=(2,2))
+ B = BB
+ u = ig.allocate(1)
+ U = BlockDataContainer(u,u)
+ z1 = B.direct(U)
+
+
+ print ("z1 shape {} one\n{} two\n{}".format(z1.shape,
+ z1.get_item(0).as_array(),
+ z1.get_item(1).as_array()))
+ self.assertNumpyArrayEqual(z1.get_item(0).as_array(),
+ 3 * u.as_array())
+ self.assertNumpyArrayEqual(z1.get_item(1).as_array(),
+ 4 * u.as_array())
+ res = B.range_geometry().allocate()
+ B.direct(U, out=res)
+ self.assertNumpyArrayEqual(res.get_item(0).as_array(),
+ 3 * u.as_array())
+ self.assertNumpyArrayEqual(res.get_item(1).as_array(),
+ 4 * u.as_array())
+
+
+ x1 = B.adjoint(z1)
+ # this should be [15 u, 10 u]
+ el1 = B.get_item(0,0).adjoint(z1.get_item(0)) + B.get_item(1,0).adjoint(z1.get_item(1))
+ el2 = B.get_item(0,1).adjoint(z1.get_item(0)) + B.get_item(1,1).adjoint(z1.get_item(1))
+
+ shape = B.get_output_shape(z1.shape, adjoint=True)
+ print ("shape ", shape)
+ out = B.domain_geometry().allocate()
+
+ for col in range(B.shape[1]):
+ for row in range(B.shape[0]):
+ if row == 0:
+ el = B.get_item(row,col).adjoint(z1.get_item(row))
+ else:
+ el += B.get_item(row,col).adjoint(z1.get_item(row))
+ out.get_item(col).fill(el)
+
+ print ("el1 " , el1.as_array())
+ print ("el2 " , el2.as_array())
+ print ("out shape {} one\n{} two\n{}".format(out.shape,
+ out.get_item(0).as_array(),
+ out.get_item(1).as_array()))
+
+ self.assertNumpyArrayEqual(out.get_item(0).as_array(),
+ 15 * u.as_array())
+ self.assertNumpyArrayEqual(out.get_item(1).as_array(),
+ 10 * u.as_array())
+
+ res2 = B.domain_geometry().allocate()
+ #print (res2, res2.as_array())
+ B.adjoint(z1, out = res2)
+
+ #print ("adjoint",x1.as_array(),"\n",res2.as_array())
+ self.assertNumpyArrayEqual(
+ out.get_item(0).as_array(),
+ res2.get_item(0).as_array()
+ )
+ self.assertNumpyArrayEqual(
+ out.get_item(1).as_array(),
+ res2.get_item(1).as_array()
+ )
+
+ if True:
+ #B1 = BlockOperator(Id, Id, Id, Id, shape=(2,2))
+ B1 = BlockOperator(G, Id)
+ U = ig.allocate(ImageGeometry.RANDOM_INT)
+ #U = BlockDataContainer(u,u)
+ RES1 = B1.range_geometry().allocate()
+
+ Z1 = B1.direct(U)
+ B1.direct(U, out = RES1)
+
+ self.assertBlockDataContainerEqual(Z1,RES1)
+
+
+
+ print("U", U.as_array())
+ print("Z1", Z1[0][0].as_array())
+ print("RES1", RES1[0][0].as_array())
+ print("Z1", Z1[0][1].as_array())
+ print("RES1", RES1[0][1].as_array())
+ def test_timedifference(self):
+
+ M, N ,W = 100, 512, 512
+ ig = ImageGeometry(M, N, W)
+ arr = ig.allocate('random_int')
+
+ G = Gradient(ig)
+ Id = Identity(ig)
+
+ B = BlockOperator(G, Id)
+
+
+ # Nx1 case
+ u = ig.allocate('random_int')
+ steps = [timer()]
+ i = 0
+ n = 25.
+ t1 = t2 = 0
+ res = B.range_geometry().allocate()
+
+ while (i < n):
+ print ("i ", i)
+ steps.append(timer())
+ z1 = B.direct(u)
+ steps.append(timer())
+ t = dt(steps)
+ #print ("B.direct(u) " ,t)
+ t1 += t/n
+
+ steps.append(timer())
+ B.direct(u, out = res)
+ steps.append(timer())
+ t = dt(steps)
+ #print ("B.direct(u, out=res) " ,t)
+ t2 += t/n
+ i += 1
+
+ print ("Time difference ", t1,t2)
+ self.assertGreater(t1,t2)
+
+ steps = [timer()]
+ i = 0
+ #n = 50.
+ t1 = t2 = 0
+ resd = B.domain_geometry().allocate()
+ z1 = B.direct(u)
+ #B.adjoint(z1, out=resd)
+
+ print (type(res))
+ while (i < n):
+ print ("i ", i)
+ steps.append(timer())
+ w1 = B.adjoint(z1)
+ steps.append(timer())
+ t = dt(steps)
+ #print ("B.adjoint(z1) " ,t)
+ t1 += t/n
+
+ steps.append(timer())
+ B.adjoint(z1, out=resd)
+ steps.append(timer())
+ t = dt(steps)
+ #print ("B.adjoint(z1, out=res) " ,t)
+ t2 += t/n
+ i += 1
+
+ print ("Time difference ", t1,t2)
+ self.assertGreater(t1,t2)
+
+ def test_BlockOperatorLinearValidity(self):
+
+
+ M, N = 3, 4
+ ig = ImageGeometry(M, N)
+ arr = ig.allocate('random_int')
+
+ G = Gradient(ig)
+ Id = Identity(ig)
+
+ B = BlockOperator(G, Id)
+ # Nx1 case
+ u = ig.allocate('random_int')
+ w = B.range_geometry().allocate(ImageGeometry.RANDOM_INT)
+ w1 = B.direct(u)
+ u1 = B.adjoint(w)
+ self.assertEqual((w * w1).sum() , (u1*u).sum())
+
+
+
+
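test_BlockOperatorLinearValidity above checks the defining property of the adjoint, <B u, w> = <u, B^T w>, by comparing the two inner products on random data. The same check written for an ordinary NumPy matrix, as a minimal sketch of the idea rather than the framework test:

import numpy

rng = numpy.random.RandomState(1)
B = rng.rand(5, 3)     # stands in for the operator
u = rng.rand(3)        # element of the domain
w = rng.rand(5)        # element of the range

w1 = B.dot(u)          # direct:  B u
u1 = B.T.dot(w)        # adjoint: B^T w

# <B u, w> should equal <u, B^T w>
numpy.testing.assert_almost_equal((w * w1).sum(), (u1 * u).sum())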
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 54dfa57..19cb65f 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -19,7 +19,7 @@ from ccpi.optimisation.operators import Gradient
#from ccpi.optimisation.functions import SimpleL2NormSq
from ccpi.optimisation.functions import L2NormSquared
-from ccpi.optimisation.functions import SimpleL1Norm
+#from ccpi.optimisation.functions import SimpleL1Norm
from ccpi.optimisation.functions import L1Norm
from ccpi.optimisation.funcs import Norm2sq