author | Edoardo Pasca <edo.paskino@gmail.com> | 2019-03-29 17:07:46 +0000 |
---|---|---|
committer | Edoardo Pasca <edo.paskino@gmail.com> | 2019-03-29 17:07:46 +0000 |
commit | d9588f62186a9532890907d11536ebe3824a3bb9 (patch) | |
tree | ebd49e754be6a73e76992edb0add50391cb0e951 | |
parent | bb8d70e564139b10ce0a3eae327f0b5f91c38368 (diff) | |
merge Vaggelis branch block_function
18 files changed, 514 insertions, 261 deletions
diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
index f29f839..b4041e4 100755
--- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py
+++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
@@ -64,7 +64,7 @@ class BlockDataContainer(object):
                    .format(type(ot)))
return len(self.containers) == len(other)
elif isinstance(other, numpy.ndarray):
- return self.shape == other.shape
+ return len(self.containers) == len(other)
elif issubclass(other.__class__, DataContainer):
return self.get_item(0).shape == other.shape
return len(self.containers) == len(other.containers)
@@ -91,25 +91,26 @@ class BlockDataContainer(object):
            return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape)
return type(self)(
- *[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
+ *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
shape=self.shape)
def subtract(self, other, *args, **kwargs):
if not self.is_compatible(other):
- raise ValueError('Incompatible for add')
+ raise ValueError('Incompatible for subtract')
out = kwargs.get('out', None)
if isinstance(other, Number):
- return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers], shape=self.shape)
+ return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape)
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
+ return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
elif issubclass(other.__class__, DataContainer):
# try to do algebra with one DataContainer. Will raise error if not compatible
return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape)
- return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
+ return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
shape=self.shape)
def multiply(self, other, *args, **kwargs):
- self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('{} Incompatible for multiply'.format(other))
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape)
@@ -124,7 +125,8 @@ class BlockDataContainer(object):
                  shape=self.shape)
def divide(self, other, *args, **kwargs):
- self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for divide')
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape)
@@ -137,7 +139,8 @@ class BlockDataContainer(object):
                  shape=self.shape)
def power(self, other, *args, **kwargs):
- assert self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for power')
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape)
@@ -146,7 +149,8 @@ class BlockDataContainer(object):
        return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
def maximum(self,other, *args, **kwargs):
- assert self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for maximum')
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape)
@@ -265,7 +269,8 @@ class BlockDataContainer(object):
            for el in self.containers:
el += other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for __iadd__')
for el,ot in zip(self.containers, other):
el += ot
return self
@@ -280,7 +285,8 @@ class BlockDataContainer(object):
            for el in self.containers:
el -= other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- assert self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for __isub__')
for el,ot in zip(self.containers, other):
el -= ot
return self
@@ -295,7 +301,8 @@ class BlockDataContainer(object):
            for el in self.containers:
el *= other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- assert self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for __imul__')
for el,ot in zip(self.containers, other):
el *= ot
return self
@@ -310,7 +317,8 @@ class BlockDataContainer(object):
            for el in self.containers:
el /= other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
- assert self.is_compatible(other)
+ if not self.is_compatible(other):
+ raise ValueError('Incompatible for __idiv__')
for el,ot in zip(self.containers, other):
el /= ot
return self
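
The net effect of the BlockDataContainer changes above: a length mismatch between the block container and a plain list or ndarray now raises ValueError instead of relying on asserts, and the stray `out` positional argument is no longer forwarded to the element-wise calls. A minimal sketch of the new behaviour (not part of the commit; assumes the ccpi package at this revision is importable):

```python
from ccpi.framework import ImageGeometry, BlockDataContainer

ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3)
bdc = BlockDataContainer(ig.allocate(), ig.allocate())   # 2-element block

# compatible: a 2-element list against a 2-element BlockDataContainer
res = bdc.add([1, 2])
print(res.get_item(0).as_array().max())

# incompatible: length mismatch now raises ValueError instead of tripping an assert
try:
    bdc.multiply([1, 2, 3])
except ValueError as ve:
    print(ve)
```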
diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py
index 632d320..d336305 100755
--- a/Wrappers/Python/ccpi/framework/BlockGeometry.py
+++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py
@@ -6,7 +6,7 @@ from __future__ import unicode_literals
import numpy
from numbers import Number
import functools
-#from ccpi.framework import AcquisitionData, ImageData
+from ccpi.framework import BlockDataContainer
#from ccpi.optimisation.operators import Operator, LinearOperator
class BlockGeometry(object):
@@ -28,7 +28,7 @@ class BlockGeometry(object):
                'Dimension and size do not match: expected {} got {}'
.format(n_elements, len(args)))
- def allocate(self, value=0):
+ def allocate(self, value=0, dimension_labels=None):
containers = [geom.allocate(value) for geom in self.geometries]
- BlockDataContainer(*containers)
+ return BlockDataContainer(*containers)
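
This one-line fix matters in practice: before the change, `BlockGeometry.allocate` built the list of containers but never returned the `BlockDataContainer`, so callers got `None`. The new `dimension_labels` argument is accepted but not yet used. A minimal sketch of the fixed behaviour (not part of the commit; assumes this revision of ccpi):

```python
from ccpi.framework import ImageGeometry, BlockGeometry

ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3)
bg = BlockGeometry(ig, ig)

x = bg.allocate()            # previously returned None; now a BlockDataContainer
print(type(x).__name__)      # -> BlockDataContainer
print(len(x.containers))     # -> 2
```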
diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index bf8273b..ae9faf7 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -161,6 +161,8 @@ class ImageGeometry(object): numpy.random.seed(seed) max_value = kwargs.get('max_value', 100) out.fill(numpy.random.randint(max_value,size=self.shape)) + else: + raise ValueError('Value {} unknown'.format(value)) if dimension_labels is not None: if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) @@ -305,6 +307,8 @@ class AcquisitionGeometry(object): numpy.random.seed(seed) max_value = kwargs.get('max_value', 100) out.fill(numpy.random.randint(max_value,size=self.shape)) + else: + raise ValueError('Value {} unknown'.format(value)) if dimension_labels is not None: if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 7488310..7e55ee8 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -10,8 +10,8 @@ from ccpi.framework import ImageData import numpy as np import matplotlib.pyplot as plt import time -from Operators.CompositeOperator import CompositeOperator -from Operators.CompositeDataContainer import CompositeDataContainer +from ccpi.optimisation.operators import BlockOperator +from ccpi.framework import BlockDataContainer def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): @@ -29,13 +29,12 @@ def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - if isinstance(operator, CompositeOperator): -# if isinstance(operator, CompositeOperator_DataContainer): - x_old = operator.alloc_domain_dim() - y_old = operator.alloc_range_dim() + if isinstance(operator, BlockOperator): + x_old = operator.domain_geometry().allocate() + y_old = operator.range_geometry().allocate() else: - x_old = ImageData(np.zeros(operator.domain_dim())) - y_old = ImageData(np.zeros(operator.range_dim())) + x_old = operator.domain_geometry().allocate() + y_old = operator.range_geometry().allocate() xbar = x_old @@ -68,34 +67,12 @@ def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = x y_old = y -# pdgap - print(f(x) + g(x) + f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) ) - - - - - -# # TV denoising, pdgap with composite -# -# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\ -# 0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum()) -# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum() - - # TV denoising, pdgap with no composite - - - -# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\ -# 0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum()) -# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum() - - -# print(primal_obj) -# objective = primal_obj -# - - - +# if i%100==0: +# +# plt.imshow(x.as_array()[100]) +# plt.show() +# print(f(operator.direct(x)) + g(x), i) + t_end = 
time.time() return x, t_end - t, objective diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py index 7e500e8..443bc78 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py @@ -27,4 +27,4 @@ from .CGLS import CGLS from .GradientDescent import GradientDescent from .FISTA import FISTA from .FBPD import FBPD - +from .PDHG import PDHG diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 21cd82b..70216a9 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -66,5 +66,5 @@ class BlockFunction(Function): return BlockDataContainer(*out) def gradient(self,x, out=None): - '''gradient returns pass''' + '''FIXME: gradient returns pass''' pass
\ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 1baf365..54f8859 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -8,92 +8,203 @@ Created on Thu Feb 7 13:10:56 2019 @author: evangelos """ -import numpy as np -#from ccpi.optimisation.funcs import Function +import numpy from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.framework import DataContainer, ImageData, ImageGeometry +############################ L2NORM FUNCTION ############################# +class L2NormSquared(Function): -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): + def __init__(self, **kwargs): - super(SimpleL2NormSq, self).__init__() - # Lispchitz constant of gradient - self.L = 2 + ''' L2NormSquared class + f : ImageGeometry --> R + + Cases: f(x) = ||x||^{2}_{2} + f(x) = || x - b ||^{2}_{2} - def __call__(self, x): - return x.power(2).sum() - - def gradient(self,x, out=None): + ''' + + #TODO need x, b to live in the same geometry if b is not None + + super(L2NormSquared, self).__init__() + self.b = kwargs.get('b',None) + + def __call__(self, x, out=None): + + ''' Evaluates L2NormSq at point x''' + + y = x + if self.b is not None: +# x.subtract(self.b, out = x) + y = x - self.b +# else: +# y +# if out is None: +# return x.squared_norm() +# else: + return y.squared_norm() + + + + def gradient(self, x, out=None): + + ''' Evaluates gradient of L2NormSq at point x''' + + if self.b is not None: +# x.subtract(self.b, out=x) + y = x - self.b if out is None: - return 2 * x + return 2*y else: - out.fill(2*x) - - def convex_conjugate(self,x): - return (1/4) * x.squared_norm() + return out.fill(2*y) + + def convex_conjugate(self, x, out=None): - def proximal(self, x, tau, out=None): + ''' Evaluate convex conjugate of L2NormSq ''' + + tmp = 0 + if self.b is not None: + tmp = (self.b * x).sum() + if out is None: - return x.divide(1+2*tau) + return (1/4) * x.squared_norm() + tmp else: - x.divide(1+2*tau, out=out) + out.fill((1/4) * x.squared_norm() + tmp) + + + def proximal(self, x, tau, out = None): + + ''' The proximal operator ( prox_\{tau * f\}(x) ) evaluates i.e., + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + + if out is None: + if self.b is not None: + return (x - self.b)/(1+2*tau) + self.b + else: + return x/(1+2*tau) + else: + if self.b is not None: + out.fill((x - self.b)/(1+2*tau) + self.b) + else: + out.fill(x/(1+2*tau)) + def proximal_conjugate(self, x, tau, out=None): + if out is None: - return x.divide(1 + tau/2) + if self.b is not None: + return (x - tau*self.b)/(1 + tau/2) + else: + return x/(1 + tau/2 ) else: - x.divide(1+tau/2, out=out) + if self.b is not None: + out.fill((x - tau*self.b)/(1 + tau/2)) + else: + out.fill(x/(1 + tau/2 )) + + def __rmul__(self, scalar): + return ScaledFunction(self, scalar) - -############################ L2NORM FUNCTIONS ############################# -class L2NormSq(SimpleL2NormSq): +if __name__ == '__main__': - def __init__(self, **kwargs): - super(L2NormSq, self).__init__() - self.b = kwargs.get('b',None) + + # TESTS for L2 and scalar * L2 + + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + u = ig.allocate('random_int') + b = ig.allocate('random_int') + + # check 
grad/call no data + f = L2NormSquared() + a1 = f.gradient(u) + a2 = 2 * u + numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) + numpy.testing.assert_equal(f(u), u.squared_norm()) - def __call__(self, x): - if self.b is None: - return SimpleL2NormSq.__call__(self, x) - else: - return SimpleL2NormSq.__call__(self, x - self.b) + # check grad/call with data + f1 = L2NormSquared(b=b) + b1 = f1.gradient(u) + b2 = 2 * (u-b) - def gradient(self, x, out=None): - if self.b is None: - return 2 * x - else: - return 2 * (x - self.b) - - def convex_conjugate(self, x): - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} + numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) + numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) + + #check convex conjuagate no data + c1 = f.convex_conjugate(u) + c2 = 1/4 * u.squared_norm() + numpy.testing.assert_equal(c1, c2) + + #check convex conjuagate with data + d1 = f1.convex_conjugate(u) + d2 = (1/4) * u.squared_norm() + (u*b).sum() + numpy.testing.assert_equal(d1, d2) + + # check proximal no data + tau = 5 + e1 = f.proximal(u, tau) + e2 = u/(1+2*tau) + numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) + + # check proximal with data + tau = 5 + h1 = f1.proximal(u, tau) + h2 = (u-b)/(1+2*tau) + b + numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) + + # check proximal conjugate no data + tau = 0.2 + k1 = f.proximal_conjugate(u, tau) + k2 = u/(1 + tau/2 ) + numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) + + # check proximal conjugate with data + l1 = f1.proximal_conjugate(u, tau) + l2 = (u - tau * b)/(1 + tau/2 ) + numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) + - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) + # check scaled function properties + + # scalar + scalar = 100 + f_scaled_no_data = scalar * L2NormSquared() + f_scaled_data = scalar * L2NormSquared(b=b) + + # call + numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u)) + numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u)) + + # grad + numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4) + numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) + + # conj + numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ + f.convex_conjugate(u/scalar) * scalar, decimal=4) + + numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ + scalar * f1.convex_conjugate(u/scalar), decimal=4) + + # proximal + numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \ + f.proximal(u, tau*scalar).as_array()) + + + numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ + f1.proximal(u, tau*scalar).as_array()) + + + # proximal conjugate + numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, 
tau).as_array(), \ + (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ + ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + - def proximal_conjugate(self, x, tau): - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 8a52566..b48135d 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -39,9 +39,11 @@ class ScaledFunction(object): def proximal_conjugate(self, x, tau, out = None):
'''This returns the proximal operator for the function at x, tau
-
- TODO check if this is mathematically correct'''
- return self.function.proximal_conjugate(x, tau, out=out)
+ '''
+ if out is None:
+ return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)
+ else:
+ out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar))
def grad(self, x):
'''Alias of gradient(x,None)'''
@@ -57,10 +59,15 @@ class ScaledFunction(object):
    def gradient(self, x, out=None):
'''Returns the gradient of the function at x, if the function is differentiable'''
- return self.scalar * self.function.gradient(x, out=out)
+ if out is None:
+ return self.scalar * self.function.gradient(x)
+ else:
+ out.fill( self.scalar * self.function.gradient(x) )
def proximal(self, x, tau, out=None):
'''This returns the proximal operator for the function at x, tau
-
- TODO check if this is mathematically correct'''
- return self.function.proximal(x, tau, out=out)
+ '''
+ if out is None:
+ return self.function.proximal(x, tau*self.scalar)
+ else:
+ out.fill( self.function.proximal(x, tau*self.scalar) )
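
The ScaledFunction changes replace the TODO pass-throughs with the standard scaling rules for g = alpha*f: grad g(x) = alpha*grad f(x); prox_{tau g}(x) = prox_{(alpha*tau) f}(x); and, since g*(y) = alpha*f*(y/alpha), prox_{tau g*}(x) = alpha * prox_{(tau/alpha) f*}(x/alpha). A numpy-only sanity check of the conjugate rule for f(x) = ||x||^2, whose proximal maps are known in closed form (this mirrors the checks in the L2NormSquared `__main__` block above; not part of the commit):

```python
import numpy

x = numpy.random.random(5)
alpha, tau = 100.0, 0.2

def prox_fconj(z, sigma):
    # closed-form prox of f*(z) = ||z||^2 / 4, the conjugate of f(x) = ||x||^2
    return z / (1.0 + sigma / 2.0)

lhs = x / (1.0 + tau / (2.0 * alpha))             # prox_{tau (alpha*f)*}(x), derived directly
rhs = alpha * prox_fconj(x / alpha, tau / alpha)  # the formula ScaledFunction now uses
numpy.testing.assert_array_almost_equal(lhs, rhs)
```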
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index 9030454..d6edd03 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -3,8 +3,10 @@ from .Function import Function from .ZeroFun import ZeroFun from .L1Norm import SimpleL1Norm, L1Norm -from .L2NormSquared import L2NormSq, SimpleL2NormSq -from .mixed_L12Norm import mixed_L12Norm +#from .L2NormSquared import L2NormSq, SimpleL2NormSq +from .L2NormSquared import L2NormSquared from .BlockFunction import BlockFunction from .ScaledFunction import ScaledFunction from .FunctionOperatorComposition import FunctionOperatorComposition +from .MixedL21Norm import MixedL21Norm + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 323efcd..1240b31 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -105,13 +105,8 @@ class BlockOperator(Operator): return self.operators[index] def norm(self): - norm = [op.norm() for op in self.operators] - b = [] - for i in range(self.shape[0]): - b.append([]) - for j in range(self.shape[1]): - b[-1].append(norm[i*self.shape[1]+j]) - return numpy.asarray(b) + norm = [op.norm()**2 for op in self.operators] + return numpy.sqrt(sum(norm)) def direct(self, x, out=None): '''Direct operation for the BlockOperator @@ -161,7 +156,10 @@ class BlockOperator(Operator): else: prod += self.get_item(row, col).adjoint(x_b.get_item(col)) res.append(prod) - return BlockDataContainer(*res, shape=shape) + if self.shape[1]==1: + return ImageData(*res) + else: + return BlockDataContainer(*res, shape=shape) def get_output_shape(self, xshape, adjoint=False): sshape = self.shape[1] @@ -204,7 +202,7 @@ class BlockOperator(Operator): ''' if self.shape[1] == 1: # column BlockOperator - return self[0].domain_geometry() + return self.get_item(0,0).domain_geometry() else: shape = (self.shape[0], 1) return BlockGeometry(*[el.domain_geometry() for el in self.operators], diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index d0d0f43..ec14b8f 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -6,131 +6,73 @@ Created on Fri Mar 1 22:50:04 2019 @author: evangelos """ -from ccpi.optimisation.operators import Operator +from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, BlockDataContainer -import numpy as np +from ccpi.framework import ImageData, ImageGeometry, BlockGeometry +import numpy from ccpi.optimisation.operators import FiniteDiff -from ccpi.framework import BlockGeometry #%% -class Gradient(Operator): +class Gradient(LinearOperator): - def __init__(self, gm_domain, gm_range=None, bnd_cond = 'Neumann', **kwargs): + def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): super(Gradient, self).__init__() - + self.gm_domain = gm_domain # Domain of Grad Operator - self.gm_range = gm_range # Range of Grad Operator - self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences - - if self.gm_range is None: - #FIXME this should be a BlockGeometry - self.gm_range = 
((len(self.gm_domain),)+self.gm_domain) - - # Kwargs Default options - self.memopt = kwargs.get('memopt',False) - self.correlation = kwargs.get('correlation','Space') + self.correlation = kwargs.get('correlation','Space') - #TODO not tested yet, operator norm??? - self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) - + if self.correlation=='Space': + if self.gm_domain.channels>1: + self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length-1)] ) + self.ind = numpy.arange(1,self.gm_domain.length) + else: + self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length) ] ) + self.ind = numpy.arange(self.gm_domain.length) + elif self.correlation=='SpaceChannels': + if self.gm_domain.channels>1: + self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length)]) + self.ind = range(self.gm_domain.length) + else: + raise ValueError('No channels to correlate') + + self.bnd_cond = bnd_cond + def direct(self, x, out=None): - #tmp = np.zeros(self.gm_range) tmp = self.gm_range.allocate() - for i in range(len(self.gm_domain)): - #tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] - if self.correlation == 'Space': - if i == 0 : - i+=1 - tmp[i].fill( - FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] - ) -# return type(x)(tmp) - return type(x)(tmp) - + + + for i in range(tmp.shape[0]): + tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) + return tmp + def adjoint(self, x, out=None): - - tmp = np.zeros(self.gm_domain) - for i in range(len(self.gm_domain)): - tmp+=FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).adjoint(x.as_array()[i])/self.voxel_size[i] - return type(x)(-tmp) - def alloc_domain_dim(self): - return ImageData(np.zeros(self.gm_domain)) - - def alloc_range_dim(self): - return ImageData(np.zeros(self.range_dim)) + tmp = self.gm_domain.allocate() + for i in range(x.shape[0]): + tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) + return tmp + def domain_geometry(self): return self.gm_domain def range_geometry(self): - '''fix this''' - return BlockGeometry(self.gm_range, self.gm_range) + return self.gm_range def norm(self): -# return np.sqrt(4*len(self.domainDim())) - #TODO this takes time for big ImageData - # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) - x0 = ImageData(np.random.random_sample(self.domain_dim())) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + + x0 = self.gm_domain.allocate('random') + self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0) return self.s1 + def __rmul__(self, scalar): + return ScaledOperator(self, scalar) if __name__ == '__main__': - N, M = (200,300) - ig = (N,M) - G = Gradient(ig) - u = DataContainer(np.random.randint(10, size=G.domain_dim())) - w = DataContainer(np.random.randint(10, size=G.range_dim())) -# w = [DataContainer(np.random.randint(10, size=G.domain_dim())),\ -# DataContainer(np.random.randint(10, size=G.domain_dim()))] - - # domain_dim - print('Domain {}'.format(G.domain_geometry())) - - # range_dim - print('Range {}'.format(G.range_geometry())) - - # Direct - z = G.direct(u) - - # Adjoint - z1 = G.adjoint(w) - - print(z) - print(z1) - - LHS = (G.direct(u)*w).sum() - RHS = (u * G.adjoint(w)).sum() -# - print(LHS,RHS) - print(G.norm()) - -# print(G.adjoint(G.direct(u))) - - - - - - - - - - - - - 
- - - - - - -
\ No newline at end of file
+    pass
diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
index d49cb30..0f50e82 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
@@ -6,10 +6,10 @@ Created on Wed Mar 6 19:30:51 2019
@author: evangelos
"""
-from ccpi.optimisation.operators import Operator
+from ccpi.optimisation.operators import LinearOperator
-class Identity(Operator):
+class Identity(LinearOperator):
    def __init__(self, gm_domain, gm_range=None):
@@ -35,8 +35,8 @@ class Identity(Operator):
    def norm(self):
        return 1.0
-    def domain_dim(self):
+    def domain_geometry(self):
        return self.gm_domain
-    def range_dim(self):
+    def range_geometry(self):
        return self.gm_range
\ No newline at end of file
diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
index 1e86efc..1c09faf 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
@@ -17,4 +17,3 @@ from .GradientOperator import Gradient
from .SymmetrizedGradientOperator import SymmetrizedGradient
from .IdentityOperator import Identity
from .ZeroOperator import ZeroOp
-
diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py
index 51d07fa..2ee0e94 100755
--- a/Wrappers/Python/test/test_BlockDataContainer.py
+++ b/Wrappers/Python/test/test_BlockDataContainer.py
@@ -214,10 +214,15 @@ class TestBlockDataContainer(unittest.TestCase):
        cp2 = numpy.asarray([3,2]) * cp0
numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
- cp2 = [3,2,3] * cp0
- numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
- numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+ try:
+ cp2 = [3,2,3] * cp0
+ #numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
+ #numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5)
+ self.assertTrue(False)
+ except ValueError as ve:
+ print (ve)
+ self.assertTrue(True)
cp2 *= cp1
numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0 , decimal=5)
numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5)
@@ -230,6 +235,12 @@ class TestBlockDataContainer(unittest.TestCase):
        numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5)
numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -6., decimal = 5)
+ try:
+ cp2 *= [2,3,5]
+ self.assertTrue(False)
+ except ValueError as ve:
+ print (ve)
+ self.assertTrue(True)
cp2 = cp0.divide(cp1)
assert (cp2.get_item(0).as_array()[0][0][0] == 0.)
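
As a side note, the try/except/assertTrue pattern used in the new test code can be written more compactly with unittest's assertRaises context manager. A sketch of an equivalent check, assuming the same `cp0` fixture as in the test above (not part of the commit):

```python
# equivalent to the try/except check above: a 3-element list against a
# 2-element BlockDataContainer must raise ValueError
with self.assertRaises(ValueError):
    cp2 = [3, 2, 3] * cp0
```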
diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py index cb09a1f..8edfd8b 100755 --- a/Wrappers/Python/test/test_DataContainer.py +++ b/Wrappers/Python/test/test_DataContainer.py @@ -526,17 +526,6 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[2], sino.dimension_labels[2]) self.assertEqual(order[2], sino.dimension_labels[2]) - def test_ImageGeometry_equal(self): - vg1 = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) - vg2 = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) - self.assertTrue(vg1 == vg2) - self.assertFalse(vg1 != vg2) - - vg2 = ImageGeometry(voxel_num_z=3,voxel_num_x=4, voxel_num_y=3, channels=2) - self.assertTrue(vg1 != vg2) - self.assertFalse(vg1 == vg2) - - def assertNumpyArrayEqual(self, first, second): res = True try: diff --git a/Wrappers/Python/test/test_Gradient.py b/Wrappers/Python/test/test_Gradient.py new file mode 100755 index 0000000..1d6485c --- /dev/null +++ b/Wrappers/Python/test/test_Gradient.py @@ -0,0 +1,90 @@ +import unittest +import numpy +#from ccpi.plugins.ops import CCPiProjectorSimple +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.optimisation.ops import TomoIdentity +from ccpi.optimisation.funcs import Norm2sq, Norm1 +from ccpi.framework import ImageGeometry, AcquisitionGeometry +from ccpi.framework import ImageData, AcquisitionData +#from ccpi.optimisation.algorithms import GradientDescent +from ccpi.framework import BlockDataContainer +#from ccpi.optimisation.Algorithms import CGLS +import functools + +from ccpi.optimisation.operators import Gradient, Identity, BlockOperator + +class TestGradient(unittest.TestCase): + def test_Gradient(self): + N, M, K = 20, 30, 40 + channels = 10 + + # check range geometry, examples + + ig1 = ImageGeometry(voxel_num_x = M, voxel_num_y = N) + ig2 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, voxel_num_z = K) + ig3 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, channels = channels) + ig4 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, channels = channels, voxel_num_z= K) + + G1 = Gradient(ig1, correlation = 'Space') + print(G1.range_geometry().shape, '2D no channels') + + G4 = Gradient(ig3, correlation = 'SpaceChannels') + print(G4.range_geometry().shape, '2D with channels corr') + G5 = Gradient(ig3, correlation = 'Space') + print(G5.range_geometry().shape, '2D with channels no corr') + + G6 = Gradient(ig4, correlation = 'Space') + print(G6.range_geometry().shape, '3D with channels no corr') + G7 = Gradient(ig4, correlation = 'SpaceChannels') + print(G7.range_geometry().shape, '3D with channels with corr') + + + u = ig1.allocate(ImageGeometry.RANDOM) + w = G1.range_geometry().allocate(ImageGeometry.RANDOM_INT) + + LHS = (G1.direct(u)*w).sum() + RHS = (u * G1.adjoint(w)).sum() + numpy.testing.assert_approx_equal(LHS, RHS, significant = 1) + numpy.testing.assert_approx_equal(G1.norm(), numpy.sqrt(2*4), significant = 1) + + + u1 = ig3.allocate('random') + w1 = G4.range_geometry().allocate('random') + LHS1 = (G4.direct(u1) * w1).sum() + RHS1 = (u1 * G4.adjoint(w1)).sum() + numpy.testing.assert_approx_equal(LHS1, RHS1, significant=1) + numpy.testing.assert_almost_equal(G4.norm(), numpy.sqrt(3*4), decimal = 0) + + u2 = ig4.allocate('random') + w2 = G7.range_geometry().allocate('random') + LHS2 = (G7.direct(u2) * w2).sum() + RHS2 = (u2 * G7.adjoint(w2)).sum() + numpy.testing.assert_approx_equal(LHS2, RHS2, significant = 3) + numpy.testing.assert_approx_equal(G7.norm(), numpy.sqrt(3*4), significant = 1) + + 
+ #check direct/adjoint for space/channels correlation + + ig_channel = ImageGeometry(voxel_num_x = 2, voxel_num_y = 3, channels = 2) + G_no_channel = Gradient(ig_channel, correlation = 'Space') + G_channel = Gradient(ig_channel, correlation = 'SpaceChannels') + + u3 = ig_channel.allocate('random_int') + res_no_channel = G_no_channel.direct(u3) + res_channel = G_channel.direct(u3) + + print(" Derivative for 3 directions, first is wrt Channel direction\n") + print(res_channel[0].as_array()) + print(res_channel[1].as_array()) + print(res_channel[2].as_array()) + + print(" Derivative for 2 directions, no Channel direction\n") + print(res_no_channel[0].as_array()) + print(res_no_channel[1].as_array()) + + ig2D = ImageGeometry(voxel_num_x = 2, voxel_num_y = 3) + u4 = ig2D.allocate('random_int') + G2D = Gradient(ig2D) + res = G2D.direct(u4) + print(res[0].as_array()) + print(res[1].as_array()) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 6a44641..384e351 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -37,13 +37,13 @@ class TestFunction(unittest.TestCase): N = 3 - ig = (N,N) + ig = ImageGeometry(N,N) ag = ig op1 = Gradient(ig) op2 = Identity(ig, ag) # Form Composite Operator - operator = BlockOperator((2,1), op1, op2 ) + operator = BlockOperator(op1, op2 , shape=(2,1) ) # Create functions noisy_data = ImageData(np.random.randint(10, size=ag)) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py new file mode 100755 index 0000000..3819de5 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction + +from skimage.util import random_noise + +# ############################################################################ +# Create phantom for TV denoising + +N = 200 +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. 
Add Gaussian noise +n1 = random_noise(data, mode='gaussian', seed=10) +noisy_data = ImageData(n1) + + +#%% + +# Regularisation Parameter +alpha = 200 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '0' +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions +# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ +# L2NormSq(0.5, b = noisy_data) ) + + f1 = alpha * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b = noisy_data) + + f = BlockFunction(f1, f2 ) + g = ZeroFun() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = 0.5 * L2NormSquared(b = noisy_data) + ########################################################################### +#%% + +# Compute operator Norm +normK = operator.norm() +print ("normK", normK) +# Primal & dual stepsizes +sigma = 1 +tau = 1/(sigma*normK**2) + + +#%% +## Number of iterations +opt = {'niter':2000} +## +### Run algorithm +result, total_time, objective = PDHG(f, g, operator, \ + tau = tau, sigma = sigma, opt = opt) +#%% +###Show results +if isinstance(result, BlockDataContainer): + sol = result.get_item(0).as_array() +else: + sol = result.as_array() + +#sol = result.as_array() +# +plt.imshow(sol) +plt.colorbar() +plt.show() +# +### +plt.imshow(noisy_data.as_array()) +plt.colorbar() +plt.show() +## +plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +plt.legend() +plt.show() + + +#%% +# |
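
For reference, the step sizes hard-coded in the script above (`sigma = 1`, `tau = 1/(sigma*normK**2)`) satisfy the usual PDHG (Chambolle-Pock) convergence condition sigma * tau * ||K||^2 <= 1, where ||K|| is the operator norm returned by `operator.norm()`. A tiny stand-alone check, using a hypothetical value for the norm (the script computes the real one):

```python
# sigma * tau * ||K||^2 <= 1 is the standard PDHG step-size condition.
# normK below is a placeholder; the script above uses operator.norm().
normK = 8 ** 0.5          # ~sqrt(8), the norm of a 2D gradient operator
sigma = 1.0
tau = 1.0 / (sigma * normK ** 2)
assert sigma * tau * normK ** 2 <= 1.0 + 1e-12
```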