15 files changed, 175 insertions, 396 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index efc465c..b2b9791 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -17,7 +17,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ccpi.optimisation.ops import Identity, FiniteDiff2D
import numpy
from ccpi.framework import DataContainer
import warnings
@@ -99,12 +98,6 @@ class Norm2(Function):
            raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
-class TV2D(Norm2):
-
-    def __init__(self, gamma):
-        super(TV2D,self).__init__(gamma, 0)
-        self.op = FiniteDiff2D()
-        self.L = self.op.get_max_sing_val()
# Define a class for squared 2-norm
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 835f96d..2c42532 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -7,7 +7,6 @@ Created on Fri Mar 1 22:51:17 2019
"""
from ccpi.optimisation.operators import LinearOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, BlockDataContainer
import numpy as np
@@ -318,7 +317,7 @@ class FiniteDiff(LinearOperator):
    def norm(self):
        x0 = self.gm_domain.allocate('random_int')
-        self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+        self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
        return self.s1
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 4935435..e0b8a32 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -7,7 +7,6 @@ Created on Fri Mar 1 22:50:04 2019
"""
from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer
import numpy
from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff
@@ -90,7 +89,7 @@ class Gradient(LinearOperator):
    def norm(self):
        x0 = self.gm_domain.allocate('random')
-        self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0)
+        self.s1, sall, svec = LinearOperator.PowerMethod(self, 10, x0)
        return self.s1

    def __rmul__(self, scalar):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
index e19304f..bc18f9b 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
@@ -6,6 +6,8 @@ Created on Tue Mar 5 15:57:52 2019
"""
from ccpi.optimisation.operators import Operator
+from ccpi.framework import ImageGeometry
+import numpy
class LinearOperator(Operator):
@@ -20,3 +22,63 @@ class LinearOperator(Operator):
        only available to linear operators'''
        raise NotImplementedError
+
+    @staticmethod
+    def PowerMethod(operator, iterations, x0=None):
+        '''Power method to calculate iteratively the Lipschitz constant'''
+        # Initialise random
+
+        if x0 is None:
+            #x0 = op.create_image_data()
+            x0 = operator.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+
+        x1 = operator.domain_geometry().allocate()
+        y_tmp = operator.range_geometry().allocate()
+        s = numpy.zeros(iterations)
+        # Loop
+        for it in numpy.arange(iterations):
+            #x1 = operator.adjoint(operator.direct(x0))
+            operator.direct(x0,out=y_tmp)
+            operator.adjoint(y_tmp,out=x1)
+            x1norm = x1.norm()
+            #s[it] = (x1*x0).sum() / (x0.squared_norm())
+            s[it] = x1.dot(x0) / x0.squared_norm()
+            #x0 = (1.0/x1norm)*x1
+            #x1 *= (1.0 / x1norm)
+            #x0.fill(x1)
+            x1.multiply((1.0/x1norm), out=x0)
+        return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+
+    @staticmethod
+    def PowerMethodNonsquare(op, numiters, x0=None):
+        # Initialise random
+        # Jakob's
+        # inputsize , outputsize = op.size()
+        #x0 = ImageContainer(numpy.random.randn(*inputsize)
+        # Edo's
+        #vg = ImageGeometry(voxel_num_x=inputsize[0],
+        #                   voxel_num_y=inputsize[1],
+        #                   voxel_num_z=inputsize[2])
+        #
+        #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
+        #print (x0)
+        #x0.fill(numpy.random.randn(*x0.shape))
+
+        if x0 is None:
+            #x0 = op.create_image_data()
+            x0 = op.allocate_direct()
+            x0.fill(numpy.random.randn(*x0.shape))
+
+        s = numpy.zeros(numiters)
+        # Loop
+        for it in numpy.arange(numiters):
+            x1 = op.adjoint(op.direct(x0))
+            #x1norm = numpy.sqrt((x1*x1).sum())
+            x1norm = x1.norm()
+            #print ("x0 **********" ,x0)
+            #print ("x1 **********" ,x1)
+            s[it] = (x1*x0).sum() / (x0.squared_norm())
+            x0 = (1.0/x1norm)*x1
+        return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+
+
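The new static method estimates the largest singular value of a linear operator by repeatedly applying the normal operator A^T A to a starting vector. As a rough usage sketch (assuming the ccpi.framework and operators API shown in this changeset; the Gradient operator, iteration count and 64x64 geometry are illustrative):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Gradient, LinearOperator

    ig = ImageGeometry(64, 64)            # illustrative image geometry
    G = Gradient(ig)

    # sigma approximates ||G|| (largest singular value); the per-iteration
    # estimates and the final iterate are also returned
    sigma, estimates, v = LinearOperator.PowerMethod(G, 25)
    L = sigma ** 2                        # Lipschitz constant of x -> G.adjoint(G.direct(x))

The operators' norm() methods updated above (FiniteDiff, Gradient, SymmetrizedGradient) call this routine internally, so sigma is what they return as the operator norm.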
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
new file mode 100644
index 0000000..62e22e0
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
@@ -0,0 +1,51 @@
+import numpy
+from scipy.sparse.linalg import svds
+from ccpi.framework import DataContainer
+from ccpi.framework import AcquisitionData
+from ccpi.framework import ImageData
+from ccpi.framework import ImageGeometry
+from ccpi.framework import AcquisitionGeometry
+from numbers import Number
+from ccpi.optimisation.operators import LinearOperator
+class LinearOperatorMatrix(LinearOperator):
+    def __init__(self,A):
+        self.A = A
+        self.s1 = None   # Largest singular value, initially unknown
+        super(LinearOperatorMatrix, self).__init__()
+
+    def direct(self,x, out=None):
+        if out is None:
+            return type(x)(numpy.dot(self.A,x.as_array()))
+        else:
+            numpy.dot(self.A, x.as_array(), out=out.as_array())
+
+
+    def adjoint(self,x, out=None):
+        if out is None:
+            return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
+        else:
+            numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array())
+
+
+    def size(self):
+        return self.A.shape
+
+    def get_max_sing_val(self):
+        # If unknown, compute and store. If known, simply return it.
+        if self.s1 is None:
+            self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
+            return self.s1
+        else:
+            return self.s1
+    def allocate_direct(self):
+        '''allocates the memory to hold the result of adjoint'''
+        #numpy.dot(self.A.transpose(),x.as_array())
+        M_A, N_A = self.A.shape
+        out = numpy.zeros((N_A,1))
+        return DataContainer(out)
+    def allocate_adjoint(self):
+        '''allocate the memory to hold the result of direct'''
+        #numpy.dot(self.A.transpose(),x.as_array())
+        M_A, N_A = self.A.shape
+        out = numpy.zeros((M_A,1))
+        return DataContainer(out)
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index c38458d..3fc43b8 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -7,7 +7,6 @@ Created on Fri Mar 1 22:53:55 2019
"""
from ccpi.optimisation.operators import Gradient, Operator, LinearOperator, ScaledOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer
import numpy
from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff
@@ -108,7 +107,7 @@ class SymmetrizedGradient(Gradient):
        #TODO this takes time for big ImageData
        # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)
        x0 = ImageData(np.random.random_sample(self.domain_dim()))
-        self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+        self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
        return self.s1
diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
index 7040d3a..811adf6 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
@@ -19,5 +19,5 @@ from .GradientOperator import Gradient
from .SymmetrizedGradientOperator import SymmetrizedGradient
from .IdentityOperator import Identity
from .ZeroOperator import ZeroOp
-
+from .LinearOperatorMatrix import LinearOperatorMatrix
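With the export added above, the matrix-backed operator that previously lived in ops.py can be used directly from the operators package. A minimal sketch, assuming a dense NumPy array; the matrix shape and random data are illustrative, and DataContainer is constructed from a bare array exactly as allocate_direct does in the class itself:

    import numpy
    from ccpi.framework import DataContainer
    from ccpi.optimisation.operators import LinearOperatorMatrix

    A = numpy.random.rand(5, 3)                   # arbitrary dense matrix
    Aop = LinearOperatorMatrix(A)

    x = DataContainer(numpy.random.rand(3, 1))
    y = Aop.direct(x)                             # y = A x
    z = Aop.adjoint(y)                            # z = A^T y
    print (Aop.size(), Aop.get_max_sing_val())    # (5, 3) and the largest singular value via svds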
diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py
deleted file mode 100755
index fcd0d9e..0000000
--- a/Wrappers/Python/ccpi/optimisation/ops.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy
-from scipy.sparse.linalg import svds
-from ccpi.framework import DataContainer
-from ccpi.framework import AcquisitionData
-from ccpi.framework import ImageData
-from ccpi.framework import ImageGeometry
-from ccpi.framework import AcquisitionGeometry
-from numbers import Number
-# Maybe operators need to know what types they take as inputs/outputs
-# to not just use generic DataContainer
-
-
-class Operator(object):
-    '''Operator that maps from a space X -> Y'''
-    def __init__(self, **kwargs):
-        self.scalar = 1
-    def is_linear(self):
-        '''Returns if the operator is linear'''
-        return False
-    def direct(self,x, out=None):
-        raise NotImplementedError
-    def size(self):
-        # To be defined for specific class
-        raise NotImplementedError
-    def norm(self):
-        raise NotImplementedError
-    def allocate_direct(self):
-        '''Allocates memory on the Y space'''
-        raise NotImplementedError
-    def allocate_adjoint(self):
-        '''Allocates memory on the X space'''
-        raise NotImplementedError
-    def range_geometry(self):
-        raise NotImplementedError
-    def domain_geometry(self):
-        raise NotImplementedError
-    def __rmul__(self, other):
-        '''reverse multiplication of Operator with number sets the variable scalar in the Operator'''
-        assert isinstance(other, Number)
-        self.scalar = other
-        return self
-
-class LinearOperator(Operator):
-    '''Operator that maps from a space X -> Y'''
-    def is_linear(self):
-        '''Returns if the operator is linear'''
-        return True
-    def adjoint(self,x, out=None):
-        raise NotImplementedError
-
-class Identity(Operator):
-    def __init__(self):
-        self.s1 = 1.0
-        self.L = 1
-        super(Identity, self).__init__()
-
-    def direct(self,x,out=None):
-        if out is None:
-            return x.copy()
-        else:
-            out.fill(x)
-
-    def adjoint(self,x, out=None):
-        if out is None:
-            return x.copy()
-        else:
-            out.fill(x)
-
-    def size(self):
-        return NotImplemented
-
-    def get_max_sing_val(self):
-        return self.s1
-
-class TomoIdentity(Operator):
-    def __init__(self, geometry, **kwargs):
-        super(TomoIdentity, self).__init__()
-        self.s1 = 1.0
-        self.geometry = geometry
-
-    def is_linear(self):
-        return True
-    def direct(self,x,out=None):
-
-        if out is None:
-            if self.scalar != 1:
-                return x * self.scalar
-            return x.copy()
-        else:
-            if self.scalar != 1:
-                out.fill(x * self.scalar)
-                return
-            out.fill(x)
-            return
-
-    def adjoint(self,x, out=None):
-        return self.direct(x, out)
-
-    def norm(self):
-        return self.s1
-
-    def get_max_sing_val(self):
-        return self.s1
-    def allocate_direct(self):
-        if issubclass(type(self.geometry), ImageGeometry):
-            return ImageData(geometry=self.geometry)
-        elif issubclass(type(self.geometry), AcquisitionGeometry):
-            return AcquisitionData(geometry=self.geometry)
-        else:
-            raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry))
-    def allocate_adjoint(self):
-        return self.allocate_direct()
-    def range_geometry(self):
-        return self.geometry
-    def domain_geometry(self):
-        return self.geometry
-
-
-
-class FiniteDiff2D(Operator):
-    def __init__(self):
-        self.s1 = 8.0
-        super(FiniteDiff2D, self).__init__()
-
-    def direct(self,x, out=None):
-        '''Forward differences with Neumann BC.'''
-        # FIXME this seems to be working only with numpy arrays
-
-        d1 = numpy.zeros_like(x.as_array())
-        d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1]
-        d2 = numpy.zeros_like(x.as_array())
-        d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:]
-        d = numpy.stack((d1,d2),0)
-        #x.geometry.voxel_num_z = 2
-        return type(x)(d,False,geometry=x.geometry)
-
-    def adjoint(self,x, out=None):
-        '''Backward differences, Neumann BC.'''
-        Nrows = x.get_dimension_size('horizontal_x')
-        Ncols = x.get_dimension_size('horizontal_y')
-        Nchannels = 1
-        if len(x.shape) == 4:
-            Nchannels = x.get_dimension_size('channel')
-        zer = numpy.zeros((Nrows,1))
-        xxx = x.as_array()[0,:,:-1]
-        #
-        h = numpy.concatenate((zer,xxx), 1)
-        h -= numpy.concatenate((xxx,zer), 1)
-
-        zer = numpy.zeros((1,Ncols))
-        xxx = x.as_array()[1,:-1,:]
-        #
-        v = numpy.concatenate((zer,xxx), 0)
-        v -= numpy.concatenate((xxx,zer), 0)
-        return type(x)(h + v, False, geometry=x.geometry)
-
-    def size(self):
-        return NotImplemented
-
-    def get_max_sing_val(self):
-        return self.s1
-
-def PowerMethodNonsquareOld(op,numiters):
-    # Initialise random
-    # Jakob's
-    #inputsize = op.size()[1]
-    #x0 = ImageContainer(numpy.random.randn(*inputsize)
-    # Edo's
-    #vg = ImageGeometry(voxel_num_x=inputsize[0],
-    #                   voxel_num_y=inputsize[1],
-    #                   voxel_num_z=inputsize[2])
-    #
-    #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
-    #print (x0)
-    #x0.fill(numpy.random.randn(*x0.shape))
-
-    x0 = op.create_image_data()
-
-    s = numpy.zeros(numiters)
-    # Loop
-    for it in numpy.arange(numiters):
-        x1 = op.adjoint(op.direct(x0))
-        x1norm = numpy.sqrt((x1**2).sum())
-        #print ("x0 **********" ,x0)
-        #print ("x1 **********" ,x1)
-        s[it] = (x1*x0).sum() / (x0*x0).sum()
-        x0 = (1.0/x1norm)*x1
-    return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
-
-#def PowerMethod(op,numiters):
-#    # Initialise random
-#    x0 = np.random.randn(400)
-#    s = np.zeros(numiters)
-#    # Loop
-#    for it in np.arange(numiters):
-#        x1 = np.dot(op.transpose(),np.dot(op,x0))
-#        x1norm = np.sqrt(np.sum(np.dot(x1,x1)))
-#        s[it] = np.dot(x1,x0) / np.dot(x1,x0)
-#        x0 = (1.0/x1norm)*x1
-#    return s, x0
-
-
-def PowerMethodNonsquare(op,numiters , x0=None):
-    # Initialise random
-    # Jakob's
-    # inputsize , outputsize = op.size()
-    #x0 = ImageContainer(numpy.random.randn(*inputsize)
-    # Edo's
-    #vg = ImageGeometry(voxel_num_x=inputsize[0],
-    #                   voxel_num_y=inputsize[1],
-    #                   voxel_num_z=inputsize[2])
-    #
-    #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
-    #print (x0)
-    #x0.fill(numpy.random.randn(*x0.shape))
-
-    if x0 is None:
-        #x0 = op.create_image_data()
-        x0 = op.allocate_direct()
-        x0.fill(numpy.random.randn(*x0.shape))
-
-    s = numpy.zeros(numiters)
-    # Loop
-    for it in numpy.arange(numiters):
-        x1 = op.adjoint(op.direct(x0))
-        #x1norm = numpy.sqrt((x1*x1).sum())
-        x1norm = x1.norm()
-        #print ("x0 **********" ,x0)
-        #print ("x1 **********" ,x1)
-        s[it] = (x1*x0).sum() / (x0.squared_norm())
-        x0 = (1.0/x1norm)*x1
-    return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
-
-class LinearOperatorMatrix(Operator):
-    def __init__(self,A):
-        self.A = A
-        self.s1 = None   # Largest singular value, initially unknown
-        super(LinearOperatorMatrix, self).__init__()
-
-    def direct(self,x, out=None):
-        if out is None:
-            return type(x)(numpy.dot(self.A,x.as_array()))
-        else:
-            numpy.dot(self.A, x.as_array(), out=out.as_array())
-
-
-    def adjoint(self,x, out=None):
-        if out is None:
-            return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
-        else:
-            numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array())
-
-
-    def size(self):
-        return self.A.shape
-
-    def get_max_sing_val(self):
-        # If unknown, compute and store. If known, simply return it.
-        if self.s1 is None:
-            self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
-            return self.s1
-        else:
-            return self.s1
-    def allocate_direct(self):
-        '''allocates the memory to hold the result of adjoint'''
-        #numpy.dot(self.A.transpose(),x.as_array())
-        M_A, N_A = self.A.shape
-        out = numpy.zeros((N_A,1))
-        return DataContainer(out)
-    def allocate_adjoint(self):
-        '''allocate the memory to hold the result of direct'''
-        #numpy.dot(self.A.transpose(),x.as_array())
-        M_A, N_A = self.A.shape
-        out = numpy.zeros((M_A,1))
-        return DataContainer(out)
diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py
index a20f289..aeb8454 100755
--- a/Wrappers/Python/test/test_BlockDataContainer.py
+++ b/Wrappers/Python/test/test_BlockDataContainer.py
@@ -7,15 +7,9 @@ Created on Tue Mar 5 16:08:23 2019
import unittest
import numpy
-#from ccpi.plugins.ops import CCPiProjectorSimple
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.funcs import Norm2sq, Norm1
from ccpi.framework import ImageGeometry, AcquisitionGeometry
from ccpi.framework import ImageData, AcquisitionData
-#from ccpi.optimisation.algorithms import GradientDescent
from ccpi.framework import BlockDataContainer, DataContainer
-#from ccpi.optimisation.Algorithms import CGLS
import functools
from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
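The test imports above reflect the substitution made throughout this changeset: the old TomoIdentity from ccpi.optimisation.ops is replaced by the Identity operator, which takes its geometry as the first argument. A small sketch of the replacement pattern (geometry sizes are illustrative):

    import numpy
    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(10, 20, 30)
    Id = Identity(ig)                 # previously: TomoIdentity(geometry=ig)
    x = ig.allocate('random_int')
    numpy.testing.assert_array_equal(Id.direct(x).as_array(), x.as_array())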
diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py
index e1c05fb..b82c849 100644
--- a/Wrappers/Python/test/test_BlockOperator.py
+++ b/Wrappers/Python/test/test_BlockOperator.py
@@ -1,17 +1,11 @@
import unittest
from ccpi.optimisation.operators import BlockOperator
from ccpi.framework import BlockDataContainer
-from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.operators import Identity
from ccpi.framework import ImageGeometry, ImageData
import numpy
from ccpi.optimisation.operators import FiniteDiff

-class TestOperator(TomoIdentity):
-    def __init__(self, *args, **kwargs):
-        super(TestOperator, self).__init__(*args, **kwargs)
-        self.range = kwargs.get('range', self.geometry)
-    def range_geometry(self):
-        return self.range

class TestBlockOperator(unittest.TestCase):
@@ -21,7 +15,7 @@ class TestBlockOperator(unittest.TestCase):
               ImageGeometry(10,20,30) , \
               ImageGeometry(10,20,30) ]
        x = [ g.allocate() for g in ig ]
-        ops = [ TomoIdentity(g) for g in ig ]
+        ops = [ Identity(g) for g in ig ]

        K = BlockOperator(*ops)
        X = BlockDataContainer(x[0])
@@ -50,7 +44,7 @@ class TestBlockOperator(unittest.TestCase):
               ImageGeometry(10,20,30) , \
               ImageGeometry(10,20,30) ]
        x = [ g.allocate() for g in ig ]
-        ops = [ TomoIdentity(g) for g in ig ]
+        ops = [ Identity(g) for g in ig ]

        K = BlockOperator(*ops)
        self.assertTrue(False)
@@ -70,8 +64,8 @@ class TestBlockOperator(unittest.TestCase):
               ImageGeometry(10,22,31) , \
               ImageGeometry(10,20,31) ]
        x = [ g.allocate() for g in ig ]
-        ops = [ TestOperator(g, range=r) for g,r in zip(ig, rg0) ]
-        ops += [ TestOperator(g, range=r) for g,r in zip(ig, rg1) ]
+        ops = [ Identity(g, gm_range=r) for g,r in zip(ig, rg0) ]
+        ops += [ Identity(g, gm_range=r) for g,r in zip(ig, rg1) ]

        K = BlockOperator(*ops, shape=(2,3))
        print ("K col comp? " , K.column_wise_compatible())
@@ -90,7 +84,7 @@ class TestBlockOperator(unittest.TestCase):
               ImageGeometry(10,20,30) , \
               ImageGeometry(10,20,30) ]
        x = [ g.allocate() for g in ig ]
-        ops = [ TomoIdentity(g) for g in ig ]
+        ops = [ Identity(g) for g in ig ]

        val = 1
        # test limit as non Scaled
@@ -121,7 +115,7 @@ class TestBlockOperator(unittest.TestCase):
               #ImageGeometry(10,20,30) , \
               ImageGeometry(2,3 ) ]
        x = [ g.allocate() for g in ig ]
-        ops = [ TomoIdentity(g) for g in ig ]
+        ops = [ Identity(g) for g in ig ]

        # test limit as non Scaled
@@ -158,7 +152,7 @@ class TestBlockOperator(unittest.TestCase):
        print (img.shape, ig.shape)
        self.assertTrue(img.shape == (30,20,10))
        self.assertEqual(img.sum(), 0)
-        Id = TomoIdentity(ig)
+        Id = Identity(ig)
        y = Id.direct(img)
        numpy.testing.assert_array_equal(y.as_array(), img.as_array())
@@ -167,7 +161,7 @@ class TestBlockOperator(unittest.TestCase):
        from ccpi.plugins.ops import CCPiProjectorSimple
        from ccpi.optimisation.ops import PowerMethodNonsquare
-        from ccpi.optimisation.ops import TomoIdentity
+        from ccpi.optimisation.operators import Identity
        from ccpi.optimisation.funcs import Norm2sq, Norm1
        from ccpi.framework import ImageGeometry, AcquisitionGeometry
        from ccpi.optimisation.Algorithms import GradientDescent
@@ -265,8 +259,8 @@ class TestBlockOperator(unittest.TestCase):
                          ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
        # setup a tomo identity
-        Ibig = 1e5 * TomoIdentity(geometry=ig)
-        Ismall = 1e-5 * TomoIdentity(geometry=ig)
+        Ibig = 1e5 * Identity(ig)
+        Ismall = 1e-5 * Identity(ig)

        # composite operator
        Kbig = BlockOperator(A, Ibig, shape=(2,1))
@@ -362,4 +356,4 @@ class TestBlockOperator(unittest.TestCase):
        G1 = FiniteDiff(ig1, direction=2, bnd_cond = 'Periodic')
        print(ig1.shape==u1.shape)
        print (G1.norm())
-        numpy.testing.assert_allclose(G1.norm(), numpy.sqrt(4), atol=0.1)
\ No newline at end of file
+        numpy.testing.assert_allclose(G1.norm(), numpy.sqrt(4), atol=0.1)
diff --git a/Wrappers/Python/test/test_Gradient.py b/Wrappers/Python/test/test_Gradient.py
index 1d6485c..c6b2d2e 100755
--- a/Wrappers/Python/test/test_Gradient.py
+++ b/Wrappers/Python/test/test_Gradient.py
@@ -1,9 +1,5 @@
import unittest
import numpy
-#from ccpi.plugins.ops import CCPiProjectorSimple
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.funcs import Norm2sq, Norm1
from ccpi.framework import ImageGeometry, AcquisitionGeometry
from ccpi.framework import ImageData, AcquisitionData
#from ccpi.optimisation.algorithms import GradientDescent
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
index 293fb43..0d6cb8b 100644
--- a/Wrappers/Python/test/test_Operator.py
+++ b/Wrappers/Python/test/test_Operator.py
@@ -1,6 +1,6 @@
import unittest
#from ccpi.optimisation.operators import Operator
-from ccpi.optimisation.ops import TomoIdentity
+#from ccpi.optimisation.ops import TomoIdentity
from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer, DataContainer
from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator,\
    FiniteDiff
@@ -8,6 +8,7 @@ import numpy
from timeit import default_timer as timer
from ccpi.framework import ImageGeometry
from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff
+from ccpi.optimisation.operators import LinearOperator

def dt(steps):
    return steps[-1] - steps[-2]
@@ -53,7 +54,7 @@ class TestOperator(CCPiTestClass):
        ig = ImageGeometry(10,20,30)
        img = ig.allocate()
        scalar = 0.5
-        sid = scalar * TomoIdentity(ig)
+        sid = scalar * Identity(ig)
        numpy.testing.assert_array_equal(scalar * img.as_array(), sid.direct(img).as_array())

@@ -62,7 +63,7 @@ class TestOperator(CCPiTestClass):
        img = ig.allocate()
        self.assertTrue(img.shape == (30,20,10))
        self.assertEqual(img.sum(), 0)
-        Id = TomoIdentity(ig)
+        Id = Identity(ig)
        y = Id.direct(img)
        numpy.testing.assert_array_equal(y.as_array(), img.as_array())
@@ -97,7 +98,35 @@ class TestOperator(CCPiTestClass):
        G.adjoint(u, out=res)
        w = G.adjoint(u)
        self.assertNumpyArrayEqual(res.as_array(), w.as_array())
+    def test_PowerMethod(self):
+
+        N, M = 200, 300
+        niter = 1000
+        ig = ImageGeometry(N, M)
+        Id = Identity(ig)
+
+        G = Gradient(ig)
+
+        uid = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT, seed=1)
+
+        a = LinearOperator.PowerMethod(Id, niter, uid)
+        b = LinearOperator.PowerMethodNonsquare(Id, niter, uid)
+
+        print ("Edo impl", a[0])
+        print ("old impl", b[0])
+
+        #self.assertAlmostEqual(a[0], b[0])
+        self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=6)
+
+        a = LinearOperator.PowerMethod(G, niter, uid)
+        b = LinearOperator.PowerMethodNonsquare(G, niter, uid)
+
+        print ("Edo impl", a[0])
+        print ("old impl", b[0])
+        self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=2)
+        #self.assertAlmostEqual(a[0], b[0])
+
+
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index a35ffc1..669804e 100755
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -12,7 +12,7 @@ from ccpi.framework import ImageData
from ccpi.framework import AcquisitionData
from ccpi.framework import ImageGeometry
from ccpi.framework import AcquisitionGeometry
-from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.operators import Identity
from ccpi.optimisation.funcs import Norm2sq
from ccpi.optimisation.algorithms import GradientDescent
from ccpi.optimisation.algorithms import CGLS
@@ -26,7 +26,7 @@ class TestAlgorithms(unittest.TestCase):
    def setUp(self):
        #wget.download('https://github.com/DiamondLightSource/Savu/raw/master/test_data/data/24737_fd.nxs')
        #self.filename = '24737_fd.nxs'
-        # we use TomoIdentity as the operator and solve the simple least squares
+        # we use Identity as the operator and solve the simple least squares
        # problem for a random-valued ImageData or AcquisitionData b?
        # Then we know the minimiser is b itself
@@ -48,7 +48,7 @@ class TestAlgorithms(unittest.TestCase):
        # fill with random numbers
        b.fill(numpy.random.random(x_init.shape))

-        identity = TomoIdentity(geometry=ig)
+        identity = Identity(ig)

        norm2sq = Norm2sq(identity, b)
@@ -66,7 +66,7 @@ class TestAlgorithms(unittest.TestCase):
        # fill with random numbers
        b.fill(numpy.random.random(x_init.shape))

-        identity = TomoIdentity(geometry=ig)
+        identity = Identity(ig)

        alg = CGLS(x_init=x_init, operator=identity, data=b)
        alg.max_iteration = 1
@@ -83,7 +83,7 @@ class TestAlgorithms(unittest.TestCase):
        x_init = ImageData(geometry=ig)
        x_init.fill(numpy.random.random(x_init.shape))

-        identity = TomoIdentity(geometry=ig)
+        identity = Identity(ig)

        norm2sq = Norm2sq(identity, b)
        norm2sq.L = 2 * norm2sq.c * identity.norm()**2
@@ -121,4 +121,4 @@ class TestAlgorithms(unittest.TestCase):

if __name__ == '__main__':
    unittest.main()
-
\ No newline at end of file
+
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 05bdd7a..b428c12 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -17,15 +17,10 @@ from ccpi.framework import BlockDataContainer
from numbers import Number
from ccpi.optimisation.operators import Gradient

-#from ccpi.optimisation.functions import SimpleL2NormSq
from ccpi.optimisation.functions import L2NormSquared
-#from ccpi.optimisation.functions import SimpleL1Norm
from ccpi.optimisation.functions import L1Norm, MixedL21Norm

-from ccpi.optimisation.funcs import Norm2sq
-# from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq
-# from ccpi.optimisation.functions.L1Norm import SimpleL1Norm, L1Norm
-#from ccpi.optimisation.functions import mixed_L12Norm
+from ccpi.optimisation.functions import Norm2sq
from ccpi.optimisation.functions import ZeroFunction

from ccpi.optimisation.functions import FunctionOperatorComposition
diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py
index c698032..9b4d53b 100755
--- a/Wrappers/Python/test/test_run_test.py
+++ b/Wrappers/Python/test/test_run_test.py
@@ -8,16 +8,15 @@ from ccpi.framework import ImageGeometry
from ccpi.framework import AcquisitionGeometry
from ccpi.optimisation.algorithms import FISTA
#from ccpi.optimisation.algs import FBPD
-from ccpi.optimisation.funcs import Norm2sq
+from ccpi.optimisation.functions import Norm2sq
from ccpi.optimisation.functions import ZeroFunction
from ccpi.optimisation.funcs import Norm1
-from ccpi.optimisation.funcs import TV2D
from ccpi.optimisation.funcs import Norm2

-from ccpi.optimisation.ops import LinearOperatorMatrix
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.ops import Identity
-from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.operators import LinearOperatorMatrix
+from ccpi.optimisation.operators import Identity
+#from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.operators import LinearOperator

import numpy.testing
@@ -220,7 +219,7 @@ class TestAlgorithms(unittest.TestCase):
            # Create object instances with the test data A and b.
            f = Norm2sq(A, b, c=0.5, memopt=True)
-            f.L = PowerMethodNonsquare(A, 25, x_init)[0]
+            f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
            print ("Lipschitz", f.L)
            g0 = ZeroFun()

@@ -289,7 +288,7 @@ class TestAlgorithms(unittest.TestCase):
            # Data fidelity term
            f_denoise = Norm2sq(I, y, c=0.5, memopt=True)
            x_init = ImageData(geometry=ig)
-            f_denoise.L = PowerMethodNonsquare(I, 25, x_init)[0]
+            f_denoise.L = LinearOperator.PowerMethod(I, 25, x_init)[0]

            # 1-norm regulariser
            lam1_denoise = 1.0
@@ -335,43 +334,6 @@ class TestAlgorithms(unittest.TestCase):
            self.assertNumpyArrayAlmostEqual(
                x_fbpd1_denoise.array.flatten(), x1_denoise.value, 5)

-            x1_cvx = x1_denoise.value
-            x1_cvx.shape = (N, N)
-
-            # Now TV with FBPD
-            lam_tv = 0.1
-            gtv = TV2D(lam_tv)
-            gtv(gtv.op.direct(x_init_denoise))
-
-            opt_tv = {'tol': 1e-4, 'iter': 10000}
-
-            x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\
-                criterfbpdtv_denoise = \
-                FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv, opt=opt_tv)
-            print(x_fbpdtv_denoise)
-            print(criterfbpdtv_denoise[-1])
-
-            # Compare to CVXPY
-
-            # Construct the problem.
-            xtv_denoise = Variable((N, N))
-            objectivetv_denoise = Minimize(
-                0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise))
-            probtv_denoise = Problem(objectivetv_denoise)
-
-            # The optimal objective is returned by prob.solve().
-            resulttv_denoise = probtv_denoise.solve(
-                verbose=False, solver=SCS, eps=1e-12)
-
-            # The optimal solution for x is stored in x.value and optimal objective value
-            # is in result as well as in objective.value
-            print("CVXPY least squares plus 1-norm solution and objective value:")
-            print(xtv_denoise.value)
-            print(objectivetv_denoise.value)
-
-            self.assertNumpyArrayAlmostEqual(
-                x_fbpdtv_denoise.as_array(), xtv_denoise.value, 1)
-
        else:
            self.assertTrue(cvx_not_installable)
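Taken together, the test updates show the intended replacement for code that previously called PowerMethodNonsquare from ccpi.optimisation.ops: obtain the operator norm from LinearOperator.PowerMethod, or from the operator's norm() method (which for Gradient and FiniteDiff now delegates to it), and build the Lipschitz constant from that norm. A hedged sketch mirroring test_algorithms.py (geometry size is illustrative, and the Norm2sq default weight c is assumed):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Identity
    from ccpi.optimisation.funcs import Norm2sq

    ig = ImageGeometry(12, 13, 14)            # illustrative geometry
    b = ig.allocate('random')
    identity = Identity(ig)

    f = Norm2sq(identity, b)
    f.L = 2 * f.c * identity.norm() ** 2      # Lipschitz constant from the operator norm, as in test_algorithms.py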