From 3271aa9c4ca50177bf2f9e37269efa03f52f635e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 4 Jun 2019 13:36:50 +0100 Subject: fixing tests --- .../Python/ccpi/optimisation/algorithms/FISTA.py | 117 +++++++-------------- .../Python/ccpi/optimisation/functions/Norm2Sq.py | 2 +- .../ccpi/optimisation/functions/ScaledFunction.py | 3 +- .../optimisation/operators/LinearOperatorMatrix.py | 21 ++-- Wrappers/Python/test/test_run_test.py | 14 ++- 5 files changed, 59 insertions(+), 98 deletions(-) (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py index 8ea2b6c..04e7c38 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py @@ -20,102 +20,61 @@ class FISTA(Algorithm): x_init: initial guess f: data fidelity g: regularizer - h: - opt: additional algorithm + opt: additional options ''' - + + def __init__(self, **kwargs): '''initialisation can be done at creation time if all proper variables are passed or later with set_up''' super(FISTA, self).__init__() - self.f = None - self.g = None + self.f = kwargs.get('f', None) + self.g = kwargs.get('g', None) + self.x_init = kwargs.get('x_init',None) self.invL = None self.t_old = 1 - args = ['x_init', 'f', 'g', 'opt'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(kwargs['x_init'], - f=kwargs['f'], - g=kwargs['g'], - opt=kwargs['opt']) + if self.f is not None and self.g is not None: + print ("Calling from creator") + self.set_up(self.x_init, self.f, self.g) + - def set_up(self, x_init, f=None, g=None, opt=None): + def set_up(self, x_init, f, g, opt=None, **kwargs): - # default inputs - if f is None: - self.f = ZeroFunction() - else: - self.f = f - if g is None: - g = ZeroFunction() - self.g = g - else: - self.g = g + self.f = f + self.g = g # algorithmic parameters if opt is None: - opt = {'tol': 1e-4, 'memopt':False} - - self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - self.memopt = memopt - - # initialization - if memopt: - self.y = x_init.clone() - self.x_old = x_init.clone() - self.x = x_init.clone() - self.u = x_init.clone() - else: - self.x_old = x_init.copy() - self.y = x_init.copy() - - #timing = numpy.zeros(max_iter) - #criter = numpy.zeros(max_iter) + opt = {'tol': 1e-4} - + self.y = x_init.copy() + self.x_old = x_init.copy() + self.x = x_init.copy() + self.u = x_init.copy() + + self.invL = 1/f.L self.t_old = 1 def update(self): - # algorithm loop - #for it in range(0, max_iter): - - if self.memopt: - # u = y - invL*f.grad(y) - # store the result in x_old - self.f.gradient(self.y, out=self.u) - self.u.__imul__( -self.invL ) - self.u.__iadd__( self.y ) - # x = g.prox(u,invL) - self.g.proximal(self.u, self.invL, out=self.x) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - # y = x + (t_old-1)/t*(x-x_old) - self.x.subtract(self.x_old, out=self.y) - self.y.__imul__ ((self.t_old-1)/self.t) - self.y.__iadd__( self.x ) - - self.x_old.fill(self.x) - self.t_old = self.t - - - else: - u = self.y - self.invL*self.f.gradient(self.y) - - self.x = self.g.proximal(u,self.invL) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old) - - self.x_old = self.x.copy() - self.t_old = self.t + + self.f.gradient(self.y, out=self.u) + self.u.__imul__( -self.invL ) + self.u.__iadd__( 
self.y ) + # x = g.prox(u,invL) + self.g.proximal(self.u, self.invL, out=self.x) + + self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) + + self.x.subtract(self.x_old, out=self.y) + self.y.__imul__ ((self.t_old-1)/self.t) + self.y.__iadd__( self.x ) + + self.x_old.fill(self.x) + self.t_old = self.t def update_objective(self): - self.loss.append( self.f(self.x) + self.g(self.x) ) \ No newline at end of file + self.loss.append( self.f(self.x) + self.g(self.x) ) + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py index 0b6bb0b..d9d9010 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py +++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py @@ -88,7 +88,7 @@ class Norm2Sq(Function): def gradient(self, x, out = None): if self.memopt: #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - + print (self.range_tmp, self.range_tmp.as_array()) self.A.direct(x, out=self.range_tmp) self.range_tmp -= self.b self.A.adjoint(self.range_tmp, out=out) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 7caeab2..d292ac8 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -18,6 +18,7 @@ # limitations under the License. from numbers import Number import numpy +import warnings class ScaledFunction(object): @@ -88,7 +89,7 @@ class ScaledFunction(object): '''Alias of proximal(x, tau, None)''' warnings.warn('''This method will disappear in following versions of the CIL. Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) + return self.proximal(x, tau, out=None) diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py index 62e22e0..6306192 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py +++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py @@ -10,6 +10,9 @@ from ccpi.optimisation.operators import LinearOperator class LinearOperatorMatrix(LinearOperator): def __init__(self,A): self.A = A + M_A, N_A = self.A.shape + self.gm_domain = ImageGeometry(0, N_A) + self.gm_range = ImageGeometry(M_A,0) self.s1 = None # Largest singular value, initially unknown super(LinearOperatorMatrix, self).__init__() @@ -30,22 +33,14 @@ class LinearOperatorMatrix(LinearOperator): def size(self): return self.A.shape - def get_max_sing_val(self): + def norm(self): # If unknown, compute and store. If known, simply return it. 
if self.s1 is None: self.s1 = svds(self.A,1,return_singular_vectors=False)[0] return self.s1 else: return self.s1 - def allocate_direct(self): - '''allocates the memory to hold the result of adjoint''' - #numpy.dot(self.A.transpose(),x.as_array()) - M_A, N_A = self.A.shape - out = numpy.zeros((N_A,1)) - return DataContainer(out) - def allocate_adjoint(self): - '''allocate the memory to hold the result of direct''' - #numpy.dot(self.A.transpose(),x.as_array()) - M_A, N_A = self.A.shape - out = numpy.zeros((M_A,1)) - return DataContainer(out) + def domain_geometry(self): + return self.gm_domain + def range_geometry(self): + return self.gm_range diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py index a6c13f4..ebe494f 100755 --- a/Wrappers/Python/test/test_run_test.py +++ b/Wrappers/Python/test/test_run_test.py @@ -10,7 +10,8 @@ from ccpi.optimisation.algorithms import FISTA #from ccpi.optimisation.algs import FBPD from ccpi.optimisation.functions import Norm2Sq from ccpi.optimisation.functions import ZeroFunction -from ccpi.optimisation.funcs import Norm1 +#from ccpi.optimisation.funcs import Norm1 +from ccpi.optimisation.functions import L1Norm from ccpi.optimisation.funcs import Norm2 from ccpi.optimisation.operators import LinearOperatorMatrix @@ -81,6 +82,7 @@ class TestAlgorithms(unittest.TestCase): opt = {'memopt': True} # Create object instances with the test data A and b. f = Norm2Sq(A, b, c=0.5, memopt=True) + f.L = LinearOperator.PowerMethod(A, 10) g0 = ZeroFunction() # Initial guess @@ -123,6 +125,7 @@ class TestAlgorithms(unittest.TestCase): self.assertTrue(cvx_not_installable) def test_FISTA_Norm1_cvx(self): + print ("test_FISTA_Norm1_cvx") if not cvx_not_installable: try: opt = {'memopt': True} @@ -151,7 +154,8 @@ class TestAlgorithms(unittest.TestCase): x_init = DataContainer(np.zeros((n, 1))) # Create 1-norm object instance - g1 = Norm1(lam) + #g1 = Norm1(lam) + g1 = lam * L1Norm() g1(x_init) g1.prox(x_init, 0.02) @@ -225,7 +229,8 @@ class TestAlgorithms(unittest.TestCase): # Create 1-norm object instance - g1 = Norm1(lam) + #g1 = Norm1(lam) + g1 = lam * L1Norm() # Compare to CVXPY @@ -292,7 +297,8 @@ class TestAlgorithms(unittest.TestCase): # 1-norm regulariser lam1_denoise = 1.0 - g1_denoise = Norm1(lam1_denoise) + #g1_denoise = Norm1(lam1_denoise) + g1_denoise = lam1_denoise * L1Norm() # Initial guess x_init_denoise = ImageData(np.zeros((N, N))) -- cgit v1.2.3 From 910a81042068b084278783b53bac45ad63b852d2 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 4 Jun 2019 16:29:45 +0100 Subject: progress --- Wrappers/Python/ccpi/framework/VectorData.py | 58 ++++++++++++++++++ Wrappers/Python/ccpi/framework/VectorGeometry.py | 69 ++++++++++++++++++++++ Wrappers/Python/ccpi/framework/__init__.py | 2 + Wrappers/Python/ccpi/framework/framework.py | 1 - .../Python/ccpi/optimisation/algorithms/FISTA.py | 14 ++++- .../Python/ccpi/optimisation/functions/Norm2Sq.py | 1 - .../optimisation/operators/LinearOperatorMatrix.py | 16 +++-- 7 files changed, 149 insertions(+), 12 deletions(-) create mode 100755 Wrappers/Python/ccpi/framework/VectorData.py create mode 100755 Wrappers/Python/ccpi/framework/VectorGeometry.py (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/ccpi/framework/VectorData.py b/Wrappers/Python/ccpi/framework/VectorData.py new file mode 100755 index 0000000..fdce3a5 --- /dev/null +++ b/Wrappers/Python/ccpi/framework/VectorData.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library 
developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +import sys +from datetime import timedelta, datetime +import warnings +from functools import reduce +from numbers import Number +from ccpi.framework import DataContainer, VectorGeometry + +class VectorData(DataContainer): + def __init__(self, array=None, **kwargs): + self.geometry = kwargs.get('geometry', None) + self.dtype = kwargs.get('dtype', numpy.float32) + + if self.geometry is None: + if array is None: + raise ValueError('Please specify either a geometry or an array') + else: + if len(array.shape) > 1: + raise ValueError('Incompatible size: expected 1D got {}'.format(array.shape)) + out = array + self.geometry = VectorGeometry.VectorGeometry(array.shape[0]) + self.length = self.geometry.length + else: + self.length = self.geometry.length + + if array is None: + out = numpy.zeros((self.length,), dtype=self.dtype) + else: + if self.length == array.shape[0]: + out = array + else: + raise ValueError('Incompatible size: expecting {} got {}'.format((self.length,), array.shape)) + deep_copy = False + super(VectorData, self).__init__(out, deep_copy, None) diff --git a/Wrappers/Python/ccpi/framework/VectorGeometry.py b/Wrappers/Python/ccpi/framework/VectorGeometry.py new file mode 100755 index 0000000..255d2a0 --- /dev/null +++ b/Wrappers/Python/ccpi/framework/VectorGeometry.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +import sys +from datetime import timedelta, datetime +import warnings +from functools import reduce +from numbers import Number +from ccpi.framework.VectorData import VectorData + +class VectorGeometry(object): + RANDOM = 'random' + RANDOM_INT = 'random_int' + + def __init__(self, + length): + + self.length = length + self.shape = (length, ) + + + def clone(self): + '''returns a copy of VectorGeometry''' + return VectorGeometry(self.length) + + def allocate(self, value=0, **kwargs): + '''allocates an VectorData according to the size expressed in the instance''' + self.dtype = kwargs.get('dtype', numpy.float32) + out = VectorData(geometry=self, dtype=self.dtype) + if isinstance(value, Number): + if value != 0: + out += value + else: + if value == VectorGeometry.RANDOM: + seed = kwargs.get('seed', None) + if seed is not None: + numpy.random.seed(seed) + out.fill(numpy.random.random_sample(self.shape)) + elif value == VectorGeometry.RANDOM_INT: + seed = kwargs.get('seed', None) + if seed is not None: + numpy.random.seed(seed) + max_value = kwargs.get('max_value', 100) + out.fill(numpy.random.randint(max_value,size=self.shape)) + else: + raise ValueError('Value {} unknown'.format(value)) + return out \ No newline at end of file diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py index 229edb5..9cec708 100755 --- a/Wrappers/Python/ccpi/framework/__init__.py +++ b/Wrappers/Python/ccpi/framework/__init__.py @@ -24,3 +24,5 @@ from .framework import DataProcessor from .framework import AX, PixelByPixelDataProcessor, CastDataContainer from .BlockDataContainer import BlockDataContainer from .BlockGeometry import BlockGeometry +from .VectorGeometry import VectorGeometry +from .VectorData import VectorData diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 7236e0e..b972be6 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -29,7 +29,6 @@ import warnings from functools import reduce from numbers import Number - def find_key(dic, val): """return the key of dictionary dic given the value""" return [k for k, v in dic.items() if v == val][0] diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py index 04e7c38..db4e8b7 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py @@ -34,7 +34,7 @@ class FISTA(Algorithm): self.invL = None self.t_old = 1 if self.f is not None and self.g is not None: - print ("Calling from creator") + print ("FISTA initialising from creator") self.set_up(self.x_init, self.f, self.g) @@ -46,6 +46,8 @@ class FISTA(Algorithm): # algorithmic parameters if opt is None: opt = {'tol': 1e-4} + print(self.x_init.as_array()) + print(x_init.as_array()) self.y = x_init.copy() self.x_old = x_init.copy() @@ -60,18 +62,28 @@ class FISTA(Algorithm): def update(self): self.f.gradient(self.y, out=self.u) + print ('update, self.u' , self.u.as_array()) self.u.__imul__( -self.invL ) self.u.__iadd__( self.y ) + print ('update, self.u' , self.u.as_array()) + # x = g.prox(u,invL) + print ('update, self.x pre prox' , self.x.as_array()) self.g.proximal(self.u, self.invL, out=self.x) + print ('update, self.x post prox' , self.x.as_array()) 
self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) self.x.subtract(self.x_old, out=self.y) + print ('update, self.y' , self.y.as_array()) + self.y.__imul__ ((self.t_old-1)/self.t) + print ('update, self.x' , self.x.as_array()) self.y.__iadd__( self.x ) + print ('update, self.y' , self.y.as_array()) self.x_old.fill(self.x) + print ('update, self.x_old' , self.x_old.as_array()) self.t_old = self.t def update_objective(self): diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py index d9d9010..8e77f56 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py +++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py @@ -88,7 +88,6 @@ class Norm2Sq(Function): def gradient(self, x, out = None): if self.memopt: #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - print (self.range_tmp, self.range_tmp.as_array()) self.A.direct(x, out=self.range_tmp) self.range_tmp -= self.b self.A.adjoint(self.range_tmp, out=out) diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py index 6306192..292db2c 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py +++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py @@ -2,8 +2,8 @@ import numpy from scipy.sparse.linalg import svds from ccpi.framework import DataContainer from ccpi.framework import AcquisitionData -from ccpi.framework import ImageData -from ccpi.framework import ImageGeometry +from ccpi.framework import VectorData +from ccpi.framework import VectorGeometry from ccpi.framework import AcquisitionGeometry from numbers import Number from ccpi.optimisation.operators import LinearOperator @@ -11,8 +11,8 @@ class LinearOperatorMatrix(LinearOperator): def __init__(self,A): self.A = A M_A, N_A = self.A.shape - self.gm_domain = ImageGeometry(0, N_A) - self.gm_range = ImageGeometry(M_A,0) + self.gm_domain = VectorGeometry(N_A) + self.gm_range = VectorGeometry(M_A) self.s1 = None # Largest singular value, initially unknown super(LinearOperatorMatrix, self).__init__() @@ -21,18 +21,16 @@ class LinearOperatorMatrix(LinearOperator): return type(x)(numpy.dot(self.A,x.as_array())) else: numpy.dot(self.A, x.as_array(), out=out.as_array()) - - + def adjoint(self,x, out=None): if out is None: return type(x)(numpy.dot(self.A.transpose(),x.as_array())) else: numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) - - + def size(self): return self.A.shape - + def norm(self): # If unknown, compute and store. If known, simply return it. 
if self.s1 is None: -- cgit v1.2.3 From e5d9351e87c92276677a0b26141049205e215860 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 4 Jun 2019 16:30:46 +0100 Subject: added example file --- Wrappers/Python/wip/fix_test.py | 133 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100755 Wrappers/Python/wip/fix_test.py (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/wip/fix_test.py b/Wrappers/Python/wip/fix_test.py new file mode 100755 index 0000000..7b19910 --- /dev/null +++ b/Wrappers/Python/wip/fix_test.py @@ -0,0 +1,133 @@ +import numpy as np +from ccpi.optimisation.operators import * +from ccpi.optimisation.algorithms import * +from ccpi.optimisation.functions import * +from ccpi.framework import * + +def isSizeCorrect(data1 ,data2): + if issubclass(type(data1), DataContainer) and \ + issubclass(type(data2), DataContainer): + # check dimensionality + if data1.check_dimensions(data2): + return True + elif issubclass(type(data1) , numpy.ndarray) and \ + issubclass(type(data2) , numpy.ndarray): + return data1.shape == data2.shape + else: + raise ValueError("{0}: getting two incompatible types: {1} {2}"\ + .format('Function', type(data1), type(data2))) + return False + +class Norm1(Function): + + def __init__(self,gamma): + super(Norm1, self).__init__() + self.gamma = gamma + self.L = 1 + self.sign_x = None + + def __call__(self,x,out=None): + if out is None: + return self.gamma*(x.abs().sum()) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + x.abs(out=out) + return out.sum() * self.gamma + + def prox(self,x,tau): + return (x.abs() - tau*self.gamma).maximum(0) * x.sign() + + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if isSizeCorrect(x,out): + # check dimensionality + if issubclass(type(out), DataContainer): + v = (x.abs() - tau*self.gamma).maximum(0) + x.sign(out=out) + out *= v + #out.fill(self.prox(x,tau)) + elif issubclass(type(out) , numpy.ndarray): + v = (x.abs() - tau*self.gamma).maximum(0) + out[:] = x.sign() + out *= v + #out[:] = self.prox(x,tau) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + +opt = {'memopt': True} +# Problem data. +m = 3 +n = 3 +np.random.seed(1) +#Amat = np.asarray( np.random.randn(m, n), dtype=numpy.float32) +Amat = np.asarray(np.eye(m), dtype=np.float32) * 2 +A = LinearOperatorMatrix(Amat) +bmat = np.asarray( np.random.randn(m), dtype=numpy.float32) +bmat *= 0 +bmat += 2 +print ("bmat", bmat.shape) +print ("A", A.A) +#bmat.shape = (bmat.shape[0], 1) + +# A = Identity() +# Change n to equal to m. +vgb = VectorGeometry(m) +vgx = VectorGeometry(n) +b = vgb.allocate(VectorGeometry.RANDOM, dtype=numpy.float32) +b.fill(bmat) +#b = DataContainer(bmat) + +# Regularization parameter +lam = 10 +opt = {'memopt': True} +# Create object instances with the test data A and b. 
+f = Norm2Sq(A, b, c=0.5, memopt=True) +#f = FunctionOperatorComposition(A, L2NormSquared(b=bmat)) +g0 = ZeroFunction() + +#f.L = 30.003 +x_init = vgx.allocate(VectorGeometry.RANDOM, dtype=numpy.float32) + +f.L = LinearOperator.PowerMethod(A, 25, x_init)[0] * 2 +print ('f.L', f.L) + +# Initial guess +#x_init = DataContainer(np.zeros((n, 1))) +print ('x_init', x_init.as_array()) +print ('b', b.as_array()) +# Create 1-norm object instance +g1_new = lam * L1Norm() +g1 = Norm1(lam) +g1 = ZeroFunction() +#g1(x_init) +x = g1.prox(x_init, 1/f.L ) +print ("g1.proximal ", x.as_array()) + +x = g1.prox(x_init, 0.03 ) +print ("g1.proximal ", x.as_array()) +x = g1_new.proximal(x_init, 0.03 ) +print ("g1.proximal ", x.as_array()) + + + +# Combine with least squares and solve using generic FISTA implementation +#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt) +def callback(it, objective, solution): + print (it, objective, solution.as_array()) + +fa = FISTA(x_init=x_init, f=f, g=g1) +fa.max_iteration = 10 +fa.run(3, callback = callback, verbose=False) + + +# Print for comparison +print("FISTA least squares plus 1-norm solution and objective value:") +print(fa.get_output().as_array()) +print(fa.get_last_objective()) + +print (A.direct(fa.get_output()).as_array()) +print (b.as_array()) \ No newline at end of file -- cgit v1.2.3 From 935361ba734c7a2ecae8835d5f6959d32f4c7403 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 5 Jun 2019 09:58:00 +0100 Subject: fixed test --- Wrappers/Python/ccpi/framework/VectorData.py | 2 +- Wrappers/Python/ccpi/framework/framework.py | 3 +- .../Python/ccpi/optimisation/algorithms/FISTA.py | 10 ------ Wrappers/Python/wip/fix_test.py | 39 ++++++++++++++++++---- 4 files changed, 35 insertions(+), 19 deletions(-) (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/ccpi/framework/VectorData.py b/Wrappers/Python/ccpi/framework/VectorData.py index fdce3a5..d0fed10 100755 --- a/Wrappers/Python/ccpi/framework/VectorData.py +++ b/Wrappers/Python/ccpi/framework/VectorData.py @@ -54,5 +54,5 @@ class VectorData(DataContainer): out = array else: raise ValueError('Incompatible size: expecting {} got {}'.format((self.length,), array.shape)) - deep_copy = False + deep_copy = True super(VectorData, self).__init__(out, deep_copy, None) diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index b972be6..f91343c 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -329,7 +329,7 @@ class DataContainer(object): self.dimension_labels = {} self.geometry = None # Only relevant for AcquisitionData and ImageData - if dimension_labels is not None and \ + if dimension_labels != {} and \ len (dimension_labels) == self.number_of_dimensions: for i in range(self.number_of_dimensions): self.dimension_labels[i] = dimension_labels[i] @@ -632,6 +632,7 @@ class DataContainer(object): def pixel_wise_binary(self, pwop, x2, *args, **kwargs): out = kwargs.get('out', None) + if out is None: if isinstance(x2, (int, float, complex)): out = pwop(self.as_array() , x2 , *args, **kwargs ) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py index db4e8b7..4754d9c 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py @@ -62,28 +62,18 @@ class FISTA(Algorithm): def update(self): self.f.gradient(self.y, out=self.u) - print ('update, self.u' , 
self.u.as_array()) self.u.__imul__( -self.invL ) self.u.__iadd__( self.y ) - print ('update, self.u' , self.u.as_array()) - # x = g.prox(u,invL) - print ('update, self.x pre prox' , self.x.as_array()) self.g.proximal(self.u, self.invL, out=self.x) - print ('update, self.x post prox' , self.x.as_array()) self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) self.x.subtract(self.x_old, out=self.y) - print ('update, self.y' , self.y.as_array()) - self.y.__imul__ ((self.t_old-1)/self.t) - print ('update, self.x' , self.x.as_array()) self.y.__iadd__( self.x ) - print ('update, self.y' , self.y.as_array()) self.x_old.fill(self.x) - print ('update, self.x_old' , self.x_old.as_array()) self.t_old = self.t def update_objective(self): diff --git a/Wrappers/Python/wip/fix_test.py b/Wrappers/Python/wip/fix_test.py index 7b19910..094f571 100755 --- a/Wrappers/Python/wip/fix_test.py +++ b/Wrappers/Python/wip/fix_test.py @@ -60,11 +60,11 @@ class Norm1(Function): opt = {'memopt': True} # Problem data. -m = 3 -n = 3 +m = 30 +n = 30 np.random.seed(1) -#Amat = np.asarray( np.random.randn(m, n), dtype=numpy.float32) -Amat = np.asarray(np.eye(m), dtype=np.float32) * 2 +Amat = np.asarray( np.random.randn(m, n), dtype=numpy.float32) +#Amat = np.asarray(np.eye(m), dtype=np.float32) * 2 A = LinearOperatorMatrix(Amat) bmat = np.asarray( np.random.randn(m), dtype=numpy.float32) bmat *= 0 @@ -92,8 +92,15 @@ g0 = ZeroFunction() #f.L = 30.003 x_init = vgx.allocate(VectorGeometry.RANDOM, dtype=numpy.float32) +a = VectorData(x_init.as_array(), deep_copy=True) + +assert id(x_init.as_array()) != id(a.as_array()) + +#%% f.L = LinearOperator.PowerMethod(A, 25, x_init)[0] * 2 print ('f.L', f.L) +rate = (1 / f.L) / 3 +f.L *= 6 # Initial guess #x_init = DataContainer(np.zeros((n, 1))) @@ -102,6 +109,7 @@ print ('b', b.as_array()) # Create 1-norm object instance g1_new = lam * L1Norm() g1 = Norm1(lam) + g1 = ZeroFunction() #g1(x_init) x = g1.prox(x_init, 1/f.L ) @@ -112,6 +120,16 @@ print ("g1.proximal ", x.as_array()) x = g1_new.proximal(x_init, 0.03 ) print ("g1.proximal ", x.as_array()) +x1 = vgx.allocate(VectorGeometry.RANDOM, dtype=numpy.float32) +pippo = vgx.allocate() + +print ("x_init", x_init.as_array()) +print ("x1", x1.as_array()) +a = x_init.subtract(x1, out=pippo) + +print ("pippo", pippo.as_array()) +print ("x_init", x_init.as_array()) +print ("x1", x1.as_array()) # Combine with least squares and solve using generic FISTA implementation @@ -120,8 +138,14 @@ def callback(it, objective, solution): print (it, objective, solution.as_array()) fa = FISTA(x_init=x_init, f=f, g=g1) -fa.max_iteration = 10 -fa.run(3, callback = callback, verbose=False) +fa.max_iteration = 1000 +fa.update_objective_interval = int( fa.max_iteration / 10 ) +fa.run(fa.max_iteration, callback = None, verbose=True) + +gd = GradientDescent(x_init=x_init, objective_function=f, rate = rate ) +gd.max_iteration = 100000 +gd.update_objective_interval = 10000 +gd.run(gd.max_iteration, callback = None, verbose=True) # Print for comparison @@ -130,4 +154,5 @@ print(fa.get_output().as_array()) print(fa.get_last_objective()) print (A.direct(fa.get_output()).as_array()) -print (b.as_array()) \ No newline at end of file +print (b.as_array()) +print (A.direct(gd.get_output()).as_array()) -- cgit v1.2.3 From 940a1371fdf88e8c9e8230cece6fa1c73842804c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 6 Jun 2019 10:07:37 +0100 Subject: add callback --- Wrappers/Python/wip/compare_CGLS_algos.py | 28 +++++++++++++++++----------- Wrappers/Python/wip/fix_test.py | 
31 ++++++++++++++++++++++++------- 2 files changed, 41 insertions(+), 18 deletions(-) (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/wip/compare_CGLS_algos.py b/Wrappers/Python/wip/compare_CGLS_algos.py index 119752c..52f3f31 100644 --- a/Wrappers/Python/wip/compare_CGLS_algos.py +++ b/Wrappers/Python/wip/compare_CGLS_algos.py @@ -12,6 +12,8 @@ from ccpi.optimisation.algorithms import CGLS as CGLSalg import numpy as np import matplotlib.pyplot as plt +from ccpi.optimisation.functions import Norm2Sq + # Choose either a parallel-beam (1=parallel2D) or fan-beam (2=cone2D) test case test_case = 1 @@ -101,25 +103,29 @@ x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b, opt) #plt.title('CGLS criterion') #plt.show() +f = Norm2Sq(Aop, b, c=1.) +def callback(it, objective, solution): + print (objective, f(solution)) # Now CLGS using the algorithm class CGLS_alg = CGLSalg() CGLS_alg.set_up(x_init, Aop, b ) -CGLS_alg.max_iteration = 2000 -CGLS_alg.run(opt['iter']) +CGLS_alg.max_iteration = 500 +CGLS_alg.update_objective_interval = 10 +CGLS_alg.run(300, callback=callback) x_CGLS_alg = CGLS_alg.get_output() -#plt.figure() -#plt.imshow(x_CGLS_alg.as_array()) -#plt.title('CGLS ALG') -#plt.colorbar() -#plt.show() +plt.figure() +plt.imshow(x_CGLS_alg.as_array()) +plt.title('CGLS ALG') +plt.colorbar() +plt.show() -#plt.figure() -#plt.semilogy(CGLS_alg.objective) -#plt.title('CGLS criterion') -#plt.show() +plt.figure() +plt.semilogy(CGLS_alg.objective) +plt.title('CGLS criterion') +plt.show() print(criter_CGLS) print(CGLS_alg.objective) diff --git a/Wrappers/Python/wip/fix_test.py b/Wrappers/Python/wip/fix_test.py index 094f571..5e40d70 100755 --- a/Wrappers/Python/wip/fix_test.py +++ b/Wrappers/Python/wip/fix_test.py @@ -1,4 +1,5 @@ import numpy as np +import numpy from ccpi.optimisation.operators import * from ccpi.optimisation.algorithms import * from ccpi.optimisation.functions import * @@ -97,10 +98,10 @@ a = VectorData(x_init.as_array(), deep_copy=True) assert id(x_init.as_array()) != id(a.as_array()) #%% -f.L = LinearOperator.PowerMethod(A, 25, x_init)[0] * 2 +f.L = LinearOperator.PowerMethod(A, 25, x_init)[0] print ('f.L', f.L) -rate = (1 / f.L) / 3 -f.L *= 6 +rate = (1 / f.L) / 6 +f.L *= 12 # Initial guess #x_init = DataContainer(np.zeros((n, 1))) @@ -138,7 +139,7 @@ def callback(it, objective, solution): print (it, objective, solution.as_array()) fa = FISTA(x_init=x_init, f=f, g=g1) -fa.max_iteration = 1000 +fa.max_iteration = 10000 fa.update_objective_interval = int( fa.max_iteration / 10 ) fa.run(fa.max_iteration, callback = None, verbose=True) @@ -147,12 +148,28 @@ gd.max_iteration = 100000 gd.update_objective_interval = 10000 gd.run(gd.max_iteration, callback = None, verbose=True) +cgls = CGLS(x_init= x_init, operator=A, data=b) +cgls.max_iteration = 200 +cgls.update_objective_interval = 1 +def stop_criterion(alg): + try: + x = alg.get_last_objective() + print (x) + a = True if x < numpy.finfo(numpy.float32).eps else False + except IndexError as ie: + a = False + def f (): + return a or alg.max_iteration_stop_cryterion() + return f +#cgls.should_stop = stop_criterion(cgls) +cgls.run(cgls.max_iteration, callback = None, verbose=True) # Print for comparison print("FISTA least squares plus 1-norm solution and objective value:") print(fa.get_output().as_array()) print(fa.get_last_objective()) -print (A.direct(fa.get_output()).as_array()) -print (b.as_array()) -print (A.direct(gd.get_output()).as_array()) +print ("data ", b.as_array()) +print ('FISTA ', 
A.direct(fa.get_output()).as_array()) +print ('GradientDescent', A.direct(gd.get_output()).as_array()) +print ('CGLS ', A.direct(cgls.get_output()).as_array()) -- cgit v1.2.3 From b234f4cf26ee56da94211dc15c9b277c7c29fff4 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 6 Jun 2019 10:18:31 +0100 Subject: add prints --- Wrappers/Python/wip/fix_test.py | 38 +++++++++++++++----------------------- 1 file changed, 15 insertions(+), 23 deletions(-) (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/wip/fix_test.py b/Wrappers/Python/wip/fix_test.py index 5e40d70..316606e 100755 --- a/Wrappers/Python/wip/fix_test.py +++ b/Wrappers/Python/wip/fix_test.py @@ -61,8 +61,8 @@ class Norm1(Function): opt = {'memopt': True} # Problem data. -m = 30 -n = 30 +m = 4 +n = 10 np.random.seed(1) Amat = np.asarray( np.random.randn(m, n), dtype=numpy.float32) #Amat = np.asarray(np.eye(m), dtype=np.float32) * 2 @@ -78,20 +78,21 @@ print ("A", A.A) # Change n to equal to m. vgb = VectorGeometry(m) vgx = VectorGeometry(n) -b = vgb.allocate(VectorGeometry.RANDOM, dtype=numpy.float32) -b.fill(bmat) +b = vgb.allocate(2, dtype=numpy.float32) +# b.fill(bmat) #b = DataContainer(bmat) # Regularization parameter lam = 10 opt = {'memopt': True} # Create object instances with the test data A and b. -f = Norm2Sq(A, b, c=0.5, memopt=True) +f = Norm2Sq(A, b, c=1., memopt=True) #f = FunctionOperatorComposition(A, L2NormSquared(b=bmat)) g0 = ZeroFunction() #f.L = 30.003 x_init = vgx.allocate(VectorGeometry.RANDOM, dtype=numpy.float32) +x_initcgls = x_init.copy() a = VectorData(x_init.as_array(), deep_copy=True) @@ -136,33 +137,24 @@ print ("x1", x1.as_array()) # Combine with least squares and solve using generic FISTA implementation #x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt) def callback(it, objective, solution): - print (it, objective, solution.as_array()) + print (objective, f(solution)) fa = FISTA(x_init=x_init, f=f, g=g1) -fa.max_iteration = 10000 +fa.max_iteration = 100 fa.update_objective_interval = int( fa.max_iteration / 10 ) fa.run(fa.max_iteration, callback = None, verbose=True) gd = GradientDescent(x_init=x_init, objective_function=f, rate = rate ) -gd.max_iteration = 100000 -gd.update_objective_interval = 10000 +gd.max_iteration = 100 +gd.update_objective_interval = int( gd.max_iteration / 10 ) gd.run(gd.max_iteration, callback = None, verbose=True) -cgls = CGLS(x_init= x_init, operator=A, data=b) -cgls.max_iteration = 200 -cgls.update_objective_interval = 1 -def stop_criterion(alg): - try: - x = alg.get_last_objective() - print (x) - a = True if x < numpy.finfo(numpy.float32).eps else False - except IndexError as ie: - a = False - def f (): - return a or alg.max_iteration_stop_cryterion() - return f +cgls = CGLS(x_init= x_initcgls, operator=A, data=b) +cgls.max_iteration = 1000 +cgls.update_objective_interval = 2 + #cgls.should_stop = stop_criterion(cgls) -cgls.run(cgls.max_iteration, callback = None, verbose=True) +cgls.run(10, callback = callback, verbose=True) # Print for comparison print("FISTA least squares plus 1-norm solution and objective value:") -- cgit v1.2.3 From 1bdd5f572988caa3888b33a0b422692fa78962ef Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 6 Jun 2019 10:19:57 +0100 Subject: add memopt and some checks --- .../Python/ccpi/optimisation/algorithms/CGLS.py | 49 +++++++++++++++++++++- 1 file changed, 47 insertions(+), 2 deletions(-) (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py 
b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py index e65bc89..cb4f049 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py @@ -23,6 +23,8 @@ Created on Thu Feb 21 11:11:23 2019 """ from ccpi.optimisation.algorithms import Algorithm +import numpy + class CGLS(Algorithm): '''Conjugate Gradient Least Squares algorithm @@ -54,13 +56,16 @@ class CGLS(Algorithm): self.normr2 = self.d.squared_norm() + + self.s = self.operator.domain_geometry().allocate() #if isinstance(self.normr2, Iterable): # self.normr2 = sum(self.normr2) #self.normr2 = numpy.sqrt(self.normr2) #print ("set_up" , self.normr2) def update(self): - + self.update_new() + def update_old(self): Ad = self.operator.direct(self.d) #norm = (Ad*Ad).sum() #if isinstance(norm, Iterable): @@ -82,5 +87,45 @@ class CGLS(Algorithm): self.normr2 = normr2_new self.d = s + beta*self.d + def update_new(self): + + Ad = self.operator.direct(self.d) + norm = Ad.squared_norm() + if norm == 0.: + print ('cannot update solution') + raise StopIteration() + alpha = self.normr2/norm + if alpha == 0.: + print ('cannot update solution') + raise StopIteration() + self.d *= alpha + Ad *= alpha + self.r -= Ad + if numpy.isnan(self.r.as_array()).any(): + print ("some nan") + raise StopIteration() + self.x += self.d + + self.operator.adjoint(self.r, out=self.s) + s = self.s + + normr2_new = s.squared_norm() + + beta = normr2_new/self.normr2 + self.normr2 = normr2_new + self.d *= (beta/alpha) + self.d += s + def update_objective(self): - self.loss.append(self.r.squared_norm()) + a = self.r.squared_norm() + if a is numpy.nan: + raise StopIteration() + self.loss.append(a) + +# def should_stop(self): +# if self.iteration > 0: +# x = self.get_last_objective() +# a = x > 0 +# return self.max_iteration_stop_cryterion() or (not a) +# else: +# return False -- cgit v1.2.3 From 218dc267849a8fd53f3e92ebe56d2a5926a302b9 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 7 Jun 2019 09:30:34 +0100 Subject: reinstated check of dimension_label None --- Wrappers/Python/ccpi/framework/framework.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers/Python') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index f91343c..e1927c3 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -329,7 +329,7 @@ class DataContainer(object): self.dimension_labels = {} self.geometry = None # Only relevant for AcquisitionData and ImageData - if dimension_labels != {} and \ + if dimension_labels is not None and \ len (dimension_labels) == self.number_of_dimensions: for i in range(self.number_of_dimensions): self.dimension_labels[i] = dimension_labels[i] -- cgit v1.2.3
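
Usage sketch for the pieces this series introduces (VectorGeometry/VectorData allocation, the VectorGeometry-based LinearOperatorMatrix, and the reworked FISTA and CGLS), condensed from the Wrappers/Python/wip/fix_test.py example added above. It is a sketch only: the problem size, regularisation choice, Lipschitz estimate via PowerMethod and iteration counts are illustrative, taken from how fix_test.py exercises the API rather than from any fixed interface contract.

# Sketch adapted from wip/fix_test.py; sizes and iteration counts are arbitrary.
import numpy as np
from ccpi.framework import VectorGeometry
from ccpi.optimisation.operators import LinearOperatorMatrix, LinearOperator
from ccpi.optimisation.algorithms import FISTA, CGLS
from ccpi.optimisation.functions import Norm2Sq, ZeroFunction, L1Norm

# Small dense test problem.
m, n = 4, 10
np.random.seed(1)
Amat = np.asarray(np.random.randn(m, n), dtype=np.float32)

A = LinearOperatorMatrix(Amat)            # domain/range are now VectorGeometry
vgb = A.range_geometry()                  # VectorGeometry(m)
vgx = A.domain_geometry()                 # VectorGeometry(n)

b = vgb.allocate(2, dtype=np.float32)     # constant right-hand side
x_init = vgx.allocate(VectorGeometry.RANDOM, dtype=np.float32, seed=1)

f = Norm2Sq(A, b, c=1.0, memopt=True)
# FISTA steps with 1/f.L; estimate the Lipschitz constant as fix_test.py does.
f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]

g = ZeroFunction()                        # or a scaled 1-norm: g = 10 * L1Norm()

fista = FISTA(x_init=x_init, f=f, g=g)
fista.max_iteration = 100
fista.update_objective_interval = 10
fista.run(fista.max_iteration, verbose=True)

cgls = CGLS(x_init=x_init.copy(), operator=A, data=b)
cgls.max_iteration = 20
cgls.update_objective_interval = 2
cgls.run(10, verbose=True)

print(fista.get_output().as_array())
print(cgls.get_output().as_array())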