author     epapoutsellis <epapoutsellis@gmail.com>  2019-06-12 11:42:57 +0100
committer  epapoutsellis <epapoutsellis@gmail.com>  2019-06-12 11:42:57 +0100
commit     e0c27194cf2470253fe949000b98cdc392a27e03 (patch)
tree       ed2431a1629077520b71d799bb8f239056b22427 /Wrappers/Python
parent     4a1f878a19631147b80dcd146b75a396f2ffc2ac (diff)
parent     e2891f139050d1c50a522b34be51a1a0d6df0b40 (diff)
merge
Diffstat (limited to 'Wrappers/Python')
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py                  55
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py                        6
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py                       86
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py                       9
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py             3
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py                       10
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py                        8
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/__init__.py                    2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py                      4
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/__init__.py                     2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py                4
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py          4
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py     9
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py             7
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py             2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py              26
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py        21
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/Operator.py                    10
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py               4
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py             2
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py 13
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py                 4
-rw-r--r--  Wrappers/Python/test/test_Operator.py                                      37
-rwxr-xr-x  Wrappers/Python/test/test_algorithms.py                                    25
-rw-r--r--  Wrappers/Python/test/test_functions.py                                      6
-rwxr-xr-x  Wrappers/Python/test/test_run_test.py                                      21
26 files changed, 156 insertions, 224 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index 0ba2c55..c62d0ea 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -51,6 +51,7 @@ class Algorithm(object):
self.__max_iteration = kwargs.get('max_iteration', 0)
self.__loss = []
self.memopt = False
+ self.configured = False
self.timing = []
self.update_objective_interval = kwargs.get('update_objective_interval', 1)
def set_up(self, *args, **kwargs):
@@ -86,6 +87,8 @@ class Algorithm(object):
raise StopIteration()
else:
time0 = time.time()
+ if not self.configured:
+ raise ValueError('Algorithm not configured correctly. Please run set_up.')
self.update()
self.timing.append( time.time() - time0 )
if self.iteration % self.update_objective_interval == 0:
@@ -147,13 +150,9 @@ class Algorithm(object):
if self.should_stop():
print ("Stop cryterion has been reached.")
i = 0
-
- if verbose:
- print ("{:>9} {:>10} {:>11} {:>20}".format('Iter',
- 'Max Iter',
- 's/Iter',
- 'Objective Value'))
for _ in self:
+ if i == 0 and verbose:
+ print (self.verbose_header())
if (self.iteration -1) % self.update_objective_interval == 0:
if verbose:
print (self.verbose_output())
@@ -170,14 +169,40 @@ class Algorithm(object):
t = 0
else:
t = sum(timing)/len(timing)
- el = [ self.iteration-1,
- self.max_iteration,
- "{:.3f} s/it".format(t),
- self.get_last_objective() ]
-
- if type(el[-1] ) == list:
- string = functools.reduce(lambda x,y: x+' {:>15.5e}'.format(y), el[-1],'')
- out = "{:>9} {:>10} {:>11} {}".format(*el[:-1] , string)
+ out = "{:>9} {:>10} {:>13} {}".format(
+ self.iteration-1,
+ self.max_iteration,
+ "{:.3f}".format(t),
+ self.objective_to_string()
+ )
+ return out
+
+ def objective_to_string(self):
+ el = self.get_last_objective()
+ if type(el) == list:
+ string = functools.reduce(lambda x,y: x+' {:>13.5e}'.format(y), el[:-1],'')
+ string += '{:>15.5e}'.format(el[-1])
+ else:
+ string = "{:>20.5e}".format(el)
+ return string
+ def verbose_header(self):
+ el = self.get_last_objective()
+ if type(el) == list:
+ out = "{:>9} {:>10} {:>13} {:>13} {:>13} {:>15}\n".format('Iter',
+ 'Max Iter',
+ 'Time/Iter',
+ 'Primal' , 'Dual', 'Primal-Dual')
+ out += "{:>9} {:>10} {:>13} {:>13} {:>13} {:>15}".format('',
+ '',
+ '[s]',
+ 'Objective' , 'Objective', 'Gap')
else:
- out = "{:>9} {:>10} {:>11} {:>20.5e}".format(*el)
+ out = "{:>9} {:>10} {:>13} {:>20}\n".format('Iter',
+ 'Max Iter',
+ 'Time/Iter',
+ 'Objective')
+ out += "{:>9} {:>10} {:>13} {:>20}".format('',
+ '',
+ '[s]',
+ '')
return out
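
The two behavioural changes in Algorithm.py above are the new configured flag (every concrete set_up is now expected to finish with self.configured = True, and iteration refuses to start otherwise) and the split of the verbose output into verbose_header/objective_to_string. A minimal standalone sketch of the configured contract, using a toy class name (ToyAlgorithm) rather than the ccpi base class:

class ToyAlgorithm(object):
    '''Toy illustration of the configured contract; not the ccpi Algorithm class.'''
    def __init__(self):
        self.configured = False
        self.x = None
    def set_up(self, x_init):
        self.x = x_init
        self.configured = True   # concrete set_up methods now end with this line
    def update(self):
        self.x += 1
    def step(self):
        # mirrors the check added to Algorithm.__next__
        if not self.configured:
            raise ValueError('Algorithm not configured correctly. Please run set_up.')
        self.update()

alg = ToyAlgorithm()
try:
    alg.step()                   # raises: set_up has not been called yet
except ValueError as err:
    print(err)
alg.set_up(0)
alg.step()                       # runs normally once configured
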
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
index 4d4843c..6b610a0 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
@@ -23,6 +23,7 @@ Created on Thu Feb 21 11:11:23 2019
"""
from ccpi.optimisation.algorithms import Algorithm
+from ccpi.optimisation.functions import Norm2Sq
class CGLS(Algorithm):
@@ -59,6 +60,9 @@ class CGLS(Algorithm):
# self.normr2 = sum(self.normr2)
#self.normr2 = numpy.sqrt(self.normr2)
#print ("set_up" , self.normr2)
+ n = Norm2Sq(operator, self.data)
+ self.loss.append(n(x_init))
+ self.configured = True
def update(self):
@@ -84,4 +88,4 @@ class CGLS(Algorithm):
self.d = s + beta*self.d
def update_objective(self):
- self.loss.append(self.r.squared_norm())
\ No newline at end of file
+ self.loss.append(self.r.squared_norm())
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
deleted file mode 100644
index aa07359..0000000
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2019 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Created on Thu Feb 21 11:09:03 2019
-
-@author: ofn77899
-"""
-
-from ccpi.optimisation.algorithms import Algorithm
-from ccpi.optimisation.functions import ZeroFunction
-
-class FBPD(Algorithm):
- '''FBPD Algorithm
-
- Parameters:
- x_init: initial guess
- f: constraint
- g: data fidelity
- h: regularizer
- opt: additional algorithm
- '''
- constraint = None
- data_fidelity = None
- regulariser = None
- def __init__(self, **kwargs):
- pass
- def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\
- regulariser=None, opt=None):
-
- # default inputs
- if constraint is None:
- self.constraint = ZeroFun()
- else:
- self.constraint = constraint
- if data_fidelity is None:
- data_fidelity = ZeroFun()
- else:
- self.data_fidelity = data_fidelity
- if regulariser is None:
- self.regulariser = ZeroFun()
- else:
- self.regulariser = regulariser
-
- # algorithmic parameters
-
-
- # step-sizes
- self.tau = 2 / (self.data_fidelity.L + 2)
- self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L
-
- self.inv_sigma = 1/self.sigma
-
- # initialization
- self.x = x_init
- self.y = operator.direct(self.x)
-
-
- def update(self):
-
- # primal forward-backward step
- x_old = self.x
- self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) )
- self.x = self.constraint.prox(self.x, self.tau);
-
- # dual forward-backward step
- self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old);
- self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma);
-
- # time and criterion
- self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x))
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index 419958a..c8fd0d4 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -29,12 +29,13 @@ class FISTA(Algorithm):
proper variables are passed or later with set_up'''
super(FISTA, self).__init__()
self.f = kwargs.get('f', None)
- self.g = kwargs.get('g', None)
+ self.g = kwargs.get('g', ZeroFunction())
self.x_init = kwargs.get('x_init',None)
self.invL = None
self.t_old = 1
- if self.f is not None and self.g is not None:
- print ("Calling from creator")
+ if self.x_init is not None and \
+ self.f is not None and self.g is not None:
+ print ("FISTA set_up called from creator")
self.set_up(self.x_init, self.f, self.g)
@@ -56,6 +57,8 @@ class FISTA(Algorithm):
self.invL = 1/f.L
self.t_old = 1
+ self.update_objective()
+ self.configured = True
def update(self):
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
index 14763c5..34bf954 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
@@ -40,7 +40,7 @@ class GradientDescent(Algorithm):
if k in args:
args.pop(args.index(k))
if len(args) == 0:
- return self.set_up(x_init=kwargs['x_init'],
+ self.set_up(x_init=kwargs['x_init'],
objective_function=kwargs['objective_function'],
rate=kwargs['rate'])
@@ -61,6 +61,7 @@ class GradientDescent(Algorithm):
self.memopt = False
if self.memopt:
self.x_update = x_init.copy()
+ self.configured = True
def update(self):
'''Single iteration'''
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 39b092b..3afd8b0 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -23,10 +23,16 @@ class PDHG(Algorithm):
self.operator = kwargs.get('operator', None)
self.g = kwargs.get('g', None)
self.tau = kwargs.get('tau', None)
- self.sigma = kwargs.get('sigma', None)
+ self.sigma = kwargs.get('sigma', 1.)
+
if self.f is not None and self.operator is not None and \
self.g is not None:
+ if self.tau is None:
+ # Compute operator Norm
+ normK = self.operator.norm()
+ # Primal & dual stepsizes
+ self.tau = 1/(self.sigma*normK**2)
print ("Calling from creator")
self.set_up(self.f,
self.g,
@@ -57,6 +63,8 @@ class PDHG(Algorithm):
# relaxation parameter
self.theta = 1
+ self.update_objective()
+ self.configured = True
def update(self):
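
With this change the PDHG constructor falls back to sigma = 1 and, when tau is not supplied, derives it from the operator norm as tau = 1/(sigma*||K||^2). A quick numeric check of that default, with normK standing in for the value returned by operator.norm():

sigma = 1.0
normK = 2.5                        # stand-in for self.operator.norm()
tau = 1.0 / (sigma * normK ** 2)   # the default computed in __init__
print(tau)                         # 0.16
print(sigma * tau * normK ** 2)    # 1.0 -> the usual PDHG condition sigma*tau*||K||^2 <= 1 holds with equality
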
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
index 30584d4..c73d323 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
@@ -59,6 +59,7 @@ class SIRT(Algorithm):
# Set up scaling matrices D and M.
self.M = 1/self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))
self.D = 1/self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0))
+ self.configured = True
def update(self):
@@ -67,8 +68,9 @@ class SIRT(Algorithm):
self.x += self.relax_par * (self.D*self.operator.adjoint(self.M*self.r))
- if self.constraint != None:
- self.x = self.constraint.prox(self.x,None)
+ if self.constraint is not None:
+ self.x = self.constraint.proximal(self.x,None)
+ # self.constraint.proximal(self.x,None, out=self.x)
def update_objective(self):
- self.loss.append(self.r.squared_norm())
\ No newline at end of file
+ self.loss.append(self.r.squared_norm())
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
index 2dbacfc..8f255f3 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
@@ -27,7 +27,5 @@ from .CGLS import CGLS
from .SIRT import SIRT
from .GradientDescent import GradientDescent
from .FISTA import FISTA
-from .FBPD import FBPD
from .PDHG import PDHG
-from .PDHG import PDHG_old
diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
index b553d7c..0b6bb0b 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
@@ -21,7 +21,7 @@ import numpy
import warnings
# Define a class for squared 2-norm
-class Norm2sq(Function):
+class Norm2Sq(Function):
'''
f(x) = c*||A*x-b||_2^2
@@ -38,7 +38,7 @@ class Norm2sq(Function):
'''
def __init__(self,A,b,c=1.0,memopt=False):
- super(Norm2sq, self).__init__()
+ super(Norm2Sq, self).__init__()
self.A = A # Should be an operator, default identity
self.b = b # Default zero DataSet?
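
The class is renamed from Norm2sq to Norm2Sq (the functions/__init__.py import below is updated to match); it still represents f(x) = c*||A*x - b||_2^2. A plain-numpy check of that formula, independent of the ccpi Function API:

import numpy
A = numpy.eye(3)
b = numpy.ones(3)
c = 0.5
x = numpy.array([1.0, 2.0, 3.0])
print(c * numpy.linalg.norm(A.dot(x) - b) ** 2)   # 0.5 * (0 + 1 + 4) = 2.5
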
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
index a82ee3e..c0eab31 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -10,4 +10,4 @@ from .FunctionOperatorComposition import FunctionOperatorComposition
from .MixedL21Norm import MixedL21Norm
from .IndicatorBox import IndicatorBox
from .KullbackLeibler import KullbackLeibler
-from .Norm2Sq import Norm2sq
+from .Norm2Sq import Norm2Sq
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index c8bd546..5f04363 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -104,8 +104,8 @@ class BlockOperator(Operator):
index = row*self.shape[1]+col
return self.operators[index]
- def norm(self):
- norm = [op.norm()**2 for op in self.operators]
+ def calculate_norm(self, **kwargs):
+ norm = [op.norm(**kwargs)**2 for op in self.operators]
return numpy.sqrt(sum(norm))
def direct(self, x, out=None):
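
The new calculate_norm for BlockOperator returns sqrt(sum_i ||K_i||^2) over its blocks, which for a one-column block operator K = [K_1; ...; K_n] is an upper bound on the true norm (and exact when the blocks are scaled identities, e.g. [I; 2I] gives sqrt(1 + 4) = sqrt(5)). A two-line check of that arithmetic:

import numpy
block_norms = [1.0, 2.0]                               # e.g. an Identity block and a 2*Identity block
print(numpy.sqrt(sum(n ** 2 for n in block_norms)))    # 2.236..., i.e. sqrt(5)
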
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
index aeb6c53..74ba9e4 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
@@ -49,8 +49,8 @@ class BlockScaledOperator(ScaledOperator):
return self.scalar * self.operator.adjoint(x, out=out)
else:
raise TypeError('Operator is not linear')
- def norm(self):
- return numpy.abs(self.scalar) * self.operator.norm()
+ def norm(self, **kwargs):
+ return numpy.abs(self.scalar) * self.operator.norm(**kwargs)
def range_geometry(self):
return self.operator.range_geometry()
def domain_geometry(self):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index f459334..876f45f 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -314,13 +314,8 @@ class FiniteDiff(LinearOperator):
def domain_geometry(self):
'''Returns the domain geometry'''
return self.gm_domain
-
- def norm(self):
- x0 = self.gm_domain.allocate('random_int')
- self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
- return self.s1
-
-
+
+
if __name__ == '__main__':
from ccpi.framework import ImageGeometry
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index d98961b..cd58b7d 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -109,13 +109,6 @@ class Gradient(LinearOperator):
def range_geometry(self):
return self.gm_range
-
- def norm(self, **kwargs):
-
- x0 = self.gm_domain.allocate('random')
- iterations = kwargs.get('iterations', 10)
- self.s1, sall, svec = LinearOperator.PowerMethod(self, iterations, x0)
- return self.s1
def __rmul__(self, scalar):
return ScaledOperator(self, scalar)
diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
index a853b8d..8f35373 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
@@ -35,7 +35,7 @@ class Identity(LinearOperator):
else:
out.fill(x)
- def norm(self):
+ def calculate_norm(self, **kwargs):
return 1.0
def domain_geometry(self):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
index f304cc6..55eb692 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
@@ -29,7 +29,7 @@ class LinearOperator(Operator):
# Initialise random
if x_init is None:
- x0 = operator.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+ x0 = operator.domain_geometry().allocate(type(operator.domain_geometry()).RANDOM_INT)
else:
x0 = x_init.copy()
@@ -45,23 +45,11 @@ class LinearOperator(Operator):
x1.multiply((1.0/x1norm), out=x0)
return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
- @staticmethod
- def PowerMethodNonsquare(op,numiters , x_init=None):
- '''Power method to calculate iteratively the Lipschitz constant'''
-
- if x_init is None:
- x0 = op.allocate_direct()
- x0.fill(numpy.random.randn(*x0.shape))
- else:
- x0 = x_init.copy()
-
- s = numpy.zeros(numiters)
- # Loop
- for it in numpy.arange(numiters):
- x1 = op.adjoint(op.direct(x0))
- x1norm = x1.norm()
- s[it] = (x1*x0).sum() / (x0.squared_norm())
- x0 = (1.0/x1norm)*x1
- return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+ def calculate_norm(self, **kwargs):
+ '''Returns the norm of the LinearOperator as calculated by the PowerMethod'''
+ x0 = kwargs.get('x0', None)
+ iterations = kwargs.get('iterations', 25)
+ s1, sall, svec = LinearOperator.PowerMethod(self, iterations, x_init=x0)
+ return s1
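
PowerMethodNonsquare is removed and calculate_norm now delegates to the remaining PowerMethod, with optional x0 and iterations keywords (25 iterations by default). A standalone sketch of the quantity being estimated, the largest singular value, written against a plain numpy matrix with a hypothetical helper name (power_method_norm) rather than the ccpi operator API:

import numpy

def power_method_norm(A, iterations=25, x0=None):
    '''Estimate ||A||_2 (largest singular value) by power iteration on A^T A.'''
    if x0 is None:
        x0 = numpy.random.randn(A.shape[1])
    s = 0.0
    for _ in range(iterations):
        x1 = A.T.dot(A.dot(x0))             # adjoint(direct(x0))
        s = x1.dot(x0) / x0.dot(x0)         # Rayleigh quotient, tends to sigma_max^2
        x0 = x1 / numpy.linalg.norm(x1)     # normalise for the next iteration
    return numpy.sqrt(s)

A = numpy.random.randn(200, 300)
print(power_method_norm(A, iterations=50))
print(numpy.linalg.svd(A, compute_uv=False)[0])   # reference value; the two should be close
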
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
index 62e22e0..8cc4c71 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
@@ -30,22 +30,7 @@ class LinearOperatorMatrix(LinearOperator):
def size(self):
return self.A.shape
- def get_max_sing_val(self):
+ def calculate_norm(self, **kwargs):
# If unknown, compute and store. If known, simply return it.
- if self.s1 is None:
- self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
- return self.s1
- else:
- return self.s1
- def allocate_direct(self):
- '''allocates the memory to hold the result of adjoint'''
- #numpy.dot(self.A.transpose(),x.as_array())
- M_A, N_A = self.A.shape
- out = numpy.zeros((N_A,1))
- return DataContainer(out)
- def allocate_adjoint(self):
- '''allocate the memory to hold the result of direct'''
- #numpy.dot(self.A.transpose(),x.as_array())
- M_A, N_A = self.A.shape
- out = numpy.zeros((M_A,1))
- return DataContainer(out)
+ return svds(self.A,1,return_singular_vectors=False)[0]
+
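
For a matrix-backed operator the 2-norm is the largest singular value, which is what the svds call above returns; the manual caching that get_max_sing_val used to do is no longer needed because Operator.norm() now memoises the result (see the Operator.py change below). A small check of svds against a full SVD:

import numpy
from scipy.sparse.linalg import svds

A = numpy.random.randn(30, 20)
print(svds(A, 1, return_singular_vectors=False)[0])   # largest singular value only
print(numpy.linalg.svd(A, compute_uv=False)[0])       # same value from the dense SVD
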
diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
index 2d2089b..c1adf62 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
@@ -8,14 +8,22 @@ from ccpi.optimisation.operators.ScaledOperator import ScaledOperator
class Operator(object):
'''Operator that maps from a space X -> Y'''
+ def __init__(self, **kwargs):
+ self.__norm = None
+
def is_linear(self):
'''Returns if the operator is linear'''
return False
def direct(self,x, out=None):
'''Returns the application of the Operator on x'''
raise NotImplementedError
- def norm(self):
+ def norm(self, **kwargs):
'''Returns the norm of the Operator'''
+ if self.__norm is None:
+ self.__norm = self.calculate_norm(**kwargs)
+ return self.__norm
+ def calculate_norm(self, **kwargs):
+ '''Calculates the norm of the Operator'''
raise NotImplementedError
def range_geometry(self):
'''Returns the range of the Operator: Y space'''
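
norm() now caches the value returned by calculate_norm(), so repeated calls (exercised by the new test_Norm further down) only pay the power-method cost once. A toy illustration of the caching split, with illustrative class names rather than the ccpi hierarchy:

class CachedNormOperator(object):
    '''Toy version of the norm/calculate_norm split; not the ccpi Operator class.'''
    def __init__(self):
        self.__norm = None                                # cache, filled on first call
    def norm(self, **kwargs):
        if self.__norm is None:
            self.__norm = self.calculate_norm(**kwargs)   # expensive, done once
        return self.__norm
    def calculate_norm(self, **kwargs):
        raise NotImplementedError

class ToyIdentity(CachedNormOperator):
    def calculate_norm(self, **kwargs):
        print('computing norm')                           # printed only on the first norm() call
        return 1.0

op = ToyIdentity()
print(op.norm())   # computes and caches
print(op.norm())   # served from the cache, no recomputation
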
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
index ba0049e..f7be5de 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
@@ -40,8 +40,8 @@ class ScaledOperator(object):
out *= self.scalar
else:
raise TypeError('Operator is not linear')
- def norm(self):
- return numpy.abs(self.scalar) * self.operator.norm()
+ def norm(self, **kwargs):
+ return numpy.abs(self.scalar) * self.operator.norm(**kwargs)
def range_geometry(self):
return self.operator.range_geometry()
def domain_geometry(self):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
index c5c2ec9..cb76dce 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
@@ -10,7 +10,7 @@ import scipy.sparse as sp
import numpy as np
from ccpi.framework import ImageData
-class SparseFiniteDiff():
+class SparseFiniteDiff(object):
def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index 243809b..205f7e1 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -111,17 +111,6 @@ class SymmetrizedGradient(Gradient):
def range_geometry(self):
return self.gm_range
-
- def norm(self):
-
-# TODO need dot method for BlockDataContainer
-# return numpy.sqrt(4*self.gm_domain.shape[0])
-
-# x0 = self.gm_domain.allocate('random')
- self.s1, sall, svec = LinearOperator.PowerMethod(self, 50)
- return self.s1
-
-
if __name__ == '__main__':
@@ -241,4 +230,4 @@ if __name__ == '__main__':
#
#
#
-#
\ No newline at end of file
+#
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
index 8168f0b..67808de 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
@@ -34,8 +34,8 @@ class ZeroOperator(LinearOperator):
else:
out.fill(self.gm_domain.allocate())
- def norm(self):
- return 0
+ def calculate_norm(self, **kwargs):
+ return 0.
def domain_geometry(self):
return self.gm_domain
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
index 5c97545..d890e46 100644
--- a/Wrappers/Python/test/test_Operator.py
+++ b/Wrappers/Python/test/test_Operator.py
@@ -51,6 +51,7 @@ class CCPiTestClass(unittest.TestCase):
class TestOperator(CCPiTestClass):
def test_ScaledOperator(self):
+ print ("test_ScaledOperator")
ig = ImageGeometry(10,20,30)
img = ig.allocate()
scalar = 0.5
@@ -58,7 +59,8 @@ class TestOperator(CCPiTestClass):
numpy.testing.assert_array_equal(scalar * img.as_array(), sid.direct(img).as_array())
- def test_TomoIdentity(self):
+ def test_Identity(self):
+ print ("test_Identity")
ig = ImageGeometry(10,20,30)
img = ig.allocate()
self.assertTrue(img.shape == (30,20,10))
@@ -107,9 +109,10 @@ class TestOperator(CCPiTestClass):
self.assertBlockDataContainerEqual(res, w)
def test_PowerMethod(self):
+ print ("test_BlockOperator")
N, M = 200, 300
- niter = 1000
+ niter = 10
ig = ImageGeometry(N, M)
Id = Identity(ig)
@@ -118,23 +121,37 @@ class TestOperator(CCPiTestClass):
uid = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT, seed=1)
a = LinearOperator.PowerMethod(Id, niter, uid)
- b = LinearOperator.PowerMethodNonsquare(Id, niter, uid)
-
+ #b = LinearOperator.PowerMethodNonsquare(Id, niter, uid)
+ b = LinearOperator.PowerMethod(Id, niter)
print ("Edo impl", a[0])
- print ("old impl", b[0])
+ print ("None impl", b[0])
#self.assertAlmostEqual(a[0], b[0])
self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=6)
a = LinearOperator.PowerMethod(G, niter, uid)
- b = LinearOperator.PowerMethodNonsquare(G, niter, uid)
+ b = LinearOperator.PowerMethod(G, niter)
+ #b = LinearOperator.PowerMethodNonsquare(G, niter, uid)
print ("Edo impl", a[0])
- print ("old impl", b[0])
+ #print ("old impl", b[0])
self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=2)
#self.assertAlmostEqual(a[0], b[0])
+ def test_Norm(self):
+ print ("test_BlockOperator")
+ ##
+ N, M = 200, 300
+ ig = ImageGeometry(N, M)
+ G = Gradient(ig)
+ t0 = timer()
+ norm = G.norm()
+ t1 = timer()
+ norm2 = G.norm()
+ t2 = timer()
+ print ("Norm dT1 {} dT2 {}".format(t1-t0,t2-t1))
+ self.assertLess(t2-t1, t1-t0)
@@ -175,7 +192,7 @@ class TestBlockOperator(unittest.TestCase):
self.assertTrue(res)
def test_BlockOperator(self):
-
+ print ("test_BlockOperator")
M, N = 3, 4
ig = ImageGeometry(M, N)
@@ -358,7 +375,7 @@ class TestBlockOperator(unittest.TestCase):
print("Z1", Z1[0][1].as_array())
print("RES1", RES1[0][1].as_array())
def test_timedifference(self):
-
+ print ("test_timedifference")
M, N ,W = 100, 512, 512
ig = ImageGeometry(M, N, W)
arr = ig.allocate('random_int')
@@ -427,7 +444,7 @@ class TestBlockOperator(unittest.TestCase):
self.assertGreater(t1,t2)
def test_BlockOperatorLinearValidity(self):
-
+ print ("test_BlockOperatorLinearValidity")
M, N = 3, 4
ig = ImageGeometry(M, N)
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index 669804e..8c398f4 100755
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -13,11 +13,11 @@ from ccpi.framework import AcquisitionData
from ccpi.framework import ImageGeometry
from ccpi.framework import AcquisitionGeometry
from ccpi.optimisation.operators import Identity
-from ccpi.optimisation.funcs import Norm2sq
+from ccpi.optimisation.functions import Norm2Sq, ZeroFunction, \
+ L2NormSquared, FunctionOperatorComposition
from ccpi.optimisation.algorithms import GradientDescent
from ccpi.optimisation.algorithms import CGLS
from ccpi.optimisation.algorithms import FISTA
-from ccpi.optimisation.algorithms import FBPD
@@ -50,11 +50,13 @@ class TestAlgorithms(unittest.TestCase):
identity = Identity(ig)
- norm2sq = Norm2sq(identity, b)
+ norm2sq = Norm2Sq(identity, b)
+ rate = 0.3
+ rate = norm2sq.L / 3.
alg = GradientDescent(x_init=x_init,
objective_function=norm2sq,
- rate=0.3)
+ rate=rate)
alg.max_iteration = 20
alg.run(20, verbose=True)
self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
@@ -80,20 +82,19 @@ class TestAlgorithms(unittest.TestCase):
b = x_init.copy()
# fill with random numbers
b.fill(numpy.random.random(x_init.shape))
- x_init = ImageData(geometry=ig)
- x_init.fill(numpy.random.random(x_init.shape))
-
+ x_init = ig.allocate(ImageGeometry.RANDOM)
identity = Identity(ig)
- norm2sq = Norm2sq(identity, b)
- norm2sq.L = 2 * norm2sq.c * identity.norm()**2
+ #### it seems FISTA does not work with Nowm2Sq
+ # norm2sq = Norm2Sq(identity, b)
+ # norm2sq.L = 2 * norm2sq.c * identity.norm()**2
+ norm2sq = FunctionOperatorComposition(L2NormSquared(b=b), identity)
opt = {'tol': 1e-4, 'memopt':False}
- alg = FISTA(x_init=x_init, f=norm2sq, g=None, opt=opt)
+ print ("initial objective", norm2sq(x_init))
+ alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction())
alg.max_iteration = 2
alg.run(20, verbose=True)
self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
- alg.run(20, verbose=True)
- self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
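
Two things stand out in this test file: the GradientDescent test now ties the rate to the Lipschitz constant of the objective (rate = norm2sq.L / 3.) instead of a hard-coded 0.3, and the FISTA test swaps Norm2Sq for a FunctionOperatorComposition of L2NormSquared. A plain-numpy sketch of why that rate choice converges for this particular objective, f(x) = ||x - b||^2 with L = 2, so any rate in (0, 2/L) = (0, 1) works:

import numpy
b = numpy.random.random(10)
L = 2.0                              # Lipschitz constant of grad f for f(x) = ||x - b||^2
rate = L / 3.0                       # ~0.67, inside the convergent range (0, 1)
x = numpy.zeros_like(b)
for _ in range(100):
    x = x - rate * 2.0 * (x - b)     # gradient step; grad f(x) = 2*(x - b)
print(numpy.abs(x - b).max())        # effectively zero, matching the assertion in the test
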
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 082548b..021ad99 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -20,7 +20,7 @@ from ccpi.optimisation.operators import Gradient
from ccpi.optimisation.functions import L2NormSquared
from ccpi.optimisation.functions import L1Norm, MixedL21Norm
-from ccpi.optimisation.functions import Norm2sq
+from ccpi.optimisation.functions import Norm2Sq
from ccpi.optimisation.functions import ZeroFunction
from ccpi.optimisation.functions import FunctionOperatorComposition
@@ -298,7 +298,7 @@ class TestFunction(unittest.TestCase):
b = ig.allocate(ImageGeometry.RANDOM_INT)
A = 0.5 * Identity(ig)
- old_chisq = Norm2sq(A, b, 1.0)
+ old_chisq = Norm2Sq(A, b, 1.0)
new_chisq = FunctionOperatorComposition(L2NormSquared(b=b),A)
yold = old_chisq(u)
@@ -364,4 +364,4 @@ class TestFunction(unittest.TestCase):
proxc = f.proximal_conjugate(x,1.2)
f.proximal_conjugate(x, 1.2, out=out)
- numpy.testing.assert_array_equal(proxc.as_array(), out.as_array())
\ No newline at end of file
+ numpy.testing.assert_array_equal(proxc.as_array(), out.as_array())
diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py
index 9b4d53b..22d4a78 100755
--- a/Wrappers/Python/test/test_run_test.py
+++ b/Wrappers/Python/test/test_run_test.py
@@ -8,10 +8,11 @@ from ccpi.framework import ImageGeometry
from ccpi.framework import AcquisitionGeometry
from ccpi.optimisation.algorithms import FISTA
#from ccpi.optimisation.algs import FBPD
-from ccpi.optimisation.functions import Norm2sq
+from ccpi.optimisation.functions import Norm2Sq
from ccpi.optimisation.functions import ZeroFunction
-from ccpi.optimisation.funcs import Norm1
-from ccpi.optimisation.funcs import Norm2
+from ccpi.optimisation.functions import L1Norm
+# This was removed
+#from ccpi.optimisation.funcs import Norm2
from ccpi.optimisation.operators import LinearOperatorMatrix
from ccpi.optimisation.operators import Identity
@@ -80,7 +81,7 @@ class TestAlgorithms(unittest.TestCase):
lam = 10
opt = {'memopt': True}
# Create object instances with the test data A and b.
- f = Norm2sq(A, b, c=0.5, memopt=True)
+ f = Norm2Sq(A, b, c=0.5, memopt=True)
g0 = ZeroFunction()
# Initial guess
@@ -144,14 +145,14 @@ class TestAlgorithms(unittest.TestCase):
lam = 10
opt = {'memopt': True}
# Create object instances with the test data A and b.
- f = Norm2sq(A, b, c=0.5, memopt=True)
+ f = Norm2Sq(A, b, c=0.5, memopt=True)
g0 = ZeroFunction()
# Initial guess
x_init = DataContainer(np.zeros((n, 1)))
# Create 1-norm object instance
- g1 = Norm1(lam)
+ g1 = lam * L1Norm()
g1(x_init)
g1.prox(x_init, 0.02)
@@ -218,14 +219,14 @@ class TestAlgorithms(unittest.TestCase):
x_init = DataContainer(np.random.randn(n, 1))
# Create object instances with the test data A and b.
- f = Norm2sq(A, b, c=0.5, memopt=True)
+ f = Norm2Sq(A, b, c=0.5, memopt=True)
f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
print ("Lipschitz", f.L)
g0 = ZeroFun()
# Create 1-norm object instance
- g1 = Norm1(lam)
+ g1 = lam * L1Norm()
# Compare to CVXPY
@@ -286,13 +287,13 @@ class TestAlgorithms(unittest.TestCase):
y.array = y.array + 0.1*np.random.randn(N, N)
# Data fidelity term
- f_denoise = Norm2sq(I, y, c=0.5, memopt=True)
+ f_denoise = Norm2Sq(I, y, c=0.5, memopt=True)
x_init = ImageData(geometry=ig)
f_denoise.L = LinearOperator.PowerMethod(I, 25, x_init)[0]
# 1-norm regulariser
lam1_denoise = 1.0
- g1_denoise = Norm1(lam1_denoise)
+ g1_denoise = lam1_denoise * L1Norm()
# Initial guess
x_init_denoise = ImageData(np.zeros((N, N)))