From 97a45ae06f9bc95f9df640158e16f522d356392d Mon Sep 17 00:00:00 2001
From: "Jakob Jorgensen, WS at HMXIF" <jakob.jorgensen@manchester.ac.uk>
Date: Tue, 21 May 2019 15:44:59 +0100
Subject: Fix upper/lower case inconsistency to allow astra demo to run

---
 Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py  | 4 ++--
 Wrappers/Python/ccpi/optimisation/functions/__init__.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'Wrappers')
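
For reference, after this patch the class name and the name exported from functions/__init__.py
both read Norm2Sq, so the CamelCase import used by the astra demo resolves. A minimal sketch of
the now-working usage (it mirrors the call in test_functions.py further down; assumes Identity and
ImageGeometry import as they do in those tests):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Identity
    from ccpi.optimisation.functions import Norm2Sq

    # Build a small least-squares data-fidelity term, as in the tests below
    ig = ImageGeometry(10, 20, 30)
    b = ig.allocate(ImageGeometry.RANDOM_INT)
    A = Identity(ig)

    f = Norm2Sq(A, b, c=0.5)   # f(x) = 0.5 * ||A*x - b||_2^2
    print(f(b))                # zero residual when x == b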

diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
index b553d7c..0b6bb0b 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
@@ -21,7 +21,7 @@ import numpy
 import warnings
 
 # Define a class for squared 2-norm
-class Norm2sq(Function):
+class Norm2Sq(Function):
     '''
     f(x) = c*||A*x-b||_2^2
     
@@ -38,7 +38,7 @@ class Norm2sq(Function):
     '''
     
     def __init__(self,A,b,c=1.0,memopt=False):
-        super(Norm2sq, self).__init__()
+        super(Norm2Sq, self).__init__()
     
         self.A = A  # Should be an operator, default identity
         self.b = b  # Default zero DataSet?
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
index a82ee3e..c0eab31 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -10,4 +10,4 @@ from .FunctionOperatorComposition import FunctionOperatorComposition
 from .MixedL21Norm import MixedL21Norm
 from .IndicatorBox import IndicatorBox
 from .KullbackLeibler import KullbackLeibler
-from .Norm2Sq import Norm2sq
+from .Norm2Sq import Norm2Sq
-- 
cgit v1.2.3


From 6794aaaa388f622cc8cd18c56c7345dca016163b Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Thu, 23 May 2019 16:04:50 +0100
Subject: update pretty print

---
 .../ccpi/optimisation/algorithms/Algorithm.py      | 44 +++++++++++++++++-----
 1 file changed, 34 insertions(+), 10 deletions(-)

(limited to 'Wrappers')
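
The header and the per-iteration line are both built with str.format field widths; a plain-Python
illustration of the specifiers used in the hunk below ('>' right-aligns within the given width,
'.5e' prints scientific notation with five decimals):

    header = "{:>9} {:>10} {:>13} {:>20}".format('Iter', 'Max Iter', 'Time/Iter', 'Objective')
    row = "{:>9} {:>10} {:>13} {:>20.5e}".format(3, 100, "{:.3f}".format(0.012), 1.2345e-03)
    print(header)
    print(row)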

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index e119a1c..0778838 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -149,10 +149,7 @@ class Algorithm(object):
         i = 0
         
         if verbose:
-            print ("{:>9} {:>10} {:>11} {:>20}".format('Iter', 
-                                                      'Max Iter',
-                                                      's/Iter',
-                                                      'Objective Value'))
+            print (self.verbose_header())
         for _ in self:
             if (self.iteration -1) % self.update_objective_interval == 0:                
                 if verbose:
@@ -173,12 +170,39 @@ class Algorithm(object):
             t = sum(timing)/len(timing)
         el = [ self.iteration-1, 
                self.max_iteration,
-               "{:.3f} s/it".format(t), 
+               "{:.3f}".format(t), 
                self.get_last_objective() ]
         
-        if type(el[-1] ) == list:
-            string = functools.reduce(lambda x,y: x+' {:>15.5e}'.format(y), el[-1],'')
-            out = "{:>9} {:>10} {:>11} {}".format(*el[:-1] , string)
-        else:
-            out = "{:>9} {:>10} {:>11} {:>20.5e}".format(*el)
+        string = self.objective_to_string()
+        out = "{:>9} {:>10} {:>13} {}".format(*el[:-1] , string)
         return out
+
+    def objective_to_string(self):
+        el = self.get_last_objective()[0]
+        if type(el) == list:
+            string = functools.reduce(lambda x,y: x+' {:>13.5e}'.format(y), el[:-1],'')
+            string += '{:>15.5e}'.format(el[-1])
+        else:
+            string = "{:>20.5e}".format(el)
+        return string
+    def verbose_header(self):
+        el = 1
+        if type(el) == list:
+            out = "{:>9} {:>10} {:>13} {:>13} {:>13} {:>15}\n".format('Iter', 
+                                                      'Max Iter',
+                                                      'Time/Iter',
+                                                      'Primal' , 'Dual', 'Primal-Dual')
+            out += "{:>9} {:>10} {:>13} {:>13} {:>13} {:>15}".format('', 
+                                                      '',
+                                                      '[s]',
+                                                      'Objective' , 'Objective', 'Gap')
+        else:
+            out = "{:>9} {:>10} {:>13} {:>20}".format('Iter', 
+                                                      'Max Iter',
+                                                      'Time/Iter',
+                                                      'Objective')
+            out += "{:>9} {:>10} {:>13} {:>20}".format('', 
+                                                      '',
+                                                      '[s]',
+                                                      '')
+        return out
\ No newline at end of file
-- 
cgit v1.2.3


From 31410bc217c7de4659e5df35f3423fdbbd04f59a Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Thu, 23 May 2019 16:20:08 +0100
Subject: updated print

---
 Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index 0778838..78fe196 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -147,10 +147,9 @@ class Algorithm(object):
         if self.should_stop():
             print ("Stop cryterion has been reached.")
         i = 0
-        
-        if verbose:
-            print (self.verbose_header())
         for _ in self:
+            if i == 0 and verbose:
+                print (self.verbose_header())
             if (self.iteration -1) % self.update_objective_interval == 0:                
                 if verbose:
                     #print ("Iteration {:>7} max: {:>7}, = {}".format(self.iteration-1, 
@@ -178,7 +177,7 @@ class Algorithm(object):
         return out
 
     def objective_to_string(self):
-        el = self.get_last_objective()[0]
+        el = self.get_last_objective()
         if type(el) == list:
             string = functools.reduce(lambda x,y: x+' {:>13.5e}'.format(y), el[:-1],'')
             string += '{:>15.5e}'.format(el[-1])
@@ -186,7 +185,7 @@ class Algorithm(object):
             string = "{:>20.5e}".format(el)
         return string
     def verbose_header(self):
-        el = 1
+        el = self.get_last_objective()
         if type(el) == list:
             out = "{:>9} {:>10} {:>13} {:>13} {:>13} {:>15}\n".format('Iter', 
                                                       'Max Iter',
@@ -197,7 +196,7 @@ class Algorithm(object):
                                                       '[s]',
                                                       'Objective' , 'Objective', 'Gap')
         else:
-            out = "{:>9} {:>10} {:>13} {:>20}".format('Iter', 
+            out = "{:>9} {:>10} {:>13} {:>20}\n".format('Iter', 
                                                       'Max Iter',
                                                       'Time/Iter',
                                                       'Objective')
-- 
cgit v1.2.3


From 8787adfef38ded4852b53a6d7812bb7d4f888d86 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Mon, 3 Jun 2019 11:18:23 -0400
Subject: fix import and calls in tests

---
 Wrappers/Python/test/test_functions.py |  6 +++---
 Wrappers/Python/test/test_run_test.py  | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'Wrappers')

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index af419c7..523008d 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -20,7 +20,7 @@ from ccpi.optimisation.operators import Gradient
 from ccpi.optimisation.functions import L2NormSquared
 from ccpi.optimisation.functions import L1Norm, MixedL21Norm
 
-from ccpi.optimisation.functions import Norm2sq
+from ccpi.optimisation.functions import Norm2Sq
 from ccpi.optimisation.functions import ZeroFunction
 
 from ccpi.optimisation.functions import FunctionOperatorComposition
@@ -298,7 +298,7 @@ class TestFunction(unittest.TestCase):
         b = ig.allocate(ImageGeometry.RANDOM_INT) 
         
         A = 0.5 * Identity(ig)
-        old_chisq = Norm2sq(A, b, 1.0)
+        old_chisq = Norm2Sq(A, b, 1.0)
         new_chisq = FunctionOperatorComposition(A, L2NormSquared(b=b))
 
         yold = old_chisq(u)
@@ -364,4 +364,4 @@ class TestFunction(unittest.TestCase):
         
         proxc = f.proximal_conjugate(x,1.2)
         f.proximal_conjugate(x, 1.2, out=out)
-        numpy.testing.assert_array_equal(proxc.as_array(), out.as_array())
\ No newline at end of file
+        numpy.testing.assert_array_equal(proxc.as_array(), out.as_array())
diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py
index 9b4d53b..a6c13f4 100755
--- a/Wrappers/Python/test/test_run_test.py
+++ b/Wrappers/Python/test/test_run_test.py
@@ -8,7 +8,7 @@ from ccpi.framework import ImageGeometry
 from ccpi.framework import AcquisitionGeometry
 from ccpi.optimisation.algorithms import FISTA
 #from ccpi.optimisation.algs import FBPD
-from ccpi.optimisation.functions import Norm2sq
+from ccpi.optimisation.functions import Norm2Sq
 from ccpi.optimisation.functions import ZeroFunction
 from ccpi.optimisation.funcs import Norm1
 from ccpi.optimisation.funcs import Norm2
@@ -80,7 +80,7 @@ class TestAlgorithms(unittest.TestCase):
                 lam = 10
                 opt = {'memopt': True}
                 # Create object instances with the test data A and b.
-                f = Norm2sq(A, b, c=0.5, memopt=True)
+                f = Norm2Sq(A, b, c=0.5, memopt=True)
                 g0 = ZeroFunction()
 
                 # Initial guess
@@ -144,7 +144,7 @@ class TestAlgorithms(unittest.TestCase):
                 lam = 10
                 opt = {'memopt': True}
                 # Create object instances with the test data A and b.
-                f = Norm2sq(A, b, c=0.5, memopt=True)
+                f = Norm2Sq(A, b, c=0.5, memopt=True)
                 g0 = ZeroFunction()
 
                 # Initial guess
@@ -218,7 +218,7 @@ class TestAlgorithms(unittest.TestCase):
             x_init = DataContainer(np.random.randn(n, 1))
 
             # Create object instances with the test data A and b.
-            f = Norm2sq(A, b, c=0.5, memopt=True)
+            f = Norm2Sq(A, b, c=0.5, memopt=True)
             f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
             print ("Lipschitz", f.L)
             g0 = ZeroFun()
@@ -286,7 +286,7 @@ class TestAlgorithms(unittest.TestCase):
             y.array = y.array + 0.1*np.random.randn(N, N)
 
             # Data fidelity term
-            f_denoise = Norm2sq(I, y, c=0.5, memopt=True)
+            f_denoise = Norm2Sq(I, y, c=0.5, memopt=True)
             x_init = ImageData(geometry=ig)
             f_denoise.L = LinearOperator.PowerMethod(I, 25, x_init)[0]
 
-- 
cgit v1.2.3


From d688da2c586e3165bc366772668ecb008d03eca3 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Tue, 4 Jun 2019 04:57:46 -0400
Subject: fix print for python27

---
 Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

(limited to 'Wrappers')
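
The Python 2.7 failure comes from the previous call "...".format(*el[:-1], string): a positional
argument after *-unpacking in a call is a SyntaxError on 2.7, so the line is now built by passing
each argument explicitly. A minimal illustration:

    # SyntaxError on Python 2.7:
    #   "{:>9} {:>10} {:>13} {}".format(*head, tail)
    # Passing every argument explicitly works on both 2.7 and 3.x:
    head = [0, 100, "0.500"]
    tail = "1.00000e+00"
    print("{:>9} {:>10} {:>13} {}".format(head[0], head[1], head[2], tail))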

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index 78fe196..9f54c1e 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -167,13 +167,12 @@ class Algorithm(object):
             t = 0
         else:
             t = sum(timing)/len(timing)
-        el = [ self.iteration-1, 
-               self.max_iteration,
-               "{:.3f}".format(t), 
-               self.get_last_objective() ]
-        
-        string = self.objective_to_string()
-        out = "{:>9} {:>10} {:>13} {}".format(*el[:-1] , string)
+        out = "{:>9} {:>10} {:>13} {}".format(
+                 self.iteration-1, 
+                 self.max_iteration,
+                 "{:.3f}".format(t), 
+                 self.objective_to_string()
+               )
         return out
 
     def objective_to_string(self):
@@ -204,4 +203,4 @@ class Algorithm(object):
                                                       '',
                                                       '[s]',
                                                       '')
-        return out
\ No newline at end of file
+        return out
-- 
cgit v1.2.3


From 1580b6fbaebc77649c2eebf89ea7a9ebda8ba91c Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Thu, 6 Jun 2019 12:16:53 +0000
Subject: Norm refactor (#299)

* Fix upper/lower case inconsistency to allow astra demo to run

* fix import and calls in tests

* add storage in the Operator class of the operator norm (see the caching sketch after this list)

  note: a scaled operator is not itself an Operator

* add test for norm

* reduced iterations in PowerMethod
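
The norm storage amounts to lazy caching in the Operator base class; a standalone sketch of the
pattern introduced by the Operator.py hunk below (the ToyIdentity subclass is added purely for
illustration):

    class Operator(object):
        '''Operator that maps from a space X -> Y'''
        def __init__(self, **kwargs):
            self.__norm = None                # cached norm, computed on first request

        def norm(self):
            '''Returns the norm of the Operator, computing it only once'''
            if self.__norm is None:
                self.__norm = self.calculate_norm()
            return self.__norm

        def calculate_norm(self):
            '''Concrete operators implement the actual computation'''
            raise NotImplementedError

    class ToyIdentity(Operator):
        '''Illustration-only subclass'''
        def calculate_norm(self):
            return 1.0

    op = ToyIdentity()
    print(op.norm())   # computed via calculate_norm()
    print(op.norm())   # returned from the cache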
---
 .../ccpi/optimisation/operators/BlockOperator.py   |  2 +-
 .../optimisation/operators/BlockScaledOperator.py  |  2 +-
 .../operators/FiniteDifferenceOperator.py          |  2 +-
 .../optimisation/operators/GradientOperator.py     |  2 +-
 .../optimisation/operators/IdentityOperator.py     |  2 +-
 .../Python/ccpi/optimisation/operators/Operator.py |  8 +++++++
 .../operators/SymmetrizedGradientOperator.py       |  2 +-
 .../ccpi/optimisation/operators/ZeroOperator.py    |  2 +-
 Wrappers/Python/test/test_Operator.py              | 26 +++++++++++++++++-----
 9 files changed, 36 insertions(+), 12 deletions(-)

(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index 1d77510..10fe34c 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -104,7 +104,7 @@ class BlockOperator(Operator):
         index = row*self.shape[1]+col
         return self.operators[index]
     
-    def norm(self):
+    def calculate_norm(self):
         norm = [op.norm()**2 for op in self.operators]
         return numpy.sqrt(sum(norm))    
     
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
index aeb6c53..dc8cbf7 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
@@ -49,7 +49,7 @@ class BlockScaledOperator(ScaledOperator):
             return self.scalar * self.operator.adjoint(x, out=out)
         else:
             raise TypeError('Operator is not linear')
-    def norm(self):
+    def calculate_norm(self):
         return numpy.abs(self.scalar) * self.operator.norm()
     def range_geometry(self):
         return self.operator.range_geometry()
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 2c42532..89f3549 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -315,7 +315,7 @@ class FiniteDiff(LinearOperator):
         '''Returns the domain geometry'''
         return self.gm_domain
        
-    def norm(self):
+    def calculate_norm(self):
         x0 = self.gm_domain.allocate('random_int')
         self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
         return self.s1
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index e0b8a32..475022e 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -86,7 +86,7 @@ class Gradient(LinearOperator):
     def range_geometry(self):
         return self.gm_range
                                    
-    def norm(self):
+    def calculate_norm(self):
 
         x0 = self.gm_domain.allocate('random')
         self.s1, sall, svec = LinearOperator.PowerMethod(self, 10, x0)
diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
index a58a296..61428ae 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
@@ -35,7 +35,7 @@ class Identity(LinearOperator):
         else:
             out.fill(x)
         
-    def norm(self):
+    def calculate_norm(self):
         return 1.0
         
     def domain_geometry(self):       
diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
index 2d2089b..0cac586 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
@@ -8,6 +8,9 @@ from ccpi.optimisation.operators.ScaledOperator import ScaledOperator
 
 class Operator(object):
     '''Operator that maps from a space X -> Y'''
+    def __init__(self, **kwargs):
+        self.__norm = None
+
     def is_linear(self):
         '''Returns if the operator is linear'''
         return False
@@ -16,6 +19,11 @@ class Operator(object):
         raise NotImplementedError
     def norm(self):
         '''Returns the norm of the Operator'''
+        if self.__norm is None:
+            self.__norm = self.calculate_norm()
+        return self.__norm
+    def calculate_norm(self):
+        '''Calculates the norm of the Operator'''
         raise NotImplementedError
     def range_geometry(self):
         '''Returns the range of the Operator: Y space'''
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index 3fc43b8..da73de6 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -102,7 +102,7 @@ class SymmetrizedGradient(Gradient):
     def range_dim(self):
         return self.gm_range
                                    
-    def norm(self):
+    def calculate_norm(self):
 #        return np.sqrt(4*len(self.domainDim()))        
         #TODO this takes time for big ImageData
         # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)        
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
index a7c5f09..2331856 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
@@ -29,7 +29,7 @@ class ZeroOp(Operator):
         else:
             return ImageData(np.zeros(self.gm_domain))
         
-    def norm(self):
+    def calculate_norm(self):
         return 0
     
     def domain_dim(self):       
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
index 5c97545..2fc45a5 100644
--- a/Wrappers/Python/test/test_Operator.py
+++ b/Wrappers/Python/test/test_Operator.py
@@ -51,6 +51,7 @@ class CCPiTestClass(unittest.TestCase):
 
 class TestOperator(CCPiTestClass):
     def test_ScaledOperator(self):
+        print ("test_ScaledOperator")
         ig = ImageGeometry(10,20,30)
         img = ig.allocate()
         scalar = 0.5
@@ -58,7 +59,8 @@ class TestOperator(CCPiTestClass):
         numpy.testing.assert_array_equal(scalar * img.as_array(), sid.direct(img).as_array())
         
 
-    def test_TomoIdentity(self):
+    def test_Identity(self):
+        print ("test_Identity")
         ig = ImageGeometry(10,20,30)
         img = ig.allocate()
         self.assertTrue(img.shape == (30,20,10))
@@ -107,9 +109,10 @@ class TestOperator(CCPiTestClass):
         self.assertBlockDataContainerEqual(res, w)
         
     def test_PowerMethod(self):
+        print ("test_BlockOperator")
         
         N, M = 200, 300
-        niter = 1000
+        niter = 10
         ig = ImageGeometry(N, M)
         Id = Identity(ig)
         
@@ -134,7 +137,20 @@ class TestOperator(CCPiTestClass):
         self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=2)
         #self.assertAlmostEqual(a[0], b[0])
         
+    def test_Norm(self):
+        print ("test_BlockOperator")
+        ##
+        N, M = 200, 300
 
+        ig = ImageGeometry(N, M)
+        G = Gradient(ig)
+        t0 = timer()
+        norm = G.norm()
+        t1 = timer()
+        norm2 = G.norm()
+        t2 = timer()
+        print ("Norm dT1 {} dT2 {}".format(t1-t0,t2-t1))
+        self.assertLess(t2-t1, t1-t0)
 
 
 
@@ -175,7 +191,7 @@ class TestBlockOperator(unittest.TestCase):
         self.assertTrue(res)
 
     def test_BlockOperator(self):
-        
+        print ("test_BlockOperator")
         
         M, N  = 3, 4
         ig = ImageGeometry(M, N)
@@ -358,7 +374,7 @@ class TestBlockOperator(unittest.TestCase):
             print("Z1", Z1[0][1].as_array())
             print("RES1", RES1[0][1].as_array())
     def test_timedifference(self):
-        
+        print ("test_timedifference")
         M, N ,W = 100, 512, 512
         ig = ImageGeometry(M, N, W)
         arr = ig.allocate('random_int')  
@@ -427,7 +443,7 @@ class TestBlockOperator(unittest.TestCase):
         self.assertGreater(t1,t2)
     
     def test_BlockOperatorLinearValidity(self):
-        
+        print ("test_BlockOperatorLinearValidity")
         
         M, N  = 3, 4
         ig = ImageGeometry(M, N)
-- 
cgit v1.2.3


From 68d716ff14264bfaac8c9311d435530689382e7e Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Fri, 7 Jun 2019 13:25:15 +0000
Subject: Merge norm ref (#301)

* Fix upper/lower case inconsistency to allow astra demo to run

* fix import and calls in tests

* add storage in Operator Class of the operator norm

scaled operator is not an Operator

* add test for norm

* simplified calls to norm for LinearOperators, added a default calculate_norm to the base class (usage sketched after this list)

* add pillow as dependency

* resolve conflict
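
With the default calculate_norm added to LinearOperator, concrete operators no longer need their
own power-method code. A minimal usage sketch (assumes Gradient and ImageGeometry import as in
test_Operator.py below; the kwargs map to x0 and the number of power-method iterations):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Gradient

    ig = ImageGeometry(200, 300)
    G = Gradient(ig)

    n1 = G.norm(iterations=10)   # first call runs LinearOperator.PowerMethod (10 iterations here)
    n2 = G.norm()                # second call returns the value cached by Operator.norm()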
---
 .../ccpi/optimisation/operators/BlockOperator.py   |  4 +--
 .../optimisation/operators/BlockScaledOperator.py  |  4 +--
 .../operators/FiniteDifferenceOperator.py          |  9 ++----
 .../optimisation/operators/GradientOperator.py     |  5 ----
 .../optimisation/operators/IdentityOperator.py     |  2 +-
 .../ccpi/optimisation/operators/LinearOperator.py  | 26 +++++-------------
 .../optimisation/operators/LinearOperatorMatrix.py | 21 ++------------
 .../Python/ccpi/optimisation/operators/Operator.py |  6 ++--
 .../ccpi/optimisation/operators/ScaledOperator.py  |  4 +--
 .../optimisation/operators/SparseFiniteDiff.py     |  2 +-
 .../operators/SymmetrizedGradientOperator.py       | 32 +---------------------
 .../ccpi/optimisation/operators/ZeroOperator.py    |  4 +--
 Wrappers/Python/conda-recipe/meta.yaml             |  1 +
 Wrappers/Python/test/test_Operator.py              | 11 ++++----
 14 files changed, 33 insertions(+), 98 deletions(-)

(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index 10fe34c..1577f06 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -104,8 +104,8 @@ class BlockOperator(Operator):
         index = row*self.shape[1]+col
         return self.operators[index]
     
-    def calculate_norm(self):
-        norm = [op.norm()**2 for op in self.operators]
+    def calculate_norm(self, **kwargs):
+        norm = [op.norm(**kwargs)**2 for op in self.operators]
         return numpy.sqrt(sum(norm))    
     
     def direct(self, x, out=None):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
index dc8cbf7..74ba9e4 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py
@@ -49,8 +49,8 @@ class BlockScaledOperator(ScaledOperator):
             return self.scalar * self.operator.adjoint(x, out=out)
         else:
             raise TypeError('Operator is not linear')
-    def calculate_norm(self):
-        return numpy.abs(self.scalar) * self.operator.norm()
+    def norm(self, **kwargs):
+        return numpy.abs(self.scalar) * self.operator.norm(**kwargs)
     def range_geometry(self):
         return self.operator.range_geometry()
     def domain_geometry(self):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 89f3549..05278ef 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -314,13 +314,8 @@ class FiniteDiff(LinearOperator):
     def domain_geometry(self):
         '''Returns the domain geometry'''
         return self.gm_domain
-       
-    def calculate_norm(self):
-        x0 = self.gm_domain.allocate('random_int')
-        self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
-        return self.s1
-    
-    
+
+
 if __name__ == '__main__':
     
     from ccpi.framework import ImageGeometry
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 475022e..8aa7f5b 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -85,12 +85,7 @@ class Gradient(LinearOperator):
     
     def range_geometry(self):
         return self.gm_range
-                                   
-    def calculate_norm(self):
 
-        x0 = self.gm_domain.allocate('random')
-        self.s1, sall, svec = LinearOperator.PowerMethod(self, 10, x0)
-        return self.s1
     
     def __rmul__(self, scalar):
         return ScaledOperator(self, scalar) 
diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
index 61428ae..f084504 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
@@ -35,7 +35,7 @@ class Identity(LinearOperator):
         else:
             out.fill(x)
         
-    def calculate_norm(self):
+    def calculate_norm(self, **kwargs):
         return 1.0
         
     def domain_geometry(self):       
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
index f304cc6..55eb692 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
@@ -29,7 +29,7 @@ class LinearOperator(Operator):
         
         # Initialise random
         if x_init is None:
-            x0 = operator.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+            x0 = operator.domain_geometry().allocate(type(operator.domain_geometry()).RANDOM_INT)
         else:
             x0 = x_init.copy()
             
@@ -45,23 +45,11 @@ class LinearOperator(Operator):
             x1.multiply((1.0/x1norm), out=x0)
         return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
 
-    @staticmethod
-    def PowerMethodNonsquare(op,numiters , x_init=None):
-        '''Power method to calculate iteratively the Lipschitz constant'''
-        
-        if x_init is None:
-            x0 = op.allocate_direct()
-            x0.fill(numpy.random.randn(*x0.shape))
-        else:
-            x0 = x_init.copy()
-        
-        s = numpy.zeros(numiters)
-        # Loop
-        for it in numpy.arange(numiters):
-            x1 = op.adjoint(op.direct(x0))
-            x1norm = x1.norm()
-            s[it] = (x1*x0).sum() / (x0.squared_norm())
-            x0 = (1.0/x1norm)*x1
-        return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+    def calculate_norm(self, **kwargs):
+        '''Returns the norm of the LinearOperator as calculated by the PowerMethod'''
+        x0 = kwargs.get('x0', None)
+        iterations = kwargs.get('iterations', 25)
+        s1, sall, svec = LinearOperator.PowerMethod(self, iterations, x_init=x0)
+        return s1
 
 
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
index 62e22e0..8cc4c71 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
@@ -30,22 +30,7 @@ class LinearOperatorMatrix(LinearOperator):
     def size(self):
         return self.A.shape
     
-    def get_max_sing_val(self):
+    def calculate_norm(self, **kwargs):
         # If unknown, compute and store. If known, simply return it.
-        if self.s1 is None:
-            self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
-            return self.s1
-        else:
-            return self.s1
-    def allocate_direct(self):
-        '''allocates the memory to hold the result of adjoint'''
-        #numpy.dot(self.A.transpose(),x.as_array())
-        M_A, N_A = self.A.shape
-        out = numpy.zeros((N_A,1))
-        return DataContainer(out)
-    def allocate_adjoint(self):
-        '''allocate the memory to hold the result of direct'''
-        #numpy.dot(self.A.transpose(),x.as_array())
-        M_A, N_A = self.A.shape
-        out = numpy.zeros((M_A,1))
-        return DataContainer(out)
+        return svds(self.A,1,return_singular_vectors=False)[0]
+        
diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
index 0cac586..c1adf62 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
@@ -17,12 +17,12 @@ class Operator(object):
     def direct(self,x, out=None):
         '''Returns the application of the Operator on x'''
         raise NotImplementedError
-    def norm(self):
+    def norm(self, **kwargs):
         '''Returns the norm of the Operator'''
         if self.__norm is None:
-            self.__norm = self.calculate_norm()
+            self.__norm = self.calculate_norm(**kwargs)
         return self.__norm
-    def calculate_norm(self):
+    def calculate_norm(self, **kwargs):
         '''Calculates the norm of the Operator'''
         raise NotImplementedError
     def range_geometry(self):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
index ba0049e..f7be5de 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
@@ -40,8 +40,8 @@ class ScaledOperator(object):
                 out *= self.scalar
         else:
             raise TypeError('Operator is not linear')
-    def norm(self):
-        return numpy.abs(self.scalar) * self.operator.norm()
+    def norm(self, **kwargs):
+        return numpy.abs(self.scalar) * self.operator.norm(**kwargs)
     def range_geometry(self):
         return self.operator.range_geometry()
     def domain_geometry(self):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
index 5e318ff..eb51cc3 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
@@ -10,7 +10,7 @@ import scipy.sparse as sp
 import numpy as np
 from ccpi.framework import ImageData
 
-class SparseFiniteDiff():
+class SparseFiniteDiff(object):
     
     def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'):
         
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index da73de6..1897040 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -75,42 +75,12 @@ class SymmetrizedGradient(Gradient):
                 tmp += FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).direct(x.get_item(j))
             res.append(tmp)   
         return res            
-                
-                
-        
-#        for 
-        
-#        tmp = numpy.zeros(self.gm_domain)
-#        
-#        tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) +  \
-#                 FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2])
-#                 
-#        tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) +  \
-#                 FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1])                 
-#
-#        return type(x)(tmp)          
-            
-    def alloc_domain_dim(self):
-        return ImageData(numpy.zeros(self.gm_domain))
-    
-    def alloc_range_dim(self):
-        return ImageData(numpy.zeros(self.range_dim))
-    
+
     def domain_dim(self):
         return self.gm_domain
     
     def range_dim(self):
         return self.gm_range
-                                   
-    def calculate_norm(self):
-#        return np.sqrt(4*len(self.domainDim()))        
-        #TODO this takes time for big ImageData
-        # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)        
-        x0 = ImageData(np.random.random_sample(self.domain_dim()))
-        self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
-        return self.s1 
-    
-
 
 if __name__ == '__main__':   
     
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
index 2331856..a2a94b7 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
@@ -29,8 +29,8 @@ class ZeroOp(Operator):
         else:
             return ImageData(np.zeros(self.gm_domain))
         
-    def calculate_norm(self):
-        return 0
+    def calculate_norm(self, **kwargs):
+        return 0.
     
     def domain_dim(self):       
         return self.gm_domain  
diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml
index dd3238e..163d326 100644
--- a/Wrappers/Python/conda-recipe/meta.yaml
+++ b/Wrappers/Python/conda-recipe/meta.yaml
@@ -36,6 +36,7 @@ requirements:
     - scipy
     #- matplotlib
     - h5py
+    - pillow
   
 about:
   home: http://www.ccpi.ac.uk
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
index 2fc45a5..d890e46 100644
--- a/Wrappers/Python/test/test_Operator.py
+++ b/Wrappers/Python/test/test_Operator.py
@@ -121,19 +121,20 @@ class TestOperator(CCPiTestClass):
         uid = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT, seed=1)
         
         a = LinearOperator.PowerMethod(Id, niter, uid)
-        b = LinearOperator.PowerMethodNonsquare(Id, niter, uid)
-        
+        #b = LinearOperator.PowerMethodNonsquare(Id, niter, uid)
+        b = LinearOperator.PowerMethod(Id, niter)
         print ("Edo impl", a[0])
-        print ("old impl", b[0])
+        print ("None impl", b[0])
         
         #self.assertAlmostEqual(a[0], b[0])
         self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=6)
         
         a = LinearOperator.PowerMethod(G, niter, uid)
-        b = LinearOperator.PowerMethodNonsquare(G, niter, uid)
+        b = LinearOperator.PowerMethod(G, niter)
+        #b = LinearOperator.PowerMethodNonsquare(G, niter, uid)
         
         print ("Edo impl", a[0])
-        print ("old impl", b[0])
+        #print ("old impl", b[0])
         self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=2)
         #self.assertAlmostEqual(a[0], b[0])
         
-- 
cgit v1.2.3


From 6005c3075db20bada44caf31429f0adc28b3907e Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Fri, 7 Jun 2019 16:24:16 +0100
Subject: added bool configured to Algorithm

Algorithms will not run unless they have been configured, i.e. unless set_up has been run.
closes #304
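
A toy sketch of the new behaviour (the Count subclass is hypothetical, used only for illustration,
and relies on the Algorithm.run loop shown in the earlier patches):

    from ccpi.optimisation.algorithms import Algorithm

    class Count(Algorithm):
        '''Hypothetical minimal algorithm'''
        def set_up(self, start):
            self.x = start
            self.configured = True      # a completed set_up marks the algorithm as runnable
        def update(self):
            self.x += 1
        def update_objective(self):
            self.loss.append(self.x)

    alg = Count(max_iteration=5)
    try:
        alg.run(5, verbose=False)       # set_up was never called
    except ValueError as err:
        print(err)                      # Algorithm not configured correctly. Please run set_up.

    alg.set_up(0)
    alg.run(5, verbose=False)           # iterates normally once configured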
---
 .../ccpi/optimisation/algorithms/Algorithm.py      |  3 ++
 .../Python/ccpi/optimisation/algorithms/CGLS.py    |  6 ++-
 .../Python/ccpi/optimisation/algorithms/FBPD.py    | 48 ++++++++++------------
 .../Python/ccpi/optimisation/algorithms/FISTA.py   |  1 +
 .../optimisation/algorithms/GradientDescent.py     |  3 +-
 .../Python/ccpi/optimisation/algorithms/PDHG.py    | 10 ++++-
 .../Python/ccpi/optimisation/algorithms/SIRT.py    |  8 ++--
 7 files changed, 47 insertions(+), 32 deletions(-)

(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index 4fbf83b..c62d0ea 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -51,6 +51,7 @@ class Algorithm(object):
         self.__max_iteration = kwargs.get('max_iteration', 0)
         self.__loss = []
         self.memopt = False
+        self.configured = False
         self.timing = []
         self.update_objective_interval = kwargs.get('update_objective_interval', 1)
     def set_up(self, *args, **kwargs):
@@ -86,6 +87,8 @@ class Algorithm(object):
             raise StopIteration()
         else:
             time0 = time.time()
+            if not self.configured:
+                raise ValueError('Algorithm not configured correctly. Please run set_up.')
             self.update()
             self.timing.append( time.time() - time0 )
             if self.iteration % self.update_objective_interval == 0:
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
index 4d4843c..6b610a0 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
@@ -23,6 +23,7 @@ Created on Thu Feb 21 11:11:23 2019
 """
 
 from ccpi.optimisation.algorithms import Algorithm
+from ccpi.optimisation.functions import Norm2Sq
 
 class CGLS(Algorithm):
 
@@ -59,6 +60,9 @@ class CGLS(Algorithm):
         #    self.normr2 = sum(self.normr2)
         #self.normr2 = numpy.sqrt(self.normr2)
         #print ("set_up" , self.normr2)
+        n = Norm2Sq(operator, self.data)
+        self.loss.append(n(x_init))
+        self.configured = True
 
     def update(self):
 
@@ -84,4 +88,4 @@ class CGLS(Algorithm):
         self.d = s + beta*self.d
 
     def update_objective(self):
-        self.loss.append(self.r.squared_norm())
\ No newline at end of file
+        self.loss.append(self.r.squared_norm())
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
index aa07359..269438c 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
@@ -35,52 +35,48 @@ class FBPD(Algorithm):
       h: regularizer
       opt: additional algorithm 
     '''
-    constraint = None
-    data_fidelity = None
-    regulariser = None
     def __init__(self, **kwargs):
-        pass
-    def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\
-         regulariser=None, opt=None):
+        super(FBPD, self).__init__()
+        self.f = kwargs.get('f', None)
+        self.g = kwargs.get('g', ZeroFunction())
+        self.h = kwargs.get('h', ZeroFunction())
+        self.operator = kwargs.get('operator', None)
+        self.x_init = kwargs.get('x_init',None)
+        if self.x_init is not None and self.operator is not None:
+            self.set_up(self.x_init, self.operator, self.f, self.g, self.h)
+
+    def set_up(self, x_init, operator, constraint, data_fidelity, 
+               regulariser, opt=None):
 
-        # default inputs
-        if constraint    is None: 
-            self.constraint    = ZeroFun()
-        else:
-            self.constraint = constraint
-        if data_fidelity is None:
-            data_fidelity = ZeroFun()
-        else:
-            self.data_fidelity = data_fidelity
-        if regulariser   is None:
-            self.regulariser   = ZeroFun()
-        else:
-            self.regulariser = regulariser
         
         # algorithmic parameters
         
         
         # step-sizes
-        self.tau   = 2 / (self.data_fidelity.L + 2)
-        self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L
+        self.tau   = 2 / (data_fidelity.L + 2)
+        self.sigma = (1/self.tau - data_fidelity.L/2) / regulariser.L
         
         self.inv_sigma = 1/self.sigma
     
         # initialization
         self.x = x_init
         self.y = operator.direct(self.x)
+        self.update_objective()
+        self.configured = True
         
     
     def update(self):
     
         # primal forward-backward step
         x_old = self.x
-        self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) )
-        self.x = self.constraint.prox(self.x, self.tau);
+        self.x = self.x - self.tau * ( self.g.gradient(self.x) + self.operator.adjoint(self.y) )
+        self.x = self.f.proximal(self.x, self.tau)
     
         # dual forward-backward step
-        self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old);
-        self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma);   
+        self.y += self.sigma * self.operator.direct(2*self.x - x_old);
+        self.y -= self.sigma * self.h.proximal(self.inv_sigma*self.y, self.inv_sigma)   
 
         # time and criterion
-        self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x))
+
+    def update_objective(self):
+        self.loss.append(self.f(self.x) + self.g(self.x) + self.h(self.operator.direct(self.x)))
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index 3c7a8d1..647ae98 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -58,6 +58,7 @@ class FISTA(Algorithm):
         
         self.t_old = 1
         self.update_objective()
+        self.configured = True
             
     def update(self):
 
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
index 14763c5..34bf954 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
@@ -40,7 +40,7 @@ class GradientDescent(Algorithm):
             if k in args:
                 args.pop(args.index(k))
         if len(args) == 0:
-            return self.set_up(x_init=kwargs['x_init'],
+            self.set_up(x_init=kwargs['x_init'],
                                objective_function=kwargs['objective_function'],
                                rate=kwargs['rate'])
     
@@ -61,6 +61,7 @@ class GradientDescent(Algorithm):
             self.memopt = False
         if self.memopt:
             self.x_update = x_init.copy()
+        self.configured = True
 
     def update(self):
         '''Single iteration'''
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 39b092b..3afd8b0 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -23,10 +23,16 @@ class PDHG(Algorithm):
         self.operator = kwargs.get('operator', None)
         self.g        = kwargs.get('g', None)
         self.tau      = kwargs.get('tau', None)
-        self.sigma    = kwargs.get('sigma', None)
+        self.sigma    = kwargs.get('sigma', 1.)
+        
         
         if self.f is not None and self.operator is not None and \
            self.g is not None:
+            if self.tau is None:
+                # Compute operator Norm
+                normK = self.operator.norm()
+                # Primal & dual stepsizes
+                self.tau = 1/(self.sigma*normK**2)
             print ("Calling from creator")
             self.set_up(self.f,
                         self.g,
@@ -57,6 +63,8 @@ class PDHG(Algorithm):
 
         # relaxation parameter
         self.theta = 1
+        self.update_objective()
+        self.configured = True
 
     def update(self):
         
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
index 30584d4..c73d323 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
@@ -59,6 +59,7 @@ class SIRT(Algorithm):
         # Set up scaling matrices D and M.
         self.M = 1/self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))        
         self.D = 1/self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0))
+        self.configured = True
 
 
     def update(self):
@@ -67,8 +68,9 @@ class SIRT(Algorithm):
         
         self.x += self.relax_par * (self.D*self.operator.adjoint(self.M*self.r))
         
-        if self.constraint != None:
-            self.x = self.constraint.prox(self.x,None)
+        if self.constraint is not None:
+            self.x = self.constraint.proximal(self.x,None)
+            # self.constraint.proximal(self.x,None, out=self.x)
 
     def update_objective(self):
-        self.loss.append(self.r.squared_norm())
\ No newline at end of file
+        self.loss.append(self.r.squared_norm())
-- 
cgit v1.2.3


From 1d7324cec6f10f00b73af2bab3469202c5cc2e87 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Fri, 7 Jun 2019 16:35:09 +0100
Subject: removed FBPD

---
 .../Python/ccpi/optimisation/algorithms/FBPD.py    | 82 ----------------------
 .../ccpi/optimisation/algorithms/__init__.py       |  2 -
 Wrappers/Python/test/test_algorithms.py            |  1 -
 3 files changed, 85 deletions(-)
 delete mode 100644 Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py

(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
deleted file mode 100644
index 269438c..0000000
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-#   This work is part of the Core Imaging Library developed by
-#   Visual Analytics and Imaging System Group of the Science Technology
-#   Facilities Council, STFC
-
-#   Copyright 2019 Edoardo Pasca
-
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-
-#       http://www.apache.org/licenses/LICENSE-2.0
-
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-"""
-Created on Thu Feb 21 11:09:03 2019
-
-@author: ofn77899
-"""
-
-from ccpi.optimisation.algorithms import Algorithm
-from ccpi.optimisation.functions import ZeroFunction
-        
-class FBPD(Algorithm):
-    '''FBPD Algorithm
-    
-    Parameters:
-      x_init: initial guess
-      f: constraint
-      g: data fidelity
-      h: regularizer
-      opt: additional algorithm 
-    '''
-    def __init__(self, **kwargs):
-        super(FBPD, self).__init__()
-        self.f = kwargs.get('f', None)
-        self.g = kwargs.get('g', ZeroFunction())
-        self.h = kwargs.get('h', ZeroFunction())
-        self.operator = kwargs.get('operator', None)
-        self.x_init = kwargs.get('x_init',None)
-        if self.x_init is not None and self.operator is not None:
-            self.set_up(self.x_init, self.operator, self.f, self.g, self.h)
-
-    def set_up(self, x_init, operator, constraint, data_fidelity, 
-               regulariser, opt=None):
-
-        
-        # algorithmic parameters
-        
-        
-        # step-sizes
-        self.tau   = 2 / (data_fidelity.L + 2)
-        self.sigma = (1/self.tau - data_fidelity.L/2) / regulariser.L
-        
-        self.inv_sigma = 1/self.sigma
-    
-        # initialization
-        self.x = x_init
-        self.y = operator.direct(self.x)
-        self.update_objective()
-        self.configured = True
-        
-    
-    def update(self):
-    
-        # primal forward-backward step
-        x_old = self.x
-        self.x = self.x - self.tau * ( self.g.gradient(self.x) + self.operator.adjoint(self.y) )
-        self.x = self.f.proximal(self.x, self.tau)
-    
-        # dual forward-backward step
-        self.y += self.sigma * self.operator.direct(2*self.x - x_old);
-        self.y -= self.sigma * self.h.proximal(self.inv_sigma*self.y, self.inv_sigma)   
-
-        # time and criterion
-
-    def update_objective(self):
-        self.loss.append(self.f(self.x) + self.g(self.x) + self.h(self.operator.direct(self.x)))
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
index 2dbacfc..8f255f3 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py
@@ -27,7 +27,5 @@ from .CGLS import CGLS
 from .SIRT import SIRT
 from .GradientDescent import GradientDescent
 from .FISTA import FISTA
-from .FBPD import FBPD
 from .PDHG import PDHG
-from .PDHG import PDHG_old
 
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index 4121358..8c398f4 100755
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -18,7 +18,6 @@ from ccpi.optimisation.functions import Norm2Sq, ZeroFunction, \
 from ccpi.optimisation.algorithms import GradientDescent
 from ccpi.optimisation.algorithms import CGLS
 from ccpi.optimisation.algorithms import FISTA
-from ccpi.optimisation.algorithms import FBPD
 
 
 
-- 
cgit v1.2.3