author    Edoardo Pasca <edo.paskino@gmail.com>    2019-04-01 14:51:40 +0100
committer Edoardo Pasca <edo.paskino@gmail.com>    2019-04-01 14:51:40 +0100
commit    d76a4d10b5fd2038eeac4eda85361e5b92dabc30 (patch)
tree      dde3a84ccca461308ab4919b8557321cd8a1925e
parent    d9588f62186a9532890907d11536ebe3824a3bb9 (diff)
code and test updates
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py    42
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py           2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py    12
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/Operator.py          2
-rw-r--r--  Wrappers/Python/test/test_BlockOperator.py                       5
-rw-r--r--  Wrappers/Python/test/test_functions.py                          137
6 files changed, 167 insertions, 33 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 54f8859..5489d92 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -31,8 +31,7 @@ class L2NormSquared(Function):
super(L2NormSquared, self).__init__()
self.b = kwargs.get('b',None)
- def __call__(self, x, out=None):
-
+ def __call__(self, x):
''' Evaluates L2NormSq at point x'''
y = x
@@ -44,33 +43,41 @@ class L2NormSquared(Function):
# if out is None:
# return x.squared_norm()
# else:
- return y.squared_norm()
-
+ try:
+ return y.squared_norm()
+ except AttributeError as ae:
+ # added for compatibility with SIRF
+ return (y.norm()**2)
+
def gradient(self, x, out=None):
-
''' Evaluates gradient of L2NormSq at point x'''
-
+ if out is not None:
+ out.fill(x)
+ if self.b is not None:
+ out -= self.b
+ out *= 2
+ else:
+ y = x
if self.b is not None:
# x.subtract(self.b, out=x)
y = x - self.b
- if out is None:
- return 2*y
- else:
- return out.fill(2*y)
+ return 2*y
+
def convex_conjugate(self, x, out=None):
-
- ''' Evaluate convex conjugate of L2NormSq '''
+ ''' Evaluate convex conjugate of L2NormSq'''
tmp = 0
if self.b is not None:
tmp = (self.b * x).sum()
if out is None:
+ # FIXME: this is a number
return (1/4) * x.squared_norm() + tmp
else:
+ # FIXME: this is a DataContainer
out.fill((1/4) * x.squared_norm() + tmp)
@@ -86,10 +93,15 @@ class L2NormSquared(Function):
else:
return x/(1+2*tau)
else:
+ out.fill(x)
if self.b is not None:
- out.fill((x - self.b)/(1+2*tau) + self.b)
- else:
- out.fill(x/(1+2*tau))
+ out -= self.b
+ out /= (1+2*tau)
+ if self.b is not None:
+ out += self.b
+ #out.fill((x - self.b)/(1+2*tau) + self.b)
+ #else:
+ # out.fill(x/(1+2*tau))
def proximal_conjugate(self, x, tau, out=None):
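
For reference, a minimal usage sketch of the reworked L2NormSquared interface, modelled on the tests added further down; the ImageGeometry allocation pattern (RANDOM_INT) and the proximal call are taken from test_functions.py and are assumptions about this revision, not part of the diff itself.

from ccpi.framework import ImageGeometry
from ccpi.optimisation.functions import L2NormSquared

ig = ImageGeometry(voxel_num_x=2, voxel_num_y=3, voxel_num_z=5)
u = ig.allocate(ImageGeometry.RANDOM_INT)
b = ig.allocate(ImageGeometry.RANDOM_INT)

f = L2NormSquared(b=b)
val  = f(u)               # ||u - b||^2, falling back to norm()**2 for SIRF-style objects
grad = f.gradient(u)      # 2*(u - b), returned as a new container when out is None
prox = f.proximal(u, 0.5) # (u - b)/(1 + 2*0.5) + b
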
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py
index 9def741..8c28874 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py
@@ -29,7 +29,7 @@ class ZeroFun(Function):
if x.shape[0]==1:
return x.maximum(0).sum()
else:
- if isinstance(x, CompositeDataContainer):
+ if isinstance(x, BlockDataContainer):
return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum()
else:
return x.maximum(0).sum() + x.maximum(0).sum()
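
The only change in this file is the container type check: the class is now called BlockDataContainer rather than CompositeDataContainer. A rough sketch of the renamed check, assuming the framework's allocate(value) API:

from ccpi.framework import ImageGeometry, BlockDataContainer

ig  = ImageGeometry(voxel_num_x=4, voxel_num_y=5)
bdc = BlockDataContainer(ig.allocate(1), ig.allocate(2))
assert isinstance(bdc, BlockDataContainer)  # the old CompositeDataContainer name no longer applies
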
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index 1240b31..ee8f609 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -190,9 +190,15 @@ class BlockOperator(Operator):
return type(self)(*ops, shape=self.shape)
@property
def T(self):
- '''Return the transposed of self'''
- shape = (self.shape[1], self.shape[0])
- return type(self)(*self.operators, shape=shape)
+ '''Return the transpose of self
+
+ operators are given row-by-row'''
+ newshape = (self.shape[1], self.shape[0])
+ oplist = []
+ for col in range(newshape[1]):
+ for row in range(newshape[0]):
+ oplist.append(self.get_item(col,row))
+ return type(self)(*oplist, shape=newshape)
def domain_geometry(self):
'''returns the domain of the BlockOperator
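
A small sketch of how the reworked transpose is meant to be used, mirroring the new test in test_BlockOperator.py below; the Identity import path and the allocate call are assumptions, not part of this diff.

from ccpi.framework import ImageGeometry, BlockDataContainer
from ccpi.optimisation.operators import BlockOperator, Identity  # Identity location assumed

ig  = ImageGeometry(10, 20, 30)
ops = [Identity(ig) for _ in range(6)]
K   = BlockOperator(*ops, shape=(3, 2))  # 3 rows, 2 columns

X = BlockDataContainer(*[ig.allocate(ImageGeometry.RANDOM_INT) for _ in range(3)])
Y = K.T.direct(X)                        # K.T is (2, 3), X is (3, 1) => Y is (2, 1)
assert K.T.shape == (2, 3)
assert Y.shape == (2, 1)
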
diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
index cdf15a7..2d2089b 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py
@@ -4,7 +4,7 @@ Created on Tue Mar 5 15:55:56 2019
@author: ofn77899
"""
-from ccpi.optimisation.operators import ScaledOperator
+from ccpi.optimisation.operators.ScaledOperator import ScaledOperator
class Operator(object):
'''Operator that maps from a space X -> Y'''
diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py
index 4eb84bb..e1c05fb 100644
--- a/Wrappers/Python/test/test_BlockOperator.py
+++ b/Wrappers/Python/test/test_BlockOperator.py
@@ -39,6 +39,11 @@ class TestBlockOperator(unittest.TestCase):
zero = numpy.zeros(X.get_item(0).shape)
numpy.testing.assert_array_equal(Y.get_item(0).as_array(),len(x)+zero)
+ K2 = BlockOperator(*(ops+ops), shape=(3,2))
+ Y = K2.T.direct(X)
+ # K.T (2,3) X (3,1) => output shape (2,1)
+ self.assertTrue(Y.shape == (2,1))
+
try:
# this should fail as the domain is not compatible
ig = [ ImageGeometry(10,20,31) , \
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 384e351..3e5f26f 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -17,17 +17,20 @@ from ccpi.framework import BlockDataContainer
from numbers import Number
from ccpi.optimisation.operators import Gradient
-from ccpi.optimisation.functions import SimpleL2NormSq
-from ccpi.optimisation.functions import L2NormSq
+#from ccpi.optimisation.functions import SimpleL2NormSq
+from ccpi.optimisation.functions import L2NormSquared
from ccpi.optimisation.functions import SimpleL1Norm
from ccpi.optimisation.functions import L1Norm
+
+from ccpi.optimisation.funcs import Norm2sq
# from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq
# from ccpi.optimisation.functions.L1Norm import SimpleL1Norm, L1Norm
-from ccpi.optimisation.functions import mixed_L12Norm
+#from ccpi.optimisation.functions import mixed_L12Norm
from ccpi.optimisation.functions import ZeroFun
from ccpi.optimisation.functions import FunctionOperatorComposition
-import unittest
+import unittest
+import numpy
#
@@ -46,12 +49,12 @@ class TestFunction(unittest.TestCase):
operator = BlockOperator(op1, op2 , shape=(2,1) )
# Create functions
- noisy_data = ImageData(np.random.randint(10, size=ag))
+ noisy_data = ag.allocate(ImageGeometry.RANDOM_INT)
- d = ImageData(np.random.randint(10, size=ag))
+ d = ag.allocate(ImageGeometry.RANDOM_INT)
alpha = 0.5
# scaled function
- g = alpha * L2NormSq(b=noisy_data)
+ g = alpha * L2NormSquared(b=noisy_data)
# Compare call of g
a2 = alpha*(d - noisy_data).power(2).sum()
@@ -63,13 +66,121 @@ class TestFunction(unittest.TestCase):
self.assertEqual(a3, g.convex_conjugate(d))
#print( a3, g.convex_conjugate(d))
+ def test_L2NormSquared(self):
+ # TESTS for L2 and scalar * L2
- def stest_ScaledFunctin(self):
- ig = (N,N)
- ag = ig
- op1 = Gradient(ig)
- op2 = Identity(ig, ag)
-
+ M, N, K = 2,3,5
+ ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
+ u = ig.allocate(ImageGeometry.RANDOM_INT)
+ b = ig.allocate(ImageGeometry.RANDOM_INT)
+
+ # check grad/call no data
+ f = L2NormSquared()
+ a1 = f.gradient(u)
+ a2 = 2 * u
+ numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4)
+ numpy.testing.assert_equal(f(u), u.squared_norm())
+
+ # check grad/call with data
+ f1 = L2NormSquared(b=b)
+ b1 = f1.gradient(u)
+ b2 = 2 * (u-b)
+
+ numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4)
+ numpy.testing.assert_equal(f1(u), (u-b).squared_norm())
+
+ # check convex conjugate no data
+ c1 = f.convex_conjugate(u)
+ c2 = 1/4 * u.squared_norm()
+ numpy.testing.assert_equal(c1, c2)
+
+ # check convex conjugate with data
+ d1 = f1.convex_conjugate(u)
+ d2 = (1/4) * u.squared_norm() + (u*b).sum()
+ numpy.testing.assert_equal(d1, d2)
+
+ # check proximal no data
+ tau = 5
+ e1 = f.proximal(u, tau)
+ e2 = u/(1+2*tau)
+ numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4)
+
+ # check proximal with data
+ tau = 5
+ h1 = f1.proximal(u, tau)
+ h2 = (u-b)/(1+2*tau) + b
+ numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4)
+
+ # check proximal conjugate no data
+ tau = 0.2
+ k1 = f.proximal_conjugate(u, tau)
+ k2 = u/(1 + tau/2 )
+ numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4)
+
+ # check proximal conjugate with data
+ l1 = f1.proximal_conjugate(u, tau)
+ l2 = (u - tau * b)/(1 + tau/2 )
+ numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4)
+
+
+ # check scaled function properties
+
+ # scalar
+ scalar = 100
+ f_scaled_no_data = scalar * L2NormSquared()
+ f_scaled_data = scalar * L2NormSquared(b=b)
+
+ # call
+ numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u))
+ numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u))
+
+ # grad
+ numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4)
+ numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4)
+
+ # conj
+ numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \
+ f.convex_conjugate(u/scalar) * scalar, decimal=4)
+
+ numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \
+ scalar * f1.convex_conjugate(u/scalar), decimal=4)
+
+ # proximal
+ numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \
+ f.proximal(u, tau*scalar).as_array())
+
+
+ numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \
+ f1.proximal(u, tau*scalar).as_array())
+
+
+ # proximal conjugate
+ numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \
+ (u/(1 + tau/(2*scalar) )).as_array(), decimal=4)
+
+ numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \
+ ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4)
+
+
+ def test_Norm2sq_as_FunctionOperatorComposition(self):
+ M, N, K = 2,3,5
+ ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
+ u = ig.allocate(ImageGeometry.RANDOM_INT)
+ b = ig.allocate(ImageGeometry.RANDOM_INT)
+
+ A = 0.5 * Identity(ig)
+ old_chisq = Norm2sq(A, b, 1.0)
+ new_chisq = FunctionOperatorComposition(A, L2NormSquared(b=b))
+
+ yold = old_chisq(u)
+ ynew = new_chisq(u)
+ self.assertEqual(yold, ynew)
+
+ yold = old_chisq.gradient(u)
+ ynew = new_chisq.gradient(u)
+ numpy.testing.assert_array_equal(yold.as_array(), ynew.as_array())
+
+
#
# f1 = L2NormSq(alpha=1, b=noisy_data)
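
As a sanity check on the proximal_conjugate expressions asserted above, the closed form (u - tau*b)/(1 + tau/2) can be verified against the Moreau decomposition with plain numpy; this sketch is independent of the framework and only assumes f(x) = ||x - b||^2.

import numpy

numpy.random.seed(0)
x   = numpy.random.randn(10)
b   = numpy.random.randn(10)
tau = 0.2

def prox_f(v, t, b):
    # prox of t*||. - b||^2 at v, i.e. argmin_z 0.5*||z - v||^2 + t*||z - b||^2
    return (v + 2 * t * b) / (1 + 2 * t)

closed_form = (x - tau * b) / (1 + tau / 2)            # formula used in the test above
moreau      = x - tau * prox_f(x / tau, 1.0 / tau, b)  # prox_{tau f*}(x) via the Moreau identity

assert numpy.allclose(closed_form, moreau)
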