Diffstat (limited to 'Wrappers/Python')
-rw-r--r--  Wrappers/Python/ccpi/contrib/__init__.py | 0
-rw-r--r--  Wrappers/Python/ccpi/contrib/optimisation/__init__.py | 0
-rw-r--r--  Wrappers/Python/ccpi/contrib/optimisation/algorithms/__init__.py | 0
-rwxr-xr-x  Wrappers/Python/ccpi/contrib/optimisation/algorithms/spdhg.py (renamed from Wrappers/Python/ccpi/optimisation/spdhg.py) | 0
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/funcs.py | 7
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py | 78
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py | 13
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py | 3
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py | 3
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py | 45
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py | 51
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py | 3
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/__init__.py | 2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/ops.py | 294
-rw-r--r--  Wrappers/Python/setup.py | 4
-rwxr-xr-x  Wrappers/Python/test/test_BlockDataContainer.py | 6
-rw-r--r--  Wrappers/Python/test/test_BlockOperator.py | 30
-rwxr-xr-x  Wrappers/Python/test/test_DataContainer.py | 64
-rwxr-xr-x  Wrappers/Python/test/test_Gradient.py | 4
-rw-r--r--  Wrappers/Python/test/test_Operator.py | 45
-rwxr-xr-x  Wrappers/Python/test/test_algorithms.py | 12
-rw-r--r--  Wrappers/Python/test/test_functions.py | 62
-rwxr-xr-x  Wrappers/Python/test/test_run_test.py | 52
23 files changed, 328 insertions(+), 450 deletions(-)
diff --git a/Wrappers/Python/ccpi/contrib/__init__.py b/Wrappers/Python/ccpi/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Wrappers/Python/ccpi/contrib/__init__.py
diff --git a/Wrappers/Python/ccpi/contrib/optimisation/__init__.py b/Wrappers/Python/ccpi/contrib/optimisation/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Wrappers/Python/ccpi/contrib/optimisation/__init__.py
diff --git a/Wrappers/Python/ccpi/contrib/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/contrib/optimisation/algorithms/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Wrappers/Python/ccpi/contrib/optimisation/algorithms/__init__.py
diff --git a/Wrappers/Python/ccpi/optimisation/spdhg.py b/Wrappers/Python/ccpi/contrib/optimisation/algorithms/spdhg.py
index 263a7cd..263a7cd 100755
--- a/Wrappers/Python/ccpi/optimisation/spdhg.py
+++ b/Wrappers/Python/ccpi/contrib/optimisation/algorithms/spdhg.py
diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index efc465c..b2b9791 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -17,7 +17,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ccpi.optimisation.ops import Identity, FiniteDiff2D
import numpy
from ccpi.framework import DataContainer
import warnings
@@ -99,12 +98,6 @@ class Norm2(Function):
raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
-class TV2D(Norm2):
-
- def __init__(self, gamma):
- super(TV2D,self).__init__(gamma, 0)
- self.op = FiniteDiff2D()
- self.L = self.op.get_max_sing_val()
# Define a class for squared 2-norm
diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
index 7889cad..09e39bf 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
@@ -20,7 +20,8 @@
import numpy
from ccpi.optimisation.functions import Function
from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
-from ccpi.framework import ImageData
+from ccpi.framework import ImageData, ImageGeometry
+import functools
class KullbackLeibler(Function):
@@ -39,6 +40,7 @@ class KullbackLeibler(Function):
def __call__(self, x):
# TODO check
+<<<<<<< HEAD
tmp = x + self.bnoise
ind = tmp.as_array()>0
@@ -47,6 +49,29 @@ class KullbackLeibler(Function):
return sum(res)
+=======
+
+ self.sum_value = x + self.bnoise
+ if (self.sum_value.as_array()<0).any():
+ self.sum_value = numpy.inf
+
+ if self.sum_value==numpy.inf:
+ return numpy.inf
+ else:
+ tmp = self.sum_value.copy()
+ #tmp.fill( numpy.log(tmp.as_array()) )
+ self.log(tmp)
+ return (x - self.b * tmp ).sum()
+
+# return numpy.sum( x.as_array() - self.b.as_array() * numpy.log(self.sum_value.as_array()))
+
+ def log(self, datacontainer):
+ '''calculates the in-place log of the datacontainer'''
+ if not functools.reduce(lambda x,y: x and y>0,
+ datacontainer.as_array().ravel(), True):
+ raise ValueError('KullbackLeibler. Cannot calculate log of negative number')
+ datacontainer.fill( numpy.log(datacontainer.as_array()) )
+>>>>>>> origin/composite_operator_datacontainer
def gradient(self, x, out=None):
@@ -54,6 +79,7 @@ class KullbackLeibler(Function):
if out is None:
return 1 - self.b/(x + self.bnoise)
else:
+<<<<<<< HEAD
self.b.divide(x+self.bnoise, out=out)
out.subtract(1, out=out)
@@ -65,14 +91,47 @@ class KullbackLeibler(Function):
sel
return (self.b * ( ImageData( numpy.log(tmp) ) - 1 ) - self.bnoise * (x - 1)).sum()
+=======
+ x.add(self.bnoise, out=out)
+ self.b.divide(out, out=out)
+ out.subtract(1, out=out)
+ out *= -1
+
+ def convex_conjugate(self, x):
+
+ tmp = self.b/( 1 - x )
+ self.log(tmp)
+ return (self.b * ( tmp - 1 ) - self.bnoise * (x - 1)).sum()
+# return self.b * ( ImageData(numpy.log(self.b/(1-x)) - 1 )) - self.bnoise * (x - 1)
+>>>>>>> origin/composite_operator_datacontainer
def proximal(self, x, tau, out=None):
if out is None:
return 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() )
else:
+<<<<<<< HEAD
tmp = 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() )
out.fill(tmp)
+=======
+ tmp = 0.5 *( (x - self.bnoise - tau) +
+ ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt()
+ )
+ x.add(self.bnoise, out=out)
+ out -= tau
+ out *= out
+ tmp = self.b * (4 * tau)
+ out.add(tmp, out=out)
+ out.sqrt(out=out)
+
+ x.subtract(self.bnoise, out=tmp)
+ tmp -= tau
+
+ out += tmp
+
+ out *= 0.5
+
+>>>>>>> origin/composite_operator_datacontainer
def proximal_conjugate(self, x, tau, out=None):
@@ -80,12 +139,27 @@ class KullbackLeibler(Function):
if out is None:
z = x + tau * self.bnoise
+<<<<<<< HEAD
return 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
else:
z = x + tau * self.bnoise
res = 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
out.fill(res)
+=======
+ return (z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()
+ else:
+ z_m = x + tau * self.bnoise - 1
+ self.b.multiply(4*tau, out=out)
+ z_m.multiply(z_m, out=z_m)
+ out += z_m
+ out.sqrt(out=out)
+ # z = z_m + 2
+ z_m.sqrt(out=z_m)
+ z_m += 2
+ out *= -1
+ out += z_m
+>>>>>>> origin/composite_operator_datacontainer
def __rmul__(self, scalar):
@@ -137,4 +211,4 @@ if __name__ == '__main__':
#
- \ No newline at end of file
+
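For reference, the proximal map implemented by both branches above has the closed form sketched below. This is a plain-numpy illustration only, not part of the patch; the helper name kl_proximal is hypothetical and mirrors the formula rather than the DataContainer code path.

    # Plain-numpy sketch of the KL proximal map used in proximal() above.
    import numpy

    def kl_proximal(x, b, bnoise, tau):
        # prox_{tau*KL}(x) = 0.5*((x - bnoise - tau) + sqrt((x + bnoise - tau)**2 + 4*tau*b))
        return 0.5 * ((x - bnoise - tau) +
                      numpy.sqrt((x + bnoise - tau) ** 2 + 4.0 * tau * b))

    x = numpy.array([1.0, 2.0, 3.0])
    b = numpy.array([0.5, 1.0, 1.5])
    bnoise = numpy.zeros(3)
    print(kl_proximal(x, b, bnoise, tau=0.7))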
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 6d3bf86..8a16c28 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -93,12 +93,13 @@ class L2NormSquared(Function):
if self.b is None:
return x/(1+2*tau)
else:
-# tmp = x
-# tmp -= self.b
-# tmp /= (1+2*tau)
-# tmp += self.b
-# return tmp
- return (x-self.b)/(1+2*tau) + self.b
+
+ tmp = x.subtract(self.b)
+ #tmp -= self.b
+ tmp /= (1+2*tau)
+ tmp += self.b
+ return tmp
+# return (x-self.b)/(1+2*tau) + self.b
# if self.b is not None:
# out=x
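For reference, the stepwise proximal introduced above is algebraically identical to the commented-out one-liner; a plain-numpy check (illustration only, not part of the patch):

    # prox_{tau*||.-b||^2}(x) = (x - b)/(1 + 2*tau) + b
    import numpy

    x = numpy.array([1.0, 4.0, -2.0])
    b = numpy.array([0.5, 0.5, 0.5])
    tau = 0.25

    one_liner = (x - b) / (1 + 2 * tau) + b
    stepwise = x - b          # allocation-friendly version used in the diff
    stepwise /= (1 + 2 * tau)
    stepwise += b
    numpy.testing.assert_array_almost_equal(one_liner, stepwise)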
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 835f96d..2c42532 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -7,7 +7,6 @@ Created on Fri Mar 1 22:51:17 2019
"""
from ccpi.optimisation.operators import LinearOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, BlockDataContainer
import numpy as np
@@ -318,7 +317,7 @@ class FiniteDiff(LinearOperator):
def norm(self):
x0 = self.gm_domain.allocate('random_int')
- self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+ self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
return self.s1
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 4935435..e0b8a32 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -7,7 +7,6 @@ Created on Fri Mar 1 22:50:04 2019
"""
from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer
import numpy
from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff
@@ -90,7 +89,7 @@ class Gradient(LinearOperator):
def norm(self):
x0 = self.gm_domain.allocate('random')
- self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0)
+ self.s1, sall, svec = LinearOperator.PowerMethod(self, 10, x0)
return self.s1
def __rmul__(self, scalar):
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
index e19304f..f304cc6 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py
@@ -6,6 +6,8 @@ Created on Tue Mar 5 15:57:52 2019
"""
from ccpi.optimisation.operators import Operator
+from ccpi.framework import ImageGeometry
+import numpy
class LinearOperator(Operator):
@@ -20,3 +22,46 @@ class LinearOperator(Operator):
only available to linear operators'''
raise NotImplementedError
+
+ @staticmethod
+ def PowerMethod(operator, iterations, x_init=None):
+ '''Power method to calculate iteratively the Lipschitz constant'''
+
+ # Initialise random
+ if x_init is None:
+ x0 = operator.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+ else:
+ x0 = x_init.copy()
+
+ x1 = operator.domain_geometry().allocate()
+ y_tmp = operator.range_geometry().allocate()
+ s = numpy.zeros(iterations)
+ # Loop
+ for it in numpy.arange(iterations):
+ operator.direct(x0,out=y_tmp)
+ operator.adjoint(y_tmp,out=x1)
+ x1norm = x1.norm()
+ s[it] = x1.dot(x0) / x0.squared_norm()
+ x1.multiply((1.0/x1norm), out=x0)
+ return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+
+ @staticmethod
+ def PowerMethodNonsquare(op,numiters , x_init=None):
+ '''Power method to calculate iteratively the Lipschitz constant'''
+
+ if x_init is None:
+ x0 = op.allocate_direct()
+ x0.fill(numpy.random.randn(*x0.shape))
+ else:
+ x0 = x_init.copy()
+
+ s = numpy.zeros(numiters)
+ # Loop
+ for it in numpy.arange(numiters):
+ x1 = op.adjoint(op.direct(x0))
+ x1norm = x1.norm()
+ s[it] = (x1*x0).sum() / (x0.squared_norm())
+ x0 = (1.0/x1norm)*x1
+ return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+
+
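A minimal usage sketch for the new static LinearOperator.PowerMethod, assuming the Identity operator and ImageGeometry used elsewhere in this patch are importable (illustration only):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Identity, LinearOperator

    ig = ImageGeometry(100, 100)
    Id = Identity(ig)

    # Returns (norm estimate, per-iteration estimates, final iterate).
    norm_est, all_estimates, x_final = LinearOperator.PowerMethod(Id, 10)
    print(norm_est)  # expected to be close to 1.0 for the identity operator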
diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
new file mode 100644
index 0000000..62e22e0
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperatorMatrix.py
@@ -0,0 +1,51 @@
+import numpy
+from scipy.sparse.linalg import svds
+from ccpi.framework import DataContainer
+from ccpi.framework import AcquisitionData
+from ccpi.framework import ImageData
+from ccpi.framework import ImageGeometry
+from ccpi.framework import AcquisitionGeometry
+from numbers import Number
+from ccpi.optimisation.operators import LinearOperator
+class LinearOperatorMatrix(LinearOperator):
+ def __init__(self,A):
+ self.A = A
+ self.s1 = None # Largest singular value, initially unknown
+ super(LinearOperatorMatrix, self).__init__()
+
+ def direct(self,x, out=None):
+ if out is None:
+ return type(x)(numpy.dot(self.A,x.as_array()))
+ else:
+ numpy.dot(self.A, x.as_array(), out=out.as_array())
+
+
+ def adjoint(self,x, out=None):
+ if out is None:
+ return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
+ else:
+ numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array())
+
+
+ def size(self):
+ return self.A.shape
+
+ def get_max_sing_val(self):
+ # If unknown, compute and store. If known, simply return it.
+ if self.s1 is None:
+ self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
+ return self.s1
+ else:
+ return self.s1
+ def allocate_direct(self):
+ '''allocates the memory to hold the result of adjoint'''
+ #numpy.dot(self.A.transpose(),x.as_array())
+ M_A, N_A = self.A.shape
+ out = numpy.zeros((N_A,1))
+ return DataContainer(out)
+ def allocate_adjoint(self):
+ '''allocate the memory to hold the result of direct'''
+ #numpy.dot(self.A.transpose(),x.as_array())
+ M_A, N_A = self.A.shape
+ out = numpy.zeros((M_A,1))
+ return DataContainer(out)
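A short usage sketch for the relocated LinearOperatorMatrix, wrapping a dense numpy matrix as a ccpi linear operator (illustration only, not part of the patch):

    import numpy
    from ccpi.framework import DataContainer
    from ccpi.optimisation.operators import LinearOperatorMatrix

    A = numpy.random.randn(5, 3)
    op = LinearOperatorMatrix(A)

    x = DataContainer(numpy.random.randn(3, 1))
    y = op.direct(x)              # A x, returned in the same container type
    z = op.adjoint(y)             # A^T y
    print(op.size())              # (5, 3)
    print(op.get_max_sing_val())  # largest singular value via scipy svds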
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index c38458d..3fc43b8 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -7,7 +7,6 @@ Created on Fri Mar 1 22:53:55 2019
"""
from ccpi.optimisation.operators import Gradient, Operator, LinearOperator, ScaledOperator
-from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer
import numpy
from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff
@@ -108,7 +107,7 @@ class SymmetrizedGradient(Gradient):
#TODO this takes time for big ImageData
# for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)
x0 = ImageData(np.random.random_sample(self.domain_dim()))
- self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+ self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0)
return self.s1
diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
index 7040d3a..811adf6 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
@@ -19,5 +19,5 @@ from .GradientOperator import Gradient
from .SymmetrizedGradientOperator import SymmetrizedGradient
from .IdentityOperator import Identity
from .ZeroOperator import ZeroOp
-
+from .LinearOperatorMatrix import LinearOperatorMatrix
diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py
deleted file mode 100755
index fcd0d9e..0000000
--- a/Wrappers/Python/ccpi/optimisation/ops.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy
-from scipy.sparse.linalg import svds
-from ccpi.framework import DataContainer
-from ccpi.framework import AcquisitionData
-from ccpi.framework import ImageData
-from ccpi.framework import ImageGeometry
-from ccpi.framework import AcquisitionGeometry
-from numbers import Number
-# Maybe operators need to know what types they take as inputs/outputs
-# to not just use generic DataContainer
-
-
-class Operator(object):
- '''Operator that maps from a space X -> Y'''
- def __init__(self, **kwargs):
- self.scalar = 1
- def is_linear(self):
- '''Returns if the operator is linear'''
- return False
- def direct(self,x, out=None):
- raise NotImplementedError
- def size(self):
- # To be defined for specific class
- raise NotImplementedError
- def norm(self):
- raise NotImplementedError
- def allocate_direct(self):
- '''Allocates memory on the Y space'''
- raise NotImplementedError
- def allocate_adjoint(self):
- '''Allocates memory on the X space'''
- raise NotImplementedError
- def range_geometry(self):
- raise NotImplementedError
- def domain_geometry(self):
- raise NotImplementedError
- def __rmul__(self, other):
- '''reverse multiplication of Operator with number sets the variable scalar in the Operator'''
- assert isinstance(other, Number)
- self.scalar = other
- return self
-
-class LinearOperator(Operator):
- '''Operator that maps from a space X -> Y'''
- def is_linear(self):
- '''Returns if the operator is linear'''
- return True
- def adjoint(self,x, out=None):
- raise NotImplementedError
-
-class Identity(Operator):
- def __init__(self):
- self.s1 = 1.0
- self.L = 1
- super(Identity, self).__init__()
-
- def direct(self,x,out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def adjoint(self,x, out=None):
- if out is None:
- return x.copy()
- else:
- out.fill(x)
-
- def size(self):
- return NotImplemented
-
- def get_max_sing_val(self):
- return self.s1
-
-class TomoIdentity(Operator):
- def __init__(self, geometry, **kwargs):
- super(TomoIdentity, self).__init__()
- self.s1 = 1.0
- self.geometry = geometry
-
- def is_linear(self):
- return True
- def direct(self,x,out=None):
-
- if out is None:
- if self.scalar != 1:
- return x * self.scalar
- return x.copy()
- else:
- if self.scalar != 1:
- out.fill(x * self.scalar)
- return
- out.fill(x)
- return
-
- def adjoint(self,x, out=None):
- return self.direct(x, out)
-
- def norm(self):
- return self.s1
-
- def get_max_sing_val(self):
- return self.s1
- def allocate_direct(self):
- if issubclass(type(self.geometry), ImageGeometry):
- return ImageData(geometry=self.geometry)
- elif issubclass(type(self.geometry), AcquisitionGeometry):
- return AcquisitionData(geometry=self.geometry)
- else:
- raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry))
- def allocate_adjoint(self):
- return self.allocate_direct()
- def range_geometry(self):
- return self.geometry
- def domain_geometry(self):
- return self.geometry
-
-
-
-class FiniteDiff2D(Operator):
- def __init__(self):
- self.s1 = 8.0
- super(FiniteDiff2D, self).__init__()
-
- def direct(self,x, out=None):
- '''Forward differences with Neumann BC.'''
- # FIXME this seems to be working only with numpy arrays
-
- d1 = numpy.zeros_like(x.as_array())
- d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1]
- d2 = numpy.zeros_like(x.as_array())
- d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:]
- d = numpy.stack((d1,d2),0)
- #x.geometry.voxel_num_z = 2
- return type(x)(d,False,geometry=x.geometry)
-
- def adjoint(self,x, out=None):
- '''Backward differences, Neumann BC.'''
- Nrows = x.get_dimension_size('horizontal_x')
- Ncols = x.get_dimension_size('horizontal_y')
- Nchannels = 1
- if len(x.shape) == 4:
- Nchannels = x.get_dimension_size('channel')
- zer = numpy.zeros((Nrows,1))
- xxx = x.as_array()[0,:,:-1]
- #
- h = numpy.concatenate((zer,xxx), 1)
- h -= numpy.concatenate((xxx,zer), 1)
-
- zer = numpy.zeros((1,Ncols))
- xxx = x.as_array()[1,:-1,:]
- #
- v = numpy.concatenate((zer,xxx), 0)
- v -= numpy.concatenate((xxx,zer), 0)
- return type(x)(h + v, False, geometry=x.geometry)
-
- def size(self):
- return NotImplemented
-
- def get_max_sing_val(self):
- return self.s1
-
-def PowerMethodNonsquareOld(op,numiters):
- # Initialise random
- # Jakob's
- #inputsize = op.size()[1]
- #x0 = ImageContainer(numpy.random.randn(*inputsize)
- # Edo's
- #vg = ImageGeometry(voxel_num_x=inputsize[0],
- # voxel_num_y=inputsize[1],
- # voxel_num_z=inputsize[2])
- #
- #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
- #print (x0)
- #x0.fill(numpy.random.randn(*x0.shape))
-
- x0 = op.create_image_data()
-
- s = numpy.zeros(numiters)
- # Loop
- for it in numpy.arange(numiters):
- x1 = op.adjoint(op.direct(x0))
- x1norm = numpy.sqrt((x1**2).sum())
- #print ("x0 **********" ,x0)
- #print ("x1 **********" ,x1)
- s[it] = (x1*x0).sum() / (x0*x0).sum()
- x0 = (1.0/x1norm)*x1
- return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
-
-#def PowerMethod(op,numiters):
-# # Initialise random
-# x0 = np.random.randn(400)
-# s = np.zeros(numiters)
-# # Loop
-# for it in np.arange(numiters):
-# x1 = np.dot(op.transpose(),np.dot(op,x0))
-# x1norm = np.sqrt(np.sum(np.dot(x1,x1)))
-# s[it] = np.dot(x1,x0) / np.dot(x1,x0)
-# x0 = (1.0/x1norm)*x1
-# return s, x0
-
-
-def PowerMethodNonsquare(op,numiters , x0=None):
- # Initialise random
- # Jakob's
- # inputsize , outputsize = op.size()
- #x0 = ImageContainer(numpy.random.randn(*inputsize)
- # Edo's
- #vg = ImageGeometry(voxel_num_x=inputsize[0],
- # voxel_num_y=inputsize[1],
- # voxel_num_z=inputsize[2])
- #
- #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x'])
- #print (x0)
- #x0.fill(numpy.random.randn(*x0.shape))
-
- if x0 is None:
- #x0 = op.create_image_data()
- x0 = op.allocate_direct()
- x0.fill(numpy.random.randn(*x0.shape))
-
- s = numpy.zeros(numiters)
- # Loop
- for it in numpy.arange(numiters):
- x1 = op.adjoint(op.direct(x0))
- #x1norm = numpy.sqrt((x1*x1).sum())
- x1norm = x1.norm()
- #print ("x0 **********" ,x0)
- #print ("x1 **********" ,x1)
- s[it] = (x1*x0).sum() / (x0.squared_norm())
- x0 = (1.0/x1norm)*x1
- return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
-
-class LinearOperatorMatrix(Operator):
- def __init__(self,A):
- self.A = A
- self.s1 = None # Largest singular value, initially unknown
- super(LinearOperatorMatrix, self).__init__()
-
- def direct(self,x, out=None):
- if out is None:
- return type(x)(numpy.dot(self.A,x.as_array()))
- else:
- numpy.dot(self.A, x.as_array(), out=out.as_array())
-
-
- def adjoint(self,x, out=None):
- if out is None:
- return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
- else:
- numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array())
-
-
- def size(self):
- return self.A.shape
-
- def get_max_sing_val(self):
- # If unknown, compute and store. If known, simply return it.
- if self.s1 is None:
- self.s1 = svds(self.A,1,return_singular_vectors=False)[0]
- return self.s1
- else:
- return self.s1
- def allocate_direct(self):
- '''allocates the memory to hold the result of adjoint'''
- #numpy.dot(self.A.transpose(),x.as_array())
- M_A, N_A = self.A.shape
- out = numpy.zeros((N_A,1))
- return DataContainer(out)
- def allocate_adjoint(self):
- '''allocate the memory to hold the result of direct'''
- #numpy.dot(self.A.transpose(),x.as_array())
- M_A, N_A = self.A.shape
- out = numpy.zeros((M_A,1))
- return DataContainer(out)
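Since ccpi.optimisation.ops is deleted here, downstream code needs the replacements used throughout this patch; a migration sketch under those assumptions (illustration only):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Identity, LinearOperator, LinearOperatorMatrix

    ig = ImageGeometry(64, 64)

    # old: Id = TomoIdentity(geometry=ig)
    Id = Identity(ig)

    # old: s1, sall, svec = PowerMethodNonsquare(Id, 25, x0)
    x0 = ig.allocate('random_int')
    s1, sall, svec = LinearOperator.PowerMethod(Id, 25, x0)

    # LinearOperatorMatrix now lives in ccpi.optimisation.operators (see above).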
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index 87930b5..a3fde59 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -35,7 +35,9 @@ setup(
'ccpi.framework', 'ccpi.optimisation',
'ccpi.optimisation.operators',
'ccpi.optimisation.algorithms',
- 'ccpi.optimisation.functions'],
+ 'ccpi.optimisation.functions',
+ 'ccpi.contrib','ccpi.contrib.optimisation',
+ 'ccpi.contrib.optimisation.algorithms'],
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py
index a20f289..aeb8454 100755
--- a/Wrappers/Python/test/test_BlockDataContainer.py
+++ b/Wrappers/Python/test/test_BlockDataContainer.py
@@ -7,15 +7,9 @@ Created on Tue Mar 5 16:08:23 2019
import unittest
import numpy
-#from ccpi.plugins.ops import CCPiProjectorSimple
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.funcs import Norm2sq, Norm1
from ccpi.framework import ImageGeometry, AcquisitionGeometry
from ccpi.framework import ImageData, AcquisitionData
-#from ccpi.optimisation.algorithms import GradientDescent
from ccpi.framework import BlockDataContainer, DataContainer
-#from ccpi.optimisation.Algorithms import CGLS
import functools
from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py
index e1c05fb..b82c849 100644
--- a/Wrappers/Python/test/test_BlockOperator.py
+++ b/Wrappers/Python/test/test_BlockOperator.py
@@ -1,17 +1,11 @@
import unittest
from ccpi.optimisation.operators import BlockOperator
from ccpi.framework import BlockDataContainer
-from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.operators import Identity
from ccpi.framework import ImageGeometry, ImageData
import numpy
from ccpi.optimisation.operators import FiniteDiff
-class TestOperator(TomoIdentity):
- def __init__(self, *args, **kwargs):
- super(TestOperator, self).__init__(*args, **kwargs)
- self.range = kwargs.get('range', self.geometry)
- def range_geometry(self):
- return self.range
class TestBlockOperator(unittest.TestCase):
@@ -21,7 +15,7 @@ class TestBlockOperator(unittest.TestCase):
ImageGeometry(10,20,30) , \
ImageGeometry(10,20,30) ]
x = [ g.allocate() for g in ig ]
- ops = [ TomoIdentity(g) for g in ig ]
+ ops = [ Identity(g) for g in ig ]
K = BlockOperator(*ops)
X = BlockDataContainer(x[0])
@@ -50,7 +44,7 @@ class TestBlockOperator(unittest.TestCase):
ImageGeometry(10,20,30) , \
ImageGeometry(10,20,30) ]
x = [ g.allocate() for g in ig ]
- ops = [ TomoIdentity(g) for g in ig ]
+ ops = [ Identity(g) for g in ig ]
K = BlockOperator(*ops)
self.assertTrue(False)
@@ -70,8 +64,8 @@ class TestBlockOperator(unittest.TestCase):
ImageGeometry(10,22,31) , \
ImageGeometry(10,20,31) ]
x = [ g.allocate() for g in ig ]
- ops = [ TestOperator(g, range=r) for g,r in zip(ig, rg0) ]
- ops += [ TestOperator(g, range=r) for g,r in zip(ig, rg1) ]
+ ops = [ Identity(g, gm_range=r) for g,r in zip(ig, rg0) ]
+ ops += [ Identity(g, gm_range=r) for g,r in zip(ig, rg1) ]
K = BlockOperator(*ops, shape=(2,3))
print ("K col comp? " , K.column_wise_compatible())
@@ -90,7 +84,7 @@ class TestBlockOperator(unittest.TestCase):
ImageGeometry(10,20,30) , \
ImageGeometry(10,20,30) ]
x = [ g.allocate() for g in ig ]
- ops = [ TomoIdentity(g) for g in ig ]
+ ops = [ Identity(g) for g in ig ]
val = 1
# test limit as non Scaled
@@ -121,7 +115,7 @@ class TestBlockOperator(unittest.TestCase):
#ImageGeometry(10,20,30) , \
ImageGeometry(2,3 ) ]
x = [ g.allocate() for g in ig ]
- ops = [ TomoIdentity(g) for g in ig ]
+ ops = [ Identity(g) for g in ig ]
# test limit as non Scaled
@@ -158,7 +152,7 @@ class TestBlockOperator(unittest.TestCase):
print (img.shape, ig.shape)
self.assertTrue(img.shape == (30,20,10))
self.assertEqual(img.sum(), 0)
- Id = TomoIdentity(ig)
+ Id = Identity(ig)
y = Id.direct(img)
numpy.testing.assert_array_equal(y.as_array(), img.as_array())
@@ -167,7 +161,7 @@ class TestBlockOperator(unittest.TestCase):
from ccpi.plugins.ops import CCPiProjectorSimple
from ccpi.optimisation.ops import PowerMethodNonsquare
- from ccpi.optimisation.ops import TomoIdentity
+ from ccpi.optimisation.operators import Identity
from ccpi.optimisation.funcs import Norm2sq, Norm1
from ccpi.framework import ImageGeometry, AcquisitionGeometry
from ccpi.optimisation.Algorithms import GradientDescent
@@ -265,8 +259,8 @@ class TestBlockOperator(unittest.TestCase):
ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
# setup a tomo identity
- Ibig = 1e5 * TomoIdentity(geometry=ig)
- Ismall = 1e-5 * TomoIdentity(geometry=ig)
+ Ibig = 1e5 * Identity(ig)
+ Ismall = 1e-5 * Identity(ig)
# composite operator
Kbig = BlockOperator(A, Ibig, shape=(2,1))
@@ -362,4 +356,4 @@ class TestBlockOperator(unittest.TestCase):
G1 = FiniteDiff(ig1, direction=2, bnd_cond = 'Periodic')
print(ig1.shape==u1.shape)
print (G1.norm())
- numpy.testing.assert_allclose(G1.norm(), numpy.sqrt(4), atol=0.1) \ No newline at end of file
+ numpy.testing.assert_allclose(G1.norm(), numpy.sqrt(4), atol=0.1)
diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py
index 40cd244..8e8ab87 100755
--- a/Wrappers/Python/test/test_DataContainer.py
+++ b/Wrappers/Python/test/test_DataContainer.py
@@ -174,7 +174,7 @@ class TestDataContainer(unittest.TestCase):
def binary_add(self):
print("Test binary add")
X, Y, Z = 512, 512, 512
- X, Y, Z = 1024, 512, 512
+ #X, Y, Z = 1024, 512, 512
steps = [timer()]
a = numpy.ones((X, Y, Z), dtype='float32')
steps.append(timer())
@@ -183,9 +183,10 @@ class TestDataContainer(unittest.TestCase):
#print("a refcount " , sys.getrefcount(a))
ds = DataContainer(a, False, ['X', 'Y', 'Z'])
ds1 = ds.copy()
+ out = ds.copy()
steps.append(timer())
- ds.add(ds1, out=ds)
+ ds.add(ds1, out=out)
steps.append(timer())
t1 = dt(steps)
print("ds.add(ds1, out=ds)", dt(steps))
@@ -196,20 +197,25 @@ class TestDataContainer(unittest.TestCase):
print("ds2 = ds.add(ds1)", dt(steps))
self.assertLess(t1, t2)
- self.assertEqual(ds.as_array()[0][0][0], 2.)
-
+ self.assertEqual(out.as_array()[0][0][0], 2.)
+ self.assertNumpyArrayEqual(out.as_array(), ds2.as_array())
+
ds0 = ds
- ds0.add(2, out=ds0)
steps.append(timer())
- print("ds0.add(2,out=ds0)", dt(steps), 3, ds0.as_array()[0][0][0])
- self.assertEqual(4., ds0.as_array()[0][0][0])
+ ds0.add(2, out=out)
+ steps.append(timer())
+ print("ds0.add(2,out=out)", dt(steps), 3, ds0.as_array()[0][0][0])
+ self.assertEqual(3., out.as_array()[0][0][0])
dt1 = dt(steps)
+ steps.append(timer())
ds3 = ds0.add(2)
steps.append(timer())
print("ds3 = ds0.add(2)", dt(steps), 5, ds3.as_array()[0][0][0])
dt2 = dt(steps)
self.assertLess(dt1, dt2)
+
+ self.assertNumpyArrayEqual(out.as_array(), ds3.as_array())
def binary_subtract(self):
print("Test binary subtract")
@@ -222,16 +228,17 @@ class TestDataContainer(unittest.TestCase):
#print("a refcount " , sys.getrefcount(a))
ds = DataContainer(a, False, ['X', 'Y', 'Z'])
ds1 = ds.copy()
+ out = ds.copy()
steps.append(timer())
- ds.subtract(ds1, out=ds)
+ ds.subtract(ds1, out=out)
steps.append(timer())
t1 = dt(steps)
print("ds.subtract(ds1, out=ds)", dt(steps))
- self.assertEqual(0., ds.as_array()[0][0][0])
+ self.assertEqual(0., out.as_array()[0][0][0])
steps.append(timer())
- ds2 = ds.subtract(ds1)
+ ds2 = out.subtract(ds1)
self.assertEqual(-1., ds2.as_array()[0][0][0])
steps.append(timer())
@@ -247,8 +254,8 @@ class TestDataContainer(unittest.TestCase):
#ds0.__isub__( 2 )
steps.append(timer())
print("ds0.subtract(2,out=ds0)", dt(
- steps), -2., ds0.as_array()[0][0][0])
- self.assertEqual(-2., ds0.as_array()[0][0][0])
+ steps), -1., ds0.as_array()[0][0][0])
+ self.assertEqual(-1., ds0.as_array()[0][0][0])
dt1 = dt(steps)
ds3 = ds0.subtract(2)
@@ -256,8 +263,8 @@ class TestDataContainer(unittest.TestCase):
print("ds3 = ds0.subtract(2)", dt(steps), 0., ds3.as_array()[0][0][0])
dt2 = dt(steps)
self.assertLess(dt1, dt2)
- self.assertEqual(-2., ds0.as_array()[0][0][0])
- self.assertEqual(-4., ds3.as_array()[0][0][0])
+ self.assertEqual(-1., ds0.as_array()[0][0][0])
+ self.assertEqual(-3., ds3.as_array()[0][0][0])
def binary_multiply(self):
print("Test binary multiply")
@@ -300,6 +307,9 @@ class TestDataContainer(unittest.TestCase):
self.assertLess(dt1, dt2)
self.assertEqual(4., ds3.as_array()[0][0][0])
self.assertEqual(2., ds.as_array()[0][0][0])
+
+ ds.multiply(2.5, out=ds0)
+ self.assertEqual(2.5*2., ds0.as_array()[0][0][0])
def binary_divide(self):
print("Test binary divide")
@@ -575,6 +585,32 @@ class TestDataContainer(unittest.TestCase):
norm = dc.norm()
self.assertEqual(sqnorm, 8.0)
numpy.testing.assert_almost_equal(norm, numpy.sqrt(8.0), decimal=7)
+
+ def test_multiply_out(self):
+ print ("test multiply_out")
+ import functools
+ ig = ImageGeometry(10,11,12)
+ u = ig.allocate()
+ a = numpy.ones(u.shape)
+
+ u.fill(a)
+
+ numpy.testing.assert_array_equal(a, u.as_array())
+
+ #u = ig.allocate(ImageGeometry.RANDOM_INT, seed=1)
+ l = functools.reduce(lambda x,y: x*y, (10,11,12), 1)
+
+ a = numpy.zeros((l, ), dtype=numpy.float32)
+ for i in range(l):
+ a[i] = numpy.sin(2 * i* 3.1415/l)
+ b = numpy.reshape(a, u.shape)
+ u.fill(b)
+ numpy.testing.assert_array_equal(b, u.as_array())
+
+ u.multiply(2, out=u)
+ c = b * 2
+ numpy.testing.assert_array_equal(u.as_array(), c)
+
diff --git a/Wrappers/Python/test/test_Gradient.py b/Wrappers/Python/test/test_Gradient.py
index 1d6485c..c6b2d2e 100755
--- a/Wrappers/Python/test/test_Gradient.py
+++ b/Wrappers/Python/test/test_Gradient.py
@@ -1,9 +1,5 @@
import unittest
import numpy
-#from ccpi.plugins.ops import CCPiProjectorSimple
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.funcs import Norm2sq, Norm1
from ccpi.framework import ImageGeometry, AcquisitionGeometry
from ccpi.framework import ImageData, AcquisitionData
#from ccpi.optimisation.algorithms import GradientDescent
diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py
index 293fb43..5c97545 100644
--- a/Wrappers/Python/test/test_Operator.py
+++ b/Wrappers/Python/test/test_Operator.py
@@ -1,6 +1,6 @@
import unittest
#from ccpi.optimisation.operators import Operator
-from ccpi.optimisation.ops import TomoIdentity
+#from ccpi.optimisation.ops import TomoIdentity
from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer, DataContainer
from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator,\
FiniteDiff
@@ -8,6 +8,7 @@ import numpy
from timeit import default_timer as timer
from ccpi.framework import ImageGeometry
from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff
+from ccpi.optimisation.operators import LinearOperator
def dt(steps):
return steps[-1] - steps[-2]
@@ -53,7 +54,7 @@ class TestOperator(CCPiTestClass):
ig = ImageGeometry(10,20,30)
img = ig.allocate()
scalar = 0.5
- sid = scalar * TomoIdentity(ig)
+ sid = scalar * Identity(ig)
numpy.testing.assert_array_equal(scalar * img.as_array(), sid.direct(img).as_array())
@@ -62,11 +63,12 @@ class TestOperator(CCPiTestClass):
img = ig.allocate()
self.assertTrue(img.shape == (30,20,10))
self.assertEqual(img.sum(), 0)
- Id = TomoIdentity(ig)
+ Id = Identity(ig)
y = Id.direct(img)
numpy.testing.assert_array_equal(y.as_array(), img.as_array())
def test_FiniteDifference(self):
+ print ("test FiniteDifference")
##
N, M = 2, 3
@@ -93,11 +95,46 @@ class TestOperator(CCPiTestClass):
G = Gradient(ig)
u = G.range_geometry().allocate(ImageGeometry.RANDOM_INT)
- res = G.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+ res = G.domain_geometry().allocate()
G.adjoint(u, out=res)
w = G.adjoint(u)
self.assertNumpyArrayEqual(res.as_array(), w.as_array())
+ u = G.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+ res = G.range_geometry().allocate()
+ G.direct(u, out=res)
+ w = G.direct(u)
+ self.assertBlockDataContainerEqual(res, w)
+
+ def test_PowerMethod(self):
+
+ N, M = 200, 300
+ niter = 1000
+ ig = ImageGeometry(N, M)
+ Id = Identity(ig)
+
+ G = Gradient(ig)
+
+ uid = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT, seed=1)
+
+ a = LinearOperator.PowerMethod(Id, niter, uid)
+ b = LinearOperator.PowerMethodNonsquare(Id, niter, uid)
+
+ print ("Edo impl", a[0])
+ print ("old impl", b[0])
+
+ #self.assertAlmostEqual(a[0], b[0])
+ self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=6)
+
+ a = LinearOperator.PowerMethod(G, niter, uid)
+ b = LinearOperator.PowerMethodNonsquare(G, niter, uid)
+
+ print ("Edo impl", a[0])
+ print ("old impl", b[0])
+ self.assertNumpyArrayAlmostEqual(a[0],b[0],decimal=2)
+ #self.assertAlmostEqual(a[0], b[0])
+
+
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index a35ffc1..669804e 100755
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -12,7 +12,7 @@ from ccpi.framework import ImageData
from ccpi.framework import AcquisitionData
from ccpi.framework import ImageGeometry
from ccpi.framework import AcquisitionGeometry
-from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.operators import Identity
from ccpi.optimisation.funcs import Norm2sq
from ccpi.optimisation.algorithms import GradientDescent
from ccpi.optimisation.algorithms import CGLS
@@ -26,7 +26,7 @@ class TestAlgorithms(unittest.TestCase):
def setUp(self):
#wget.download('https://github.com/DiamondLightSource/Savu/raw/master/test_data/data/24737_fd.nxs')
#self.filename = '24737_fd.nxs'
- # we use TomoIdentity as the operator and solve the simple least squares
+ # we use Identity as the operator and solve the simple least squares
# problem for a random-valued ImageData or AcquisitionData b?
# Then we know the minimiser is b itself
@@ -48,7 +48,7 @@ class TestAlgorithms(unittest.TestCase):
# fill with random numbers
b.fill(numpy.random.random(x_init.shape))
- identity = TomoIdentity(geometry=ig)
+ identity = Identity(ig)
norm2sq = Norm2sq(identity, b)
@@ -66,7 +66,7 @@ class TestAlgorithms(unittest.TestCase):
# fill with random numbers
b.fill(numpy.random.random(x_init.shape))
- identity = TomoIdentity(geometry=ig)
+ identity = Identity(ig)
alg = CGLS(x_init=x_init, operator=identity, data=b)
alg.max_iteration = 1
@@ -83,7 +83,7 @@ class TestAlgorithms(unittest.TestCase):
x_init = ImageData(geometry=ig)
x_init.fill(numpy.random.random(x_init.shape))
- identity = TomoIdentity(geometry=ig)
+ identity = Identity(ig)
norm2sq = Norm2sq(identity, b)
norm2sq.L = 2 * norm2sq.c * identity.norm()**2
@@ -121,4 +121,4 @@ class TestAlgorithms(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
- \ No newline at end of file
+
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 05bdd7a..af419c7 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -9,7 +9,7 @@ Created on Sat Mar 2 19:24:37 2019
import numpy as np
#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
+from ccpi.optimisation.functions import Function, KullbackLeibler
from ccpi.framework import DataContainer, ImageData, ImageGeometry
from ccpi.optimisation.operators import Identity
from ccpi.optimisation.operators import BlockOperator
@@ -17,15 +17,10 @@ from ccpi.framework import BlockDataContainer
from numbers import Number
from ccpi.optimisation.operators import Gradient
-#from ccpi.optimisation.functions import SimpleL2NormSq
from ccpi.optimisation.functions import L2NormSquared
-#from ccpi.optimisation.functions import SimpleL1Norm
from ccpi.optimisation.functions import L1Norm, MixedL21Norm
-from ccpi.optimisation.funcs import Norm2sq
-# from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq
-# from ccpi.optimisation.functions.L1Norm import SimpleL1Norm, L1Norm
-#from ccpi.optimisation.functions import mixed_L12Norm
+from ccpi.optimisation.functions import Norm2sq
from ccpi.optimisation.functions import ZeroFunction
from ccpi.optimisation.functions import FunctionOperatorComposition
@@ -104,6 +99,7 @@ class TestFunction(unittest.TestCase):
def test_L2NormSquared(self):
# TESTS for L2 and scalar * L2
+ print ("Test L2NormSquared")
M, N, K = 2,3,5
ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
@@ -329,7 +325,7 @@ class TestFunction(unittest.TestCase):
a1 = f_no_scaled(U)
a2 = f_scaled(U)
- self.assertNumpyArrayAlmostEqual(a1.as_array(),a2.as_array())
+ self.assertNumpyArrayAlmostEqual(a1,a2)
tmp = [ el**2 for el in U.containers ]
@@ -346,32 +342,26 @@ class TestFunction(unittest.TestCase):
f_no_scaled.proximal_conjugate(U, 1, out=z3)
self.assertBlockDataContainerEqual(z3,z1)
-#
-# f1 = L2NormSq(alpha=1, b=noisy_data)
-# print(f1(noisy_data))
-#
-# f2 = L2NormSq(alpha=5, b=noisy_data).composition_with(op2)
-# print(f2(noisy_data))
-#
-# print(f1.gradient(noisy_data).as_array())
-# print(f2.gradient(noisy_data).as_array())
-##
-# print(f1.proximal(noisy_data,1).as_array())
-# print(f2.proximal(noisy_data,1).as_array())
-#
-#
-# f3 = mixed_L12Norm(alpha = 1).composition_with(op1)
-# print(f3(noisy_data))
-#
-# print(ImageData(op1.direct(noisy_data).power(2).sum(axis=0)).sqrt().sum())
-#
-# print( 5*(op2.direct(d) - noisy_data).power(2).sum(), f2(d))
-#
-# from functions import mixed_L12Norm as mixed_L12Norm_old
-#
-# print(mixed_L12Norm_old(op1,None,alpha)(noisy_data))
-
-
- #
-
+ def test_KullbackLeibler(self):
+ print ("test_KullbackLeibler")
+ N, M = 2,3
+ ig = ImageGeometry(N, M)
+ data = ig.allocate(ImageGeometry.RANDOM_INT)
+ x = ig.allocate(ImageGeometry.RANDOM_INT)
+ bnoise = ig.allocate(ImageGeometry.RANDOM_INT)
+
+ out = ig.allocate()
+
+ f = KullbackLeibler(data, bnoise=bnoise)
+
+ grad = f.gradient(x)
+ f.gradient(x, out=out)
+ numpy.testing.assert_array_equal(grad.as_array(), out.as_array())
+
+ prox = f.proximal(x,1.2)
+ f.proximal(x, 1.2, out=out)
+ numpy.testing.assert_array_equal(prox.as_array(), out.as_array())
+ proxc = f.proximal_conjugate(x,1.2)
+ f.proximal_conjugate(x, 1.2, out=out)
+ numpy.testing.assert_array_equal(proxc.as_array(), out.as_array()) \ No newline at end of file
diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py
index c698032..9b4d53b 100755
--- a/Wrappers/Python/test/test_run_test.py
+++ b/Wrappers/Python/test/test_run_test.py
@@ -8,16 +8,15 @@ from ccpi.framework import ImageGeometry
from ccpi.framework import AcquisitionGeometry
from ccpi.optimisation.algorithms import FISTA
#from ccpi.optimisation.algs import FBPD
-from ccpi.optimisation.funcs import Norm2sq
+from ccpi.optimisation.functions import Norm2sq
from ccpi.optimisation.functions import ZeroFunction
from ccpi.optimisation.funcs import Norm1
-from ccpi.optimisation.funcs import TV2D
from ccpi.optimisation.funcs import Norm2
-from ccpi.optimisation.ops import LinearOperatorMatrix
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.ops import Identity
-from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.operators import LinearOperatorMatrix
+from ccpi.optimisation.operators import Identity
+#from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.operators import LinearOperator
import numpy.testing
@@ -220,7 +219,7 @@ class TestAlgorithms(unittest.TestCase):
# Create object instances with the test data A and b.
f = Norm2sq(A, b, c=0.5, memopt=True)
- f.L = PowerMethodNonsquare(A, 25, x_init)[0]
+ f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
print ("Lipschitz", f.L)
g0 = ZeroFun()
@@ -289,7 +288,7 @@ class TestAlgorithms(unittest.TestCase):
# Data fidelity term
f_denoise = Norm2sq(I, y, c=0.5, memopt=True)
x_init = ImageData(geometry=ig)
- f_denoise.L = PowerMethodNonsquare(I, 25, x_init)[0]
+ f_denoise.L = LinearOperator.PowerMethod(I, 25, x_init)[0]
# 1-norm regulariser
lam1_denoise = 1.0
@@ -335,43 +334,6 @@ class TestAlgorithms(unittest.TestCase):
self.assertNumpyArrayAlmostEqual(
x_fbpd1_denoise.array.flatten(), x1_denoise.value, 5)
- x1_cvx = x1_denoise.value
- x1_cvx.shape = (N, N)
-
- # Now TV with FBPD
- lam_tv = 0.1
- gtv = TV2D(lam_tv)
- gtv(gtv.op.direct(x_init_denoise))
-
- opt_tv = {'tol': 1e-4, 'iter': 10000}
-
- x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\
- criterfbpdtv_denoise = \
- FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv, opt=opt_tv)
- print(x_fbpdtv_denoise)
- print(criterfbpdtv_denoise[-1])
-
- # Compare to CVXPY
-
- # Construct the problem.
- xtv_denoise = Variable((N, N))
- objectivetv_denoise = Minimize(
- 0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise))
- probtv_denoise = Problem(objectivetv_denoise)
-
- # The optimal objective is returned by prob.solve().
- resulttv_denoise = probtv_denoise.solve(
- verbose=False, solver=SCS, eps=1e-12)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus 1-norm solution and objective value:")
- print(xtv_denoise.value)
- print(objectivetv_denoise.value)
-
- self.assertNumpyArrayAlmostEqual(
- x_fbpdtv_denoise.as_array(), xtv_denoise.value, 1)
-
else:
self.assertTrue(cvx_not_installable)