summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEdoardo Pasca <edo.paskino@gmail.com>2018-09-29 23:52:58 +0100
committerGitHub <noreply@github.com>2018-09-29 23:52:58 +0100
commitd00a41c8c6b95e9a56f6af221ebc2d23d9f8db9d (patch)
treea1de59a693478f0a94f7e1f42476880aaeb0efba
parentf8f294f51aac110049da3eb7c1cb8cbd6a46282f (diff)
downloadframework-d00a41c8c6b95e9a56f6af221ebc2d23d9f8db9d.tar.gz
framework-d00a41c8c6b95e9a56f6af221ebc2d23d9f8db9d.tar.bz2
framework-d00a41c8c6b95e9a56f6af221ebc2d23d9f8db9d.tar.xz
framework-d00a41c8c6b95e9a56f6af221ebc2d23d9f8db9d.zip
Memhandle (#136)
* added inplace algebra correctly * readded object to AcquisitionGeometry and ImageGeometry * Add more memory management * added unary and binary operations to DataContainer * Added unary and binary operations added also unittest * added unittest * added unit tests for binary operators * bugfixes and remove prints * default initialisation behaviour not to deep_copy * FISTA to use the efficient memory handling additions * add calls with input out=None Starts to address #134 * add methods with arg out=None * add test for memhandle * major bugfix after removal of create_image_data * fixed FISTA memhandle * some bugfix in FISTA and funcs ops * debug in progress * Fixed several bugs in memory optimised code Added methods allocate_adjoint and allocate_direct to Operator. * test with cvx and adjustments * fixed unittest, need to fix inline algebra * fixed inline algebra definition * initial cvxpy unittest * adds cvx demo as unit test closes #150 check equality of cvx calculated with respect to FISTA and FBDP
-rw-r--r--Wrappers/Python/ccpi/framework.py244
-rwxr-xr-xWrappers/Python/ccpi/optimisation/algs.py78
-rwxr-xr-xWrappers/Python/ccpi/optimisation/funcs.py139
-rwxr-xr-xWrappers/Python/ccpi/optimisation/ops.py114
-rw-r--r--Wrappers/Python/conda-recipe/meta.yaml4
-rwxr-xr-xWrappers/Python/conda-recipe/run_test.py536
-rw-r--r--Wrappers/Python/wip/demo_compare_cvx.py25
-rwxr-xr-xWrappers/Python/wip/demo_memhandle.py193
8 files changed, 1200 insertions, 133 deletions
diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py
index 0c2432f..a34ca37 100644
--- a/Wrappers/Python/ccpi/framework.py
+++ b/Wrappers/Python/ccpi/framework.py
@@ -33,6 +33,15 @@ def find_key(dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.items() if v == val][0]
+def message(cls, msg, *args):
+ msg = "{0}: " + msg
+ for i in range(len(args)):
+ msg += " {%d}" %(i+1)
+ args = list(args)
+ args.insert(0, cls.__name__ )
+
+ return msg.format(*args )
+
class ImageGeometry(object):
@@ -211,7 +220,7 @@ class DataContainer(object):
if type(array) == numpy.ndarray:
if deep_copy:
- self.array = array[:]
+ self.array = array.copy()
else:
self.array = array
else:
@@ -335,12 +344,17 @@ class DataContainer(object):
def fill(self, array, **dimension):
'''fills the internal numpy array with the one provided'''
if dimension == {}:
- if numpy.shape(array) != numpy.shape(self.array):
- raise ValueError('Cannot fill with the provided array.' + \
- 'Expecting {0} got {1}'.format(
- numpy.shape(self.array),
- numpy.shape(array)))
- self.array = array[:]
+ if issubclass(type(array), DataContainer) or\
+ issubclass(type(array), numpy.ndarray):
+ if array.shape != self.shape:
+ raise ValueError('Cannot fill with the provided array.' + \
+ 'Expecting {0} got {1}'.format(
+ self.shape,array.shape))
+ if issubclass(type(array), DataContainer):
+ numpy.copyto(self.array, array.array)
+ else:
+ #self.array[:] = array
+ numpy.copyto(self.array, array)
else:
command = 'self.array['
@@ -360,8 +374,10 @@ class DataContainer(object):
def check_dimensions(self, other):
return self.shape == other.shape
-
- def __add__(self, other):
+
+ ## algebra
+
+ def __add__(self, other , *args, out=None, **kwargs):
if issubclass(type(other), DataContainer):
if self.check_dimensions(other):
out = self.as_array() + other.as_array()
@@ -407,7 +423,6 @@ class DataContainer(object):
return self.__div__(other)
def __div__(self, other):
- print ("calling __div__")
if issubclass(type(other), DataContainer):
if self.check_dimensions(other):
out = self.as_array() / other.as_array()
@@ -470,40 +485,6 @@ class DataContainer(object):
type(other)))
# __mul__
-
- #def __abs__(self):
- # operation = FM.OPERATION.ABS
- # return self.callFieldMath(operation, None, self.mask, self.maskOnValue)
- # __abs__
-
- def abs(self):
- out = numpy.abs(self.as_array() )
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
-
- def maximum(self,otherscalar):
- out = numpy.maximum(self.as_array(),otherscalar)
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
-
- def minimum(self,otherscalar):
- out = numpy.minimum(self.as_array(),otherscalar)
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
-
- def sign(self):
- out = numpy.sign(self.as_array() )
- return type(self)(out,
- deep_copy=True,
- dimension_labels=self.dimension_labels,
- geometry=self.geometry)
-
# reverse operand
def __radd__(self, other):
return self + other
@@ -530,7 +511,7 @@ class DataContainer(object):
return type(self)(fother ** self.array ,
dimension_labels=self.dimension_labels,
geometry=self.geometry)
- elif issubclass(other, DataContainer):
+ elif issubclass(type(other), DataContainer):
if self.check_dimensions(other):
return type(self)(other.as_array() ** self.array ,
dimension_labels=self.dimension_labels,
@@ -539,27 +520,59 @@ class DataContainer(object):
raise ValueError('Dimensions do not match')
# __rpow__
- def sum(self):
- return self.as_array().sum()
-
# in-place arithmetic operators:
# (+=, -=, *=, /= , //=,
+ # must return self
+
def __iadd__(self, other):
- return self + other
+ if isinstance(other, (int, float)) :
+ #print ("__iadd__", self.array.shape)
+ numpy.add(self.array, other, out=self.array)
+ elif issubclass(type(other), DataContainer):
+ if self.check_dimensions(other):
+ numpy.add(self.array, other.array, out=self.array)
+ else:
+ raise ValueError('Dimensions do not match')
+ return self
# __iadd__
def __imul__(self, other):
- return self * other
+ if isinstance(other, (int, float)) :
+ #print ("__imul__", self.array.shape)
+ #print ("type(self)", type(self))
+ #print ("self.array", self.array, other)
+ arr = self.as_array()
+ #print ("arr", arr)
+ numpy.multiply(arr, other, out=arr)
+ elif issubclass(type(other), DataContainer):
+ if self.check_dimensions(other):
+ numpy.multiply(self.array, other.array, out=self.array)
+ else:
+ raise ValueError('Dimensions do not match')
+ return self
# __imul__
def __isub__(self, other):
- return self - other
+ if isinstance(other, (int, float)) :
+ numpy.subtract(self.array, other, out=self.array)
+ elif issubclass(type(other), DataContainer):
+ if self.check_dimensions(other):
+ numpy.subtract(self.array, other.array, out=self.array)
+ else:
+ raise ValueError('Dimensions do not match')
+ return self
# __isub__
def __idiv__(self, other):
- print ("call __idiv__")
- return self / other
+ if isinstance(other, (int, float)) :
+ numpy.divide(self.array, other, out=self.array)
+ elif issubclass(type(other), DataContainer):
+ if self.check_dimensions(other):
+ numpy.divide(self.array, other.array, out=self.array)
+ else:
+ raise ValueError('Dimensions do not match')
+ return self
# __idiv__
def __str__ (self, representation=False):
@@ -578,9 +591,6 @@ class DataContainer(object):
dimension_labels=self.dimension_labels,
deep_copy=True,
geometry=self.geometry )
- def copy(self):
- '''alias of clone'''
- return self.clone()
def get_data_axes_order(self,new_order=None):
'''returns the axes label of self as a list
@@ -617,13 +627,127 @@ class DataContainer(object):
.format(len(self.shape),len(new_order)))
-
-
+ def copy(self):
+ '''alias of clone'''
+ return self.clone()
+
+ ## binary operations
+
+ def pixel_wise_binary(self,pwop, x2 , *args, out=None, **kwargs):
+ if out is None:
+ if isinstance(x2, (int, float, complex)):
+ out = pwop(self.as_array() , x2 , *args, **kwargs )
+ elif issubclass(type(x2) , DataContainer):
+ out = pwop(self.as_array() , x2.as_array() , *args, **kwargs )
+ return type(self)(out,
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+
+
+ elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer):
+ if self.check_dimensions(out) and self.check_dimensions(x2):
+ pwop(self.as_array(), x2.as_array(), *args, out=out.as_array(), **kwargs )
+ return type(self)(out.as_array(),
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+ else:
+ raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
+ elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)):
+ if self.check_dimensions(out):
+ pwop(self.as_array(), x2, *args, out=out.as_array(), **kwargs )
+ return type(self)(out.as_array(),
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+ else:
+ raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
+ elif issubclass(type(out), numpy.ndarray):
+ if self.array.shape == out.shape and self.array.dtype == out.dtype:
+ pwop(self.as_array(), x2 , *args, out=out, **kwargs)
+ return type(self)(out,
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+ else:
+ raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
+
+ def add(self, other , *args, out=None, **kwargs):
+ return self.pixel_wise_binary(numpy.add, other, *args, out=out, **kwargs)
+
+ def subtract(self, other , *args, out=None, **kwargs):
+ return self.pixel_wise_binary(numpy.subtract, other, *args, out=out, **kwargs)
+
+ def multiply(self, other , *args, out=None, **kwargs):
+ return self.pixel_wise_binary(numpy.multiply, other, *args, out=out, **kwargs)
+
+ def divide(self, other , *args, out=None, **kwargs):
+ return self.pixel_wise_binary(numpy.divide, other, *args, out=out, **kwargs)
+
+ def power(self, other , *args, out=None, **kwargs):
+ return self.pixel_wise_binary(numpy.power, other, *args, out=out, **kwargs)
+
+ def maximum(self,x2, out=None):
+ return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out)
+
+ ## unary operations
+
+ def pixel_wise_unary(self,pwop, *args, out=None, **kwargs):
+ if out is None:
+ out = pwop(self.as_array() , *args, **kwargs )
+ return type(self)(out,
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+ elif issubclass(type(out), DataContainer):
+ if self.check_dimensions(out):
+ pwop(self.as_array(), *args, out=out.as_array(), **kwargs )
+ return type(self)(out.as_array(),
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+ else:
+ raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
+ elif issubclass(type(out), numpy.ndarray):
+ if self.array.shape == out.shape and self.array.dtype == out.dtype:
+ pwop(self.as_array(), *args, out=out, **kwargs)
+ return type(self)(out,
+ deep_copy=False,
+ dimension_labels=self.dimension_labels,
+ geometry=self.geometry)
+ else:
+ raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
+
+ def abs(self, out=None):
+ return self.pixel_wise_unary(numpy.abs, out=out)
+
+ def sign(self, out=None):
+ #out = numpy.sign(self.as_array() )
+ #return type(self)(out,
+ # deep_copy=False,
+ # dimension_labels=self.dimension_labels,
+ # geometry=self.geometry)
+ return self.pixel_wise_unary(numpy.sign , out=out)
+
+ def sqrt(self, out=None):
+ return self.pixel_wise_unary(numpy.sqrt, out=out)
+
+ #def __abs__(self):
+ # operation = FM.OPERATION.ABS
+ # return self.callFieldMath(operation, None, self.mask, self.maskOnValue)
+ # __abs__
+
+ ## reductions
+ def sum(self):
+ return self.as_array().sum()
+
+
class ImageData(DataContainer):
'''DataContainer for holding 2D or 3D DataContainer'''
def __init__(self,
array = None,
- deep_copy=True,
+ deep_copy=False,
dimension_labels=None,
**kwargs):
@@ -1131,4 +1255,4 @@ if __name__ == '__main__':
pixel_num_h=5 , channels=2)
sino = AcquisitionData(geometry=sgeometry)
sino2 = sino.clone()
- \ No newline at end of file
+
diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py
index 570df4b..4a6a383 100755
--- a/Wrappers/Python/ccpi/optimisation/algs.py
+++ b/Wrappers/Python/ccpi/optimisation/algs.py
@@ -43,22 +43,22 @@ def FISTA(x_init, f=None, g=None, opt=None):
# algorithmic parameters
if opt is None:
- opt = {'tol': 1e-4, 'iter': 1000}
- else:
- try:
- max_iter = opt['iter']
- except KeyError as ke:
- opt[ke] = 1000
- try:
- opt['tol'] = 1000
- except KeyError as ke:
- opt[ke] = 1e-4
- tol = opt['tol']
- max_iter = opt['iter']
+ opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False}
+ max_iter = opt['iter'] if 'iter' in opt.keys() else 1000
+ tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
+ memopt = opt['memopt'] if 'memopt' in opt.keys() else False
+
+
# initialization
- x_old = x_init
- y = x_init;
+ if memopt:
+ y = x_init.clone()
+ x_old = x_init.clone()
+ x = x_init.clone()
+ u = x_init.clone()
+ else:
+ x_old = x_init
+ y = x_init;
timing = numpy.zeros(max_iter)
criter = numpy.zeros(max_iter)
@@ -66,22 +66,44 @@ def FISTA(x_init, f=None, g=None, opt=None):
invL = 1/f.L
t_old = 1
+
+ c = f(x_init) + g(x_init)
# algorithm loop
for it in range(0, max_iter):
time0 = time.time()
-
- u = y - invL*f.grad(y)
-
- x = g.prox(u,invL)
-
- t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2)))
-
- y = x + (t_old-1)/t*(x-x_old)
-
- x_old = x
- t_old = t
+ if memopt:
+ # u = y - invL*f.grad(y)
+ # store the result in x_old
+ f.gradient(y, out=u)
+ u.__imul__( -invL )
+ u.__iadd__( y )
+ # x = g.prox(u,invL)
+ g.proximal(u, invL, out=x)
+
+ t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2)))
+
+ # y = x + (t_old-1)/t*(x-x_old)
+ x.subtract(x_old, out=y)
+ y.__imul__ ((t_old-1)/t)
+ y.__iadd__( x )
+
+ x_old.fill(x)
+ t_old = t
+
+
+ else:
+ u = y - invL*f.grad(y)
+
+ x = g.prox(u,invL)
+
+ t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2)))
+
+ y = x + (t_old-1)/t*(x-x_old)
+
+ x_old = x.copy()
+ t_old = t
# time and criterion
timing[it] = time.time() - time0
@@ -93,7 +115,7 @@ def FISTA(x_init, f=None, g=None, opt=None):
#print(it, 'out of', 10, 'iterations', end='\r');
- criter = criter[0:it+1];
+ #criter = criter[0:it+1];
timing = numpy.cumsum(timing[0:it+1]);
return x, it, timing, criter
@@ -127,6 +149,7 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None):
opt[ke] = 1e-4
tol = opt['tol']
max_iter = opt['iter']
+ memopt = opt['memopts'] if 'memopts' in opt.keys() else False
# step-sizes
tau = 2 / (g.L + 2);
@@ -141,6 +164,9 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None):
timing = numpy.zeros(max_iter)
criter = numpy.zeros(max_iter)
+
+
+
# algorithm loop
for it in range(0, max_iter):
diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index 4a0a139..490d742 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -19,16 +19,31 @@
from ccpi.optimisation.ops import Identity, FiniteDiff2D
import numpy
+from ccpi.framework import DataContainer
+def isSizeCorrect(data1 ,data2):
+ if issubclass(type(data1), DataContainer) and \
+ issubclass(type(data2), DataContainer):
+ # check dimensionality
+ if data1.check_dimensions(data2):
+ return True
+ elif issubclass(type(data1) , numpy.ndarray) and \
+ issubclass(type(data2) , numpy.ndarray):
+ return data1.shape == data2.shape
+ else:
+ raise ValueError("{0}: getting two incompatible types: {1} {2}"\
+ .format('Function', type(data1), type(data2)))
+ return False
+
class Function(object):
def __init__(self):
self.op = Identity()
- def __call__(self,x): return 0
- def grad(self, x): return 0
- def prox(self, x, tau): return x
- def gradient(self, x): return self.grad(x)
- def proximal(self, x, tau): return self.prox(x, tau)
+ def __call__(self,x, out=None): return 0
+ def grad(self, x): return 0
+ def prox(self, x, tau): return x
+ def gradient(self, x, out=None): return self.grad(x)
+ def proximal(self, x, tau, out=None): return self.prox(x, tau)
class Norm2(Function):
@@ -39,10 +54,25 @@ class Norm2(Function):
self.gamma = gamma;
self.direction = direction;
- def __call__(self, x):
+ def __call__(self, x, out=None):
- xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction,
+ if out is None:
+ xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction,
keepdims=True))
+ else:
+ if isSizeCorrect(out, x):
+ # check dimensionality
+ if issubclass(type(out), DataContainer):
+ arr = out.as_array()
+ numpy.square(x.as_array(), out=arr)
+ xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True))
+
+ elif issubclass(type(out) , numpy.ndarray):
+ numpy.square(x.as_array(), out=out)
+ xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True))
+ else:
+ raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
+
p = numpy.sum(self.gamma*xx)
return p
@@ -55,6 +85,30 @@ class Norm2(Function):
p = x.as_array() * xx
return type(x)(p,geometry=x.geometry)
+ def proximal(self, x, tau, out=None):
+ if out is None:
+ return self.prox(x,tau)
+ else:
+ if isSizeCorrect(out, x):
+ # check dimensionality
+ if issubclass(type(out), DataContainer):
+ numpy.square(x.as_array(), out = out.as_array())
+ xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction,
+ keepdims=True ))
+ xx = numpy.maximum(0, 1 - tau*self.gamma / xx)
+ x.multiply(xx, out= out.as_array())
+
+
+ elif issubclass(type(out) , numpy.ndarray):
+ numpy.square(x.as_array(), out=out)
+ xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True))
+
+ xx = numpy.maximum(0, 1 - tau*self.gamma / xx)
+ x.multiply(xx, out= out)
+ else:
+ raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
+
+
class TV2D(Norm2):
@@ -81,10 +135,16 @@ class Norm2sq(Function):
'''
- def __init__(self,A,b,c=1.0):
+ def __init__(self,A,b,c=1.0,memopt=False):
self.A = A # Should be an operator, default identity
self.b = b # Default zero DataSet?
self.c = c # Default 1.
+ self.memopt = memopt
+ if memopt:
+ #self.direct_placehold = A.adjoint(b)
+ self.direct_placehold = A.allocate_direct()
+ self.adjoint_placehold = A.allocate_adjoint()
+
# Compute the Lipschitz parameter from the operator.
# Initialise to None instead and only call when needed.
@@ -93,11 +153,31 @@ class Norm2sq(Function):
def grad(self,x):
#return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b )
- return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b )
+ return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
def __call__(self,x):
#return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
- return self.c*( ( (self.A.direct(x)-self.b)**2).sum() )
+ #if out is None:
+ # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() )
+ #else:
+ y = self.A.direct(x)
+ y.__isub__(self.b)
+ y.__imul__(y)
+ return y.sum() * self.c
+
+ def gradient(self, x, out = None):
+ if self.memopt:
+ #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b )
+
+ self.A.direct(x, out=self.adjoint_placehold)
+ self.adjoint_placehold.__isub__( self.b )
+ self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold)
+ self.direct_placehold.__imul__(2.0 * self.c)
+ # can this be avoided?
+ out.fill(self.direct_placehold)
+ else:
+ return self.grad(x)
+
class ZeroFun(Function):
@@ -111,20 +191,33 @@ class ZeroFun(Function):
return 0
def prox(self,x,tau):
- return x
+ return x.copy()
+
+ def proximal(self, x, tau, out=None):
+ if out is None:
+ return self.prox(x, tau)
+ else:
+ out.fill(x)
# A more interesting example, least squares plus 1-norm minimization.
# Define class to represent 1-norm including prox function
class Norm1(Function):
def __init__(self,gamma):
- # Do nothing
self.gamma = gamma
self.L = 1
+ self.sign_x = None
super(Norm1, self).__init__()
- def __call__(self,x):
- return self.gamma*(x.abs().sum())
+ def __call__(self,x,out=None):
+ if out is None:
+ return self.gamma*(x.abs().sum())
+ else:
+ if not x.shape == out.shape:
+ raise ValueError('Norm1 Incompatible size:',
+ x.shape, out.shape)
+ x.abs(out=out)
+ return out.sum() * self.gamma
def prox(self,x,tau):
return (x.abs() - tau*self.gamma).maximum(0) * x.sign()
@@ -153,3 +246,21 @@ class IndicatorBox(Function):
def prox(self,x,tau=None):
return (x.maximum(self.lower)).minimum(self.upper)
+ def proximal(self, x, tau, out=None):
+ if out is None:
+ return self.prox(x, tau)
+ else:
+ if not x.shape == out.shape:
+ raise ValueError('Norm1 Incompatible size:',
+ x.shape, out.shape)
+ #(x.abs() - tau*self.gamma).maximum(0) * x.sign()
+ x.abs(out = out)
+ out.__isub__(tau*self.gamma)
+ out.maximum(0, out=out)
+ if self.sign_x is None or not x.shape == self.sign_x.shape:
+ self.sign_x = x.sign()
+ else:
+ x.sign(out=self.sign_x)
+
+ out.__imul__( self.sign_x )
+
diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py
index 668b07e..c6775a8 100755
--- a/Wrappers/Python/ccpi/optimisation/ops.py
+++ b/Wrappers/Python/ccpi/optimisation/ops.py
@@ -19,55 +19,106 @@
import numpy
from scipy.sparse.linalg import svds
+from ccpi.framework import DataContainer
+from ccpi.framework import AcquisitionData
+from ccpi.framework import ImageData
+from ccpi.framework import ImageGeometry
+from ccpi.framework import AcquisitionGeometry
# Maybe operators need to know what types they take as inputs/outputs
# to not just use generic DataContainer
class Operator(object):
- def direct(self,x):
+ def direct(self,x, out=None):
return x
- def adjoint(self,x):
+ def adjoint(self,x, out=None):
return x
def size(self):
# To be defined for specific class
raise NotImplementedError
def get_max_sing_val(self):
raise NotImplementedError
+ def allocate_direct(self):
+ raise NotImplementedError
+ def allocate_adjoint(self):
+ raise NotImplementedError
class Identity(Operator):
def __init__(self):
self.s1 = 1.0
super(Identity, self).__init__()
- def direct(self,x):
- return x
+ def direct(self,x,out=None):
+ if out is None:
+ return x.copy()
+ else:
+ out.fill(x)
- def adjoint(self,x):
- return x
+ def adjoint(self,x, out=None):
+ if out is None:
+ return x.copy()
+ else:
+ out.fill(x)
+
+ def size(self):
+ return NotImplemented
+
+ def get_max_sing_val(self):
+ return self.s1
+
+class TomoIdentity(Operator):
+ def __init__(self, geometry, **kwargs):
+ self.s1 = 1.0
+ self.geometry = geometry
+ super(TomoIdentity, self).__init__()
+
+ def direct(self,x,out=None):
+ if out is None:
+ return x.copy()
+ else:
+ out.fill(x)
+
+ def adjoint(self,x, out=None):
+ if out is None:
+ return x.copy()
+ else:
+ out.fill(x)
def size(self):
return NotImplemented
def get_max_sing_val(self):
return self.s1
+ def allocate_direct(self):
+ if issubclass(type(self.geometry), ImageGeometry):
+ return ImageData(geometry=self.geometry)
+ elif issubclass(type(self.geometry), AcquisitionGeometry):
+ return AcquisitionData(geometry=self.geometry)
+ else:
+ raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry))
+ def allocate_adjoint(self):
+ return self.allocate_direct()
+
+
class FiniteDiff2D(Operator):
def __init__(self):
self.s1 = 8.0
super(FiniteDiff2D, self).__init__()
- def direct(self,x):
+ def direct(self,x, out=None):
'''Forward differences with Neumann BC.'''
+ # FIXME this seems to be working only with numpy arrays
d1 = numpy.zeros_like(x.as_array())
d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1]
d2 = numpy.zeros_like(x.as_array())
d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:]
d = numpy.stack((d1,d2),0)
- return type(x)(d,geometry=x.geometry)
+ return type(x)(d,False,geometry=x.geometry)
- def adjoint(self,x):
+ def adjoint(self,x, out=None):
'''Backward differences, Neumann BC.'''
Nrows = x.get_dimension_size('horizontal_x')
Ncols = x.get_dimension_size('horizontal_x')
@@ -76,12 +127,16 @@ class FiniteDiff2D(Operator):
Nchannels = x.get_dimension_size('channel')
zer = numpy.zeros((Nrows,1))
xxx = x.as_array()[0,:,:-1]
- h = numpy.concatenate((zer,xxx), 1) - numpy.concatenate((xxx,zer), 1)
+ #
+ h = numpy.concatenate((zer,xxx), 1)
+ h -= numpy.concatenate((xxx,zer), 1)
zer = numpy.zeros((1,Ncols))
xxx = x.as_array()[1,:-1,:]
- v = numpy.concatenate((zer,xxx), 0) - numpy.concatenate((xxx,zer), 0)
- return type(x)(h + v,geometry=x.geometry)
+ #
+ v = numpy.concatenate((zer,xxx), 0)
+ v -= numpy.concatenate((xxx,zer), 0)
+ return type(x)(h + v, False, geometry=x.geometry)
def size(self):
return NotImplemented
@@ -132,7 +187,7 @@ def PowerMethodNonsquareOld(op,numiters):
def PowerMethodNonsquare(op,numiters):
# Initialise random
# Jakob's
- #inputsize = op.size()[1]
+ inputsize , outputsize = op.size()
#x0 = ImageContainer(numpy.random.randn(*inputsize)
# Edo's
#vg = ImageGeometry(voxel_num_x=inputsize[0],
@@ -143,7 +198,10 @@ def PowerMethodNonsquare(op,numiters):
#print (x0)
#x0.fill(numpy.random.randn(*x0.shape))
- x0 = op.create_image_data()
+
+ #x0 = op.create_image_data()
+ x0 = op.allocate_direct()
+ x0.fill(numpy.random.randn(*x0.shape))
s = numpy.zeros(numiters)
# Loop
@@ -162,11 +220,19 @@ class LinearOperatorMatrix(Operator):
self.s1 = None # Largest singular value, initially unknown
super(LinearOperatorMatrix, self).__init__()
- def direct(self,x):
- return type(x)(numpy.dot(self.A,x.as_array()))
+ def direct(self,x, out=None):
+ if out is None:
+ return type(x)(numpy.dot(self.A,x.as_array()))
+ else:
+ numpy.dot(self.A, x.as_array(), out=out.as_array())
+
- def adjoint(self,x):
- return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
+ def adjoint(self,x, out=None):
+ if out is None:
+ return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
+ else:
+ numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array())
+
def size(self):
return self.A.shape
@@ -178,3 +244,15 @@ class LinearOperatorMatrix(Operator):
return self.s1
else:
return self.s1
+ def allocate_direct(self):
+ '''allocates the memory to hold the result of adjoint'''
+ #numpy.dot(self.A.transpose(),x.as_array())
+ M_A, N_A = self.A.shape
+ out = numpy.zeros((N_A,1))
+ return DataContainer(out)
+ def allocate_adjoint(self):
+ '''allocate the memory to hold the result of direct'''
+ #numpy.dot(self.A.transpose(),x.as_array())
+ M_A, N_A = self.A.shape
+ out = numpy.zeros((M_A,1))
+ return DataContainer(out)
diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml
index 8575b8a..1b3d910 100644
--- a/Wrappers/Python/conda-recipe/meta.yaml
+++ b/Wrappers/Python/conda-recipe/meta.yaml
@@ -20,6 +20,10 @@ requirements:
- numpy
- scipy
- matplotlib
+ run:
+ - python
+ - numpy
+ - cvxpy
about:
home: http://www.ccpi.ac.uk
diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py
index 6a20d05..7df5c07 100755
--- a/Wrappers/Python/conda-recipe/run_test.py
+++ b/Wrappers/Python/conda-recipe/run_test.py
@@ -1,8 +1,33 @@
import unittest
import numpy
-from ccpi.framework import DataContainer, ImageData, AcquisitionData, \
- ImageGeometry, AcquisitionGeometry
+import numpy as np
+from ccpi.framework import DataContainer
+from ccpi.framework import ImageData
+from ccpi.framework import AcquisitionData
+from ccpi.framework import ImageGeometry
+from ccpi.framework import AcquisitionGeometry
import sys
+from timeit import default_timer as timer
+from ccpi.optimisation.algs import FISTA
+from ccpi.optimisation.algs import FBPD
+from ccpi.optimisation.funcs import Norm2sq
+from ccpi.optimisation.funcs import ZeroFun
+from ccpi.optimisation.funcs import Norm1
+from ccpi.optimisation.funcs import TV2D
+
+from ccpi.optimisation.ops import LinearOperatorMatrix
+from ccpi.optimisation.ops import TomoIdentity
+
+from cvxpy import *
+
+
+def aid(x):
+ # This function returns the memory
+ # block address of an array.
+ return x.__array_interface__['data'][0]
+
+def dt (steps):
+ return steps[-1] - steps[-2]
class TestDataContainer(unittest.TestCase):
@@ -21,6 +46,282 @@ class TestDataContainer(unittest.TestCase):
self.assertEqual(sys.getrefcount(a),3)
self.assertEqual(ds.dimension_labels , {0: 'X', 1: 'Y', 2: 'Z', 3: 'W'})
+ def testGb_creation_nocopy(self):
+ X,Y,Z = 512,512,512
+ steps = [timer()]
+ a = numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ print("test clone")
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+ #print("a refcount " , sys.getrefcount(a))
+ self.assertEqual(sys.getrefcount(a),3)
+ ds1 = ds.copy()
+ self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array()))
+ ds1 = ds.clone()
+ self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array()))
+
+
+ def testInlineAlgebra(self):
+ print ("Test Inline Algebra")
+ X,Y,Z = 1024,512,512
+ steps = [timer()]
+ a = numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ print(t0)
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+ #ds.__iadd__( 2 )
+ ds += 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],3.)
+ #ds.__isub__( 2 )
+ ds -= 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],1.)
+ #ds.__imul__( 2 )
+ ds *= 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],2.)
+ #ds.__idiv__( 2 )
+ ds /= 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],1.)
+
+ ds1 = ds.copy()
+ #ds1.__iadd__( 1 )
+ ds1 += 1
+ #ds.__iadd__( ds1 )
+ ds += ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],3.)
+ #ds.__isub__( ds1 )
+ ds -= ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],1.)
+ #ds.__imul__( ds1 )
+ ds *= ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],2.)
+ #ds.__idiv__( ds1 )
+ ds /= ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],1.)
+
+
+ def test_unary_operations(self):
+ print ("Test unary operations")
+ X,Y,Z = 1024,512,512
+ steps = [timer()]
+ a = -numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ print(t0)
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+
+ ds.sign(out=ds)
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],-1.)
+
+ ds.abs(out=ds)
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],1.)
+
+ ds.__imul__( 2 )
+ ds.sqrt(out=ds)
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],numpy.sqrt(2., dtype='float32'))
+
+
+
+ def test_binary_operations(self):
+ self.binary_add()
+ self.binary_subtract()
+ self.binary_multiply()
+ self.binary_divide()
+
+ def binary_add(self):
+ print ("Test binary add")
+ X,Y,Z = 512,512,512
+ steps = [timer()]
+ a = numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.add(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.add(ds1, out=ds)",dt(steps))
+ steps.append(timer())
+ ds2 = ds.add(ds1)
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.add(ds1)",dt(steps))
+
+ self.assertLess(t1,t2)
+ self.assertEqual(ds.as_array()[0][0][0] , 2.)
+
+ ds0 = ds
+ ds0.add(2,out=ds0)
+ steps.append(timer())
+ print ("ds0.add(2,out=ds0)", dt(steps), 3 , ds0.as_array()[0][0][0])
+ self.assertEqual(4., ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.add(2)
+ steps.append(timer())
+ print ("ds3 = ds0.add(2)", dt(steps), 5 , ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1,dt2)
+
+ def binary_subtract(self):
+ print ("Test binary subtract")
+ X,Y,Z = 512,512,512
+ steps = [timer()]
+ a = numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.subtract(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.subtract(ds1, out=ds)",dt(steps))
+ self.assertEqual(0., ds.as_array()[0][0][0])
+
+ steps.append(timer())
+ ds2 = ds.subtract(ds1)
+ self.assertEqual(-1., ds2.as_array()[0][0][0])
+
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.subtract(ds1)",dt(steps))
+
+ self.assertLess(t1,t2)
+
+ del ds1
+ ds0 = ds.copy()
+ steps.append(timer())
+ ds0.subtract(2,out=ds0)
+ #ds0.__isub__( 2 )
+ steps.append(timer())
+ print ("ds0.subtract(2,out=ds0)", dt(steps), -2. , ds0.as_array()[0][0][0])
+ self.assertEqual(-2., ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.subtract(2)
+ steps.append(timer())
+ print ("ds3 = ds0.subtract(2)", dt(steps), 0. , ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1,dt2)
+ self.assertEqual(-2., ds0.as_array()[0][0][0])
+ self.assertEqual(-4., ds3.as_array()[0][0][0])
+
+ def binary_multiply(self):
+ print ("Test binary multiply")
+ X,Y,Z = 1024,512,512
+ steps = [timer()]
+ a = numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.multiply(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.multiply(ds1, out=ds)",dt(steps))
+ steps.append(timer())
+ ds2 = ds.multiply(ds1)
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.multiply(ds1)",dt(steps))
+
+ self.assertLess(t1,t2)
+
+ ds0 = ds
+ ds0.multiply(2,out=ds0)
+ steps.append(timer())
+ print ("ds0.multiply(2,out=ds0)", dt(steps), 2. , ds0.as_array()[0][0][0])
+ self.assertEqual(2., ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.multiply(2)
+ steps.append(timer())
+ print ("ds3 = ds0.multiply(2)", dt(steps), 4. , ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1,dt2)
+ self.assertEqual(4., ds3.as_array()[0][0][0])
+ self.assertEqual(2., ds.as_array()[0][0][0])
+
+ def binary_divide(self):
+ print ("Test binary divide")
+ X,Y,Z = 1024,512,512
+ steps = [timer()]
+ a = numpy.ones((X,Y,Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y','Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.divide(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.divide(ds1, out=ds)",dt(steps))
+ steps.append(timer())
+ ds2 = ds.divide(ds1)
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.divide(ds1)",dt(steps))
+
+ self.assertLess(t1,t2)
+ self.assertEqual(ds.as_array()[0][0][0] , 1.)
+
+ ds0 = ds
+ ds0.divide(2,out=ds0)
+ steps.append(timer())
+ print ("ds0.divide(2,out=ds0)", dt(steps), 0.5 , ds0.as_array()[0][0][0])
+ self.assertEqual(0.5, ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.divide(2)
+ steps.append(timer())
+ print ("ds3 = ds0.divide(2)", dt(steps), 0.25 , ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1,dt2)
+ self.assertEqual(.25, ds3.as_array()[0][0][0])
+ self.assertEqual(.5, ds.as_array()[0][0][0])
+
+
def test_creation_copy(self):
shape = (2,3,4,5)
size = shape[0]
@@ -147,6 +448,235 @@ class TestDataContainer(unittest.TestCase):
res = False
print (err)
self.assertTrue(res)
+
+
+ def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
+ res = True
+ try:
+ numpy.testing.assert_array_almost_equal(first, second, decimal)
+ except AssertionError as err:
+ res = False
+ print (err)
+ self.assertTrue(res)
+
class TestAlgorithms(unittest.TestCase):
    """End-to-end regression tests of FISTA and FBPD (memopt code paths)
    against reference solutions computed with CVXPY/SCS.

    Requires cvxpy with the SCS solver; the helpers below mirror those in
    TestDataContainer.
    """
    def assertNumpyArrayEqual(self, first, second):
        # Wrap numpy's exact element-wise equality check so mismatches are
        # reported through unittest; numpy's diagnostic goes to stdout only.
        res = True
        try:
            numpy.testing.assert_array_equal(first, second)
        except AssertionError as err:
            res = False
            print (err)
        self.assertTrue(res)


    def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
        # As above, but element-wise equality to `decimal` decimal places.
        res = True
        try:
            numpy.testing.assert_array_almost_equal(first, second, decimal)
        except AssertionError as err:
            res = False
            print (err)
        self.assertTrue(res)
    def test_FISTA(self):
        """Check FISTA and FBPD (with memopt=True) reproduce CVXPY solutions
        for: LS, LS + 1-norm, LS + 1-norm denoising, and TV denoising.
        """
        # Problem data.
        m = 30
        n = 20
        np.random.seed(1)  # fixed seed: deterministic test problem
        Amat = np.random.randn(m, n)
        A = LinearOperatorMatrix(Amat)
        bmat = np.random.randn(m)
        bmat.shape = (bmat.shape[0],1)  # column vector

        # A = Identity()
        # Change n to equal to m.

        b = DataContainer(bmat)

        # Regularization parameter
        lam = 10
        # NOTE(review): 'memopt' presumably selects the in-place/buffer-reusing
        # code paths added in this changeset -- confirm in optimisation.algs.
        opt = {'memopt':True}
        # Create object instances with the test data A and b.
        f = Norm2sq(A,b,c=0.5, memopt=True)
        g0 = ZeroFun()

        # Initial guess
        x_init = DataContainer(np.zeros((n,1)))

        # Smoke-call the gradient once before running the algorithm.
        f.grad(x_init)

        # Run FISTA for least squares plus zero function.
        x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)

        # Print solution and final objective/criterion value for comparison
        print("FISTA least squares plus zero function solution and objective value:")
        print(x_fista0.array)
        print(criter0[-1])

        # Compare to CVXPY

        # Construct the problem.
        x0 = Variable(n)
        objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
        prob0 = Problem(objective0)

        # The optimal objective is returned by prob.solve().
        result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)

        # The optimal solution for x is stored in x.value and optimal objective value
        # is in result as well as in objective.value
        print("CVXPY least squares plus zero function solution and objective value:")
        print(x0.value)
        print(objective0.value)
        # FISTA solution must match the CVXPY solution to 6 decimal places.
        self.assertNumpyArrayAlmostEqual(
            numpy.squeeze(x_fista0.array),x0.value,6)
        # Create 1-norm object instance
        g1 = Norm1(lam)

        # Smoke-call the function value and its proximal operator.
        g1(x_init)
        g1.prox(x_init,0.02)

        # Combine with least squares and solve using generic FISTA implementation
        x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)

        # Print for comparison
        print("FISTA least squares plus 1-norm solution and objective value:")
        print(x_fista1)
        print(criter1[-1])

        # Compare to CVXPY

        # Construct the problem.
        x1 = Variable(n)
        objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
        prob1 = Problem(objective1)

        # The optimal objective is returned by prob.solve().
        result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)

        # The optimal solution for x is stored in x.value and optimal objective value
        # is in result as well as in objective.value
        print("CVXPY least squares plus 1-norm solution and objective value:")
        print(x1.value)
        print(objective1.value)

        self.assertNumpyArrayAlmostEqual(
            numpy.squeeze(x_fista1.array),x1.value,6)

        # Now try another algorithm FBPD for same problem:
        x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
        print(x_fbpd1)
        print(criterfbpd1[-1])

        # FBPD must reach the same CVXPY solution as FISTA did.
        self.assertNumpyArrayAlmostEqual(
            numpy.squeeze(x_fbpd1.array),x1.value,6)
        # Plot criterion curve to see both FISTA and FBPD converge to same value.
        # Note that FISTA is very efficient for 1-norm minimization so it beats
        # FBPD in this test by a lot. But FBPD can handle a larger class of problems
        # than FISTA can.


        # Now try 1-norm and TV denoising with FBPD, first 1-norm.

        # Set up phantom size NxN by creating ImageGeometry, initialising the
        # ImageData object with this geometry and empty array and finally put some
        # data into its array, and display as image.
        N = 64
        ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
        Phantom = ImageData(geometry=ig)

        # Draw a simple piecewise-constant phantom directly into the array.
        x = Phantom.as_array()
        x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
        x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1


        # Identity operator for denoising
        I = TomoIdentity(ig)

        # Data and add noise
        # NOTE(review): np.random.seed(1) above also fixes this noise draw,
        # keeping the denoising comparison deterministic.
        y = I.direct(Phantom)
        y.array = y.array + 0.1*np.random.randn(N, N)


        # Data fidelity term
        f_denoise = Norm2sq(I,y,c=0.5,memopt=True)

        # 1-norm regulariser
        lam1_denoise = 1.0
        g1_denoise = Norm1(lam1_denoise)

        # Initial guess
        x_init_denoise = ImageData(np.zeros((N,N)))

        # Combine with least squares and solve using generic FISTA implementation
        x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)

        print(x_fista1_denoise)
        print(criter1_denoise[-1])


        # Now denoise LS + 1-norm with FBPD
        x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
        print(x_fbpd1_denoise)
        print(criterfbpd1_denoise[-1])


        # Compare to CVXPY

        # Construct the problem.
        # NOTE(review): Variable(N**2,1) is the legacy cvxpy < 1.0 signature
        # (cf. Variable((N,N)) used for the TV problem below) -- modern cvxpy
        # expects a single shape tuple; confirm the pinned cvxpy version.
        x1_denoise = Variable(N**2,1)
        objective1_denoise = Minimize(0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise,1) )
        prob1_denoise = Problem(objective1_denoise)

        # The optimal objective is returned by prob.solve().
        result1_denoise = prob1_denoise.solve(verbose=False,solver=SCS,eps=1e-12)

        # The optimal solution for x is stored in x.value and optimal objective value
        # is in result as well as in objective.value
        print("CVXPY least squares plus 1-norm solution and objective value:")
        print(x1_denoise.value)
        print(objective1_denoise.value)
        self.assertNumpyArrayAlmostEqual(
            x_fista1_denoise.array.flatten(),x1_denoise.value,5)

        self.assertNumpyArrayAlmostEqual(
            x_fbpd1_denoise.array.flatten(),x1_denoise.value,5)
        x1_cvx = x1_denoise.value
        x1_cvx.shape = (N,N)  # reshape flat CVXPY solution back to an image


        # Now TV with FBPD
        lam_tv = 0.1
        gtv = TV2D(lam_tv)
        # Smoke-call the TV functional once on the (transformed) initial guess.
        gtv(gtv.op.direct(x_init_denoise))

        opt_tv = {'tol': 1e-4, 'iter': 10000}

        x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv)
        print(x_fbpdtv_denoise)
        print(criterfbpdtv_denoise[-1])



        # Compare to CVXPY

        # Construct the problem.
        xtv_denoise = Variable((N,N))
        objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) )
        probtv_denoise = Problem(objectivetv_denoise)

        # The optimal objective is returned by prob.solve().
        resulttv_denoise = probtv_denoise.solve(verbose=False,solver=SCS,eps=1e-12)

        # The optimal solution for x is stored in x.value and optimal objective value
        # is in result as well as in objective.value
        # NOTE(review): message says "1-norm" but this block reports the TV
        # solution -- copy-paste leftover from the section above.
        print("CVXPY least squares plus 1-norm solution and objective value:")
        print(xtv_denoise.value)
        print(objectivetv_denoise.value)

        # Only 1 decimal place: FBPD-TV converges slowly, so agreement with
        # the CVXPY TV solution is checked to a loose tolerance.
        self.assertNumpyArrayAlmostEqual(
            x_fbpdtv_denoise.as_array(),xtv_denoise.value,1)
+
# =============================================================================
# def test_upper(self):
# self.assertEqual('foo'.upper(), 'FOO')
@@ -164,4 +694,4 @@ class TestDataContainer(unittest.TestCase):
# =============================================================================
if __name__ == '__main__':
- unittest.main() \ No newline at end of file
+ unittest.main()
diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py
index cbfe50e..2df11c1 100644
--- a/Wrappers/Python/wip/demo_compare_cvx.py
+++ b/Wrappers/Python/wip/demo_compare_cvx.py
@@ -3,7 +3,7 @@ from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataCo
from ccpi.optimisation.algs import FISTA, FBPD, CGLS
from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D
-from ccpi.optimisation.ops import LinearOperatorMatrix, Identity
+from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
# Requires CVXPY, see http://www.cvxpy.org/
# CVXPY can be installed in anaconda using
@@ -33,9 +33,9 @@ b = DataContainer(bmat)
# Regularization parameter
lam = 10
-
+opt = {'memopt':True}
# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5)
+f = Norm2sq(A,b,c=0.5, memopt=True)
g0 = ZeroFun()
# Initial guess
@@ -44,7 +44,7 @@ x_init = DataContainer(np.zeros((n,1)))
f.grad(x_init)
# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0)
+x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)
# Print solution and final objective/criterion value for comparison
print("FISTA least squares plus zero function solution and objective value:")
@@ -56,7 +56,7 @@ if use_cvxpy:
# Construct the problem.
x0 = Variable(n)
- objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat) )
+ objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
prob0 = Problem(objective0)
# The optimal objective is returned by prob.solve().
@@ -83,7 +83,7 @@ g1(x_init)
g1.prox(x_init,0.02)
# Combine with least squares and solve using generic FISTA implementation
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1)
+x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
# Print for comparison
print("FISTA least squares plus 1-norm solution and objective value:")
@@ -95,7 +95,7 @@ if use_cvxpy:
# Construct the problem.
x1 = Variable(n)
- objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat) + lam*norm(x1,1) )
+ objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
prob1 = Problem(objective1)
# The optimal objective is returned by prob.solve().
@@ -147,7 +147,7 @@ plt.title('Phantom image')
plt.show()
# Identity operator for denoising
-I = Identity()
+I = TomoIdentity(ig)
# Data and add noise
y = I.direct(Phantom)
@@ -158,7 +158,7 @@ plt.title('Noisy image')
plt.show()
# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5)
+f_denoise = Norm2sq(I,y,c=0.5,memopt=True)
# 1-norm regulariser
lam1_denoise = 1.0
@@ -168,7 +168,7 @@ g1_denoise = Norm1(lam1_denoise)
x_init_denoise = ImageData(np.zeros((N,N)))
# Combine with least squares and solve using generic FISTA implementation
-x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise)
+x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
print(x_fista1_denoise)
print(criter1_denoise[-1])
@@ -229,7 +229,8 @@ if use_cvxpy:
# Compare to CVXPY
# Construct the problem.
- xtv_denoise = Variable(N,N)
+ xtv_denoise = Variable((N,N))
+ print (xtv_denoise.shape)
objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) )
probtv_denoise = Problem(objectivetv_denoise)
@@ -247,4 +248,4 @@ plt.title('CVX TV')
plt.show()
plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV')
-plt.loglog(criterfbpdtv_denoise, label='FBPD TV') \ No newline at end of file
+plt.loglog(criterfbpdtv_denoise, label='FBPD TV')
diff --git a/Wrappers/Python/wip/demo_memhandle.py b/Wrappers/Python/wip/demo_memhandle.py
new file mode 100755
index 0000000..db48d73
--- /dev/null
+++ b/Wrappers/Python/wip/demo_memhandle.py
@@ -0,0 +1,193 @@
+
from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
from ccpi.optimisation.algs import FISTA, FBPD, CGLS
from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D

from ccpi.optimisation.ops import LinearOperatorMatrix, Identity
from ccpi.optimisation.ops import TomoIdentity

import numpy as np
import matplotlib.pyplot as plt

# Demo comparing the memory-optimised ('memopt') code paths of FISTA and
# FBPD with the default allocating paths on:
#   1. a small dense least-squares problem (LS and LS + 1-norm)
#   2. image denoising with 1-norm and TV regularisation
# Each memopt/non-memopt pair should converge to the same criterion values.

# Problem data.
m = 30
n = 20
np.random.seed(1)  # reproducible problem data
Amat = np.random.randn(m, n)
A = LinearOperatorMatrix(Amat)
bmat = np.random.randn(m)
bmat.shape = (bmat.shape[0], 1)  # make b a column vector

b = DataContainer(bmat)

# Regularization parameter
lam = 10

# Create object instances with the test data A and b.
# NOTE(review): memopt=True presumably makes Norm2sq reuse internal work
# buffers -- confirm against ccpi.optimisation.funcs.
f = Norm2sq(A, b, c=0.5, memopt=True)
g0 = ZeroFun()

# Initial guess
x_init = DataContainer(np.zeros((n, 1)))

# Smoke-call the gradient once before running the algorithms.
f.grad(x_init)
opt = {'memopt': True}

# Run FISTA for least squares plus zero function, without and with memopt.
x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0)
x_fista0_m, it0_m, timing0_m, criter0_m = FISTA(x_init, f, g0, opt=opt)

# Print solution and final objective/criterion value for comparison
print("FISTA least squares plus zero function solution and objective value:")
print(x_fista0.array)
print(criter0[-1])

# Plot criterion curves to see both FISTA variants converge to the same value.
# Each curve is plotted against its own iteration range so two runs of
# different lengths cannot break the plot.
plt.figure()
plt.loglog(range(len(criter0)), criter0, label='FISTA LS')
plt.loglog(range(len(criter0_m)), criter0_m, label='FISTA LS memopt')
plt.legend()
plt.show()
#%%
# Create 1-norm object instance
g1 = Norm1(lam)

# Smoke-call the function value and its proximal operator.
g1(x_init)
g1.prox(x_init, 0.02)

# Combine with least squares and solve using generic FISTA implementation,
# again without and with memopt.
x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1)
x_fista1_m, it1_m, timing1_m, criter1_m = FISTA(x_init, f, g1, opt=opt)

# Print for comparison
print("FISTA least squares plus 1-norm solution and objective value:")
print(x_fista1)
print(criter1[-1])

# Now try another algorithm FBPD for same problem:
x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
print(x_fbpd1)
print(criterfbpd1[-1])

# Plot criterion curve to see both FISTA and FBPD converge to same value.
# Note that FISTA is very efficient for 1-norm minimization so it beats
# FBPD in this test by a lot. But FBPD can handle a larger class of problems
# than FISTA can.
plt.figure()
plt.loglog(range(len(criter1)), criter1, label='FISTA LS+1')
plt.loglog(range(len(criter1_m)), criter1_m, label='FISTA LS+1 memopt')
plt.legend()
plt.show()

plt.figure()
plt.loglog(range(len(criter1)), criter1, label='FISTA LS+1')
plt.loglog(range(len(criterfbpd1)), criterfbpd1, label='FBPD LS+1')
plt.legend()
plt.show()
#%%
# Now try 1-norm and TV denoising with FBPD, first 1-norm.

# Set up phantom size NxN by creating ImageGeometry, initialising the
# ImageData object with this geometry and empty array and finally put some
# data into its array, and display as image.
N = 1000
ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
Phantom = ImageData(geometry=ig)

# Draw a simple piecewise-constant phantom directly into the array.
x = Phantom.as_array()
x[round(N/4):round(3*N/4), round(N/4):round(3*N/4)] = 0.5
x[round(N/8):round(7*N/8), round(3*N/8):round(5*N/8)] = 1

plt.imshow(x)
plt.title('Phantom image')
plt.show()

# Identity operator for denoising
I = TomoIdentity(ig)

# Data and add noise
y = I.direct(Phantom)
y.array += 0.1*np.random.randn(N, N)

plt.figure()
plt.imshow(y.array)
plt.title('Noisy image')
plt.show()

# Data fidelity term
f_denoise = Norm2sq(I, y, c=0.5, memopt=True)

# 1-norm regulariser
lam1_denoise = 1.0
g1_denoise = Norm1(lam1_denoise)

# Initial guess
x_init_denoise = ImageData(np.zeros((N, N)))

# Combine with least squares and solve using generic FISTA implementation,
# first without and then with the memory-optimised path (50 iterations each).
opt = {'memopt': False, 'iter': 50}
print ("no memopt")
x_fista1_denoise, it1_denoise, timing1_denoise, \
    criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
opt = {'memopt': True, 'iter': 50}
print ("yes memopt")
x_fista1_denoise_m, it1_denoise_m, timing1_denoise_m, \
    criter1_denoise_m = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)

print(x_fista1_denoise)
print(criter1_denoise[-1])

plt.figure()
plt.imshow(x_fista1_denoise.as_array())
plt.title('FISTA LS+1')
plt.show()

plt.figure()
plt.imshow(x_fista1_denoise_m.as_array())
plt.title('FISTA LS+1 memopt')
plt.show()

# BUG FIX: the original plotted these curves against an 'iternum' list left
# over from the earlier FBPD run (a different length), which makes matplotlib
# raise a dimension-mismatch error; plot each curve against its own range.
plt.figure()
plt.loglog(range(len(criter1_denoise)), criter1_denoise, label='FISTA LS+1')
plt.loglog(range(len(criter1_denoise_m)), criter1_denoise_m, label='FISTA LS+1 memopt')
plt.legend()
plt.show()
#%%
# Now denoise LS + 1-norm with FBPD
x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
print(x_fbpd1_denoise)
print(criterfbpd1_denoise[-1])

plt.figure()
plt.imshow(x_fbpd1_denoise.as_array())
plt.title('FBPD LS+1')
plt.show()


# Now TV with FBPD
lam_tv = 0.1
gtv = TV2D(lam_tv)
# Smoke-call the TV functional once on the (transformed) initial guess.
gtv(gtv.op.direct(x_init_denoise))

opt_tv = {'tol': 1e-4, 'iter': 10000}

x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv, opt=opt_tv)
print(x_fbpdtv_denoise)
print(criterfbpdtv_denoise[-1])

plt.imshow(x_fbpdtv_denoise.as_array())
plt.title('FBPD TV')
plt.show()