summaryrefslogtreecommitdiffstats
path: root/Wrappers
diff options
context:
space:
mode:
Diffstat (limited to 'Wrappers')
-rwxr-xr-xWrappers/Python/ccpi/framework/framework.py86
-rw-r--r--[-rwxr-xr-x]Wrappers/Python/ccpi/optimisation/functions/Function.py131
-rw-r--r--Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py4
-rwxr-xr-xWrappers/Python/ccpi/optimisation/functions/ScaledFunction.py13
-rw-r--r--Wrappers/Python/ccpi/optimisation/functions/__init__.py1
-rwxr-xr-xWrappers/Python/ccpi/optimisation/operators/BlockOperator.py30
-rw-r--r--Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py34
-rw-r--r--Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py12
8 files changed, 201 insertions, 110 deletions
diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py
index 499a2cd..0c43737 100755
--- a/Wrappers/Python/ccpi/framework/framework.py
+++ b/Wrappers/Python/ccpi/framework/framework.py
@@ -27,6 +27,7 @@ import sys
from datetime import timedelta, datetime
import warnings
from functools import reduce
+from numbers import Number
def find_key(dic, val):
"""return the key of dictionary dic given the value"""
@@ -54,8 +55,7 @@ class ImageGeometry(object):
center_x=0,
center_y=0,
center_z=0,
- channels=1,
- dimension_labels=None):
+ channels=1):
self.voxel_num_x = voxel_num_x
self.voxel_num_y = voxel_num_y
@@ -67,7 +67,28 @@ class ImageGeometry(object):
self.center_y = center_y
self.center_z = center_z
self.channels = channels
- self.dimension_labels = dimension_labels
+
+ # this is some code repetition
+ if self.channels > 1:
+ if self.voxel_num_z>1:
+ self.length = 4
+ self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x)
+ dim_labels = ['channel' ,'vertical' , 'horizontal_y' , 'horizontal_x']
+ else:
+ self.length = 3
+ self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x)
+ dim_labels = ['channel' , 'horizontal_y' , 'horizontal_x']
+ else:
+ if self.voxel_num_z>1:
+ self.length = 3
+ self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x)
+ dim_labels = ['vertical', 'horizontal_y' , 'horizontal_x']
+ else:
+ self.length = 2
+ self.shape = (self.voxel_num_y, self.voxel_num_x)
+ dim_labels = ['horizontal_y' , 'horizontal_x']
+
+ self.dimension_labels = dim_labels
def get_min_x(self):
return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x
@@ -115,11 +136,18 @@ class ImageGeometry(object):
return repres
def allocate(self, value=0, dimension_labels=None):
'''allocates an ImageData according to the size expressed in the instance'''
- if dimension_labels is None:
- dimension_labels = self.dimension_labels
- out = ImageData(geometry=self, dimension_labels=dimension_labels)
- if value != 0:
- out += value
+ out = ImageData(geometry=self)
+ if isinstance(value, Number):
+ if value != 0:
+ out += value
+ else:
+ if value == 'random':
+ out.fill(numpy.random.random_sample(self.shape))
+ elif value == 'random_int':
+ out.fill(numpy.random.randint(1, 10 + 1,size=self.shape))
+ if dimension_labels is not None:
+ if dimension_labels != self.dimension_labels:
+ return out.subset(dimensions=dimension_labels)
return out
class AcquisitionGeometry(object):
@@ -135,7 +163,6 @@ class AcquisitionGeometry(object):
dist_center_detector=None,
channels=1,
angle_unit='degree',
- dimension_labels=None
):
"""
General inputs for standard type projection geometries
@@ -166,6 +193,7 @@ class AcquisitionGeometry(object):
self.geom_type = geom_type # 'parallel' or 'cone'
self.dimension = dimension # 2D or 3D
self.angles = angles
+ num_of_angles = len (angles)
self.dist_source_center = dist_source_center
self.dist_center_detector = dist_center_detector
@@ -176,7 +204,24 @@ class AcquisitionGeometry(object):
self.pixel_size_v = pixel_size_v
self.channels = channels
- self.dimension_labels = dimension_labels
+
+ if channels > 1:
+ if pixel_num_v > 1:
+ shape = (channels, num_of_angles , pixel_num_v, pixel_num_h)
+ dim_labels = ['channel' , 'angle' , 'vertical' , 'horizontal']
+ else:
+ shape = (channels , num_of_angles, pixel_num_h)
+ dim_labels = ['channel' , 'angle' , 'horizontal']
+ else:
+ if pixel_num_v > 1:
+ shape = (num_of_angles, pixel_num_v, pixel_num_h)
+ dim_labels = ['angle' , 'vertical' , 'horizontal']
+ else:
+ shape = (num_of_angles, pixel_num_h)
+ dim_labels = ['angle' , 'horizontal']
+ self.shape = shape
+
+ self.dimension_labels = dim_labels
def clone(self):
'''returns a copy of the AcquisitionGeometry'''
@@ -204,11 +249,18 @@ class AcquisitionGeometry(object):
return repres
def allocate(self, value=0, dimension_labels=None):
'''allocates an AcquisitionData according to the size expressed in the instance'''
- if dimension_labels is None:
- dimension_labels = self.dimension_labels
- out = AcquisitionData(geometry=self, dimension_labels=dimension_labels)
- if value != 0:
- out += value
+ out = AcquisitionData(geometry=self)
+ if isinstance(value, Number):
+ if value != 0:
+ out += value
+ else:
+ if value == 'random':
+ out.fill(numpy.random.random_sample(self.shape))
+ elif value == 'random_int':
+                out.fill(numpy.random.randint(1, 10 + 1,size=self.shape))
+ if dimension_labels is not None:
+ if dimension_labels != self.dimension_labels:
+ return out.subset(dimensions=dimension_labels)
return out
class DataContainer(object):
'''Generic class to hold data
@@ -907,7 +959,7 @@ class AcquisitionData(DataContainer):
if channels > 1:
if vert > 1:
shape = (channels, num_of_angles , vert, horiz)
- dim_labels = ['channel' , ' angle' ,
+ dim_labels = ['channel' , 'angle' ,
'vertical' , 'horizontal']
else:
shape = (channels , num_of_angles, horiz)
@@ -936,7 +988,7 @@ class AcquisitionData(DataContainer):
elif dim == 'horizontal':
shape.append(horiz)
if len(shape) != len(dimension_labels):
- raise ValueError('Missing {0} axes.\nExpected{1} got {2}}'\
+ raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\
.format(
len(dimension_labels) - len(shape),
dimension_labels, shape)
diff --git a/Wrappers/Python/ccpi/optimisation/functions/Function.py b/Wrappers/Python/ccpi/optimisation/functions/Function.py
index fa81dfc..82f24a6 100755..100644
--- a/Wrappers/Python/ccpi/optimisation/functions/Function.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Function.py
@@ -1,62 +1,69 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import warnings
-
-class Function(object):
- '''Abstract class representing a function
-
- Members:
- L is the Lipschitz constant of the gradient of the Function
- '''
- def __init__(self):
- self.L = None
-
- def __call__(self,x, out=None):
- '''Evaluates the function at x '''
- raise NotImplementedError
-
- def gradient(self, x, out=None):
- '''Returns the gradient of the function at x, if the function is differentiable'''
- raise NotImplementedError
-
- def proximal(self, x, tau, out=None):
- '''This returns the proximal operator for the function at x, tau'''
- raise NotImplementedError
-
- def convex_conjugate(self, x, out=None):
- '''This evaluates the convex conjugate of the function at x'''
- raise NotImplementedError
-
- def proximal_conjugate(self, x, tau, out = None):
- '''This returns the proximal operator for the convex conjugate of the function at x, tau'''
- raise NotImplementedError
-
- def grad(self, x):
- '''Alias of gradient(x,None)'''
- warnings.warn('''This method will disappear in following
- versions of the CIL. Use gradient instead''', DeprecationWarning)
- return self.gradient(x, out=None)
-
- def prox(self, x, tau):
- '''Alias of proximal(x, tau, None)'''
- warnings.warn('''This method will disappear in following
- versions of the CIL. Use proximal instead''', DeprecationWarning)
- return self.proximal(x, out=None)
-
+# -*- coding: utf-8 -*-
+# This work is part of the Core Imaging Library developed by
+# Visual Analytics and Imaging System Group of the Science Technology
+# Facilities Council, STFC
+
+# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
+
+class Function(object):
+ '''Abstract class representing a function
+
+ Members:
+ L is the Lipschitz constant of the gradient of the Function
+ '''
+ def __init__(self):
+ self.L = None
+
+ def __call__(self,x, out=None):
+ '''Evaluates the function at x '''
+ raise NotImplementedError
+
+ def gradient(self, x, out=None):
+ '''Returns the gradient of the function at x, if the function is differentiable'''
+ raise NotImplementedError
+
+ def proximal(self, x, tau, out=None):
+ '''This returns the proximal operator for the function at x, tau'''
+ raise NotImplementedError
+
+ def convex_conjugate(self, x, out=None):
+ '''This evaluates the convex conjugate of the function at x'''
+ raise NotImplementedError
+
+ def proximal_conjugate(self, x, tau, out = None):
+ '''This returns the proximal operator for the convex conjugate of the function at x, tau'''
+ raise NotImplementedError
+
+ def grad(self, x):
+ '''Alias of gradient(x,None)'''
+ warnings.warn('''This method will disappear in following
+ versions of the CIL. Use gradient instead''', DeprecationWarning)
+ return self.gradient(x, out=None)
+
+    def prox(self, x, tau):
+        '''Alias of proximal(x, tau, None)'''
+        warnings.warn('''This method will disappear in following 
+        versions of the CIL. Use proximal instead''', DeprecationWarning)
+        return self.proximal(x, tau, out=None)
+
+ def __rmul__(self, scalar):
+ '''Defines the multiplication by a scalar on the left
+
+ returns a ScaledFunction'''
+ return ScaledFunction(self, scalar)
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 9267565..1baf365 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -23,7 +23,7 @@ class SimpleL2NormSq(Function):
self.L = 2
def __call__(self, x):
- return self.alpha * x.power(2).sum()
+ return x.power(2).sum()
def gradient(self,x, out=None):
if out is None:
@@ -61,7 +61,7 @@ class L2NormSq(SimpleL2NormSq):
else:
return SimpleL2NormSq.__call__(self, x - self.b)
- def gradient(self, x):
+ def gradient(self, x, out=None):
if self.b is None:
return 2 * x
else:
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index 7e2f20a..8a52566 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -28,13 +28,14 @@ class ScaledFunction(object):
'''Evaluates the function at x '''
return self.scalar * self.function(x)
- def convex_conjugate(self, x, out=None):
+ def convex_conjugate(self, x):
'''returns the convex_conjugate of the scaled function '''
- if out is None:
- return self.scalar * self.function.convex_conjugate(x/self.scalar, out=out)
- else:
- out.fill(self.function.convex_conjugate(x/self.scalar))
- out *= self.scalar
+ # if out is None:
+ # return self.scalar * self.function.convex_conjugate(x/self.scalar)
+ # else:
+ # out.fill(self.function.convex_conjugate(x/self.scalar))
+ # out *= self.scalar
+ return self.scalar * self.function.convex_conjugate(x/self.scalar)
def proximal_conjugate(self, x, tau, out = None):
'''This returns the proximal operator for the function at x, tau
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
index f775e52..9030454 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -6,4 +6,5 @@ from .L1Norm import SimpleL1Norm, L1Norm
from .L2NormSquared import L2NormSq, SimpleL2NormSq
from .mixed_L12Norm import mixed_L12Norm
from .BlockFunction import BlockFunction
+from .ScaledFunction import ScaledFunction
from .FunctionOperatorComposition import FunctionOperatorComposition
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index 21ea104..4ff38c6 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -11,7 +11,7 @@ import functools
from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer
from ccpi.optimisation.operators import Operator, LinearOperator
from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator
-
+from ccpi.framework import BlockGeometry
class BlockOperator(Operator):
'''Class to hold a block operator
@@ -71,14 +71,23 @@ class BlockOperator(Operator):
return numpy.asarray(b)
def direct(self, x, out=None):
- shape = self.get_output_shape(x.shape)
+ '''Direct operation for the BlockOperator
+
+ BlockOperator work on BlockDataContainer, but they will work on DataContainers
+ and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1)
+ '''
+ if not isinstance (x, BlockDataContainer):
+ x_b = BlockDataContainer(x)
+ else:
+ x_b = x
+ shape = self.get_output_shape(x_b.shape)
res = []
for row in range(self.shape[0]):
for col in range(self.shape[1]):
if col == 0:
- prod = self.get_item(row,col).direct(x.get_item(col))
+ prod = self.get_item(row,col).direct(x_b.get_item(col))
else:
- prod += self.get_item(row,col).direct(x.get_item(col))
+ prod += self.get_item(row,col).direct(x_b.get_item(col))
res.append(prod)
return BlockDataContainer(*res, shape=shape)
@@ -89,18 +98,25 @@ class BlockOperator(Operator):
This method exists in BlockOperator as it is not known what type of
Operator it will contain.
+ BlockOperator work on BlockDataContainer, but they will work on DataContainers
+ and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1)
+
Raises: ValueError if the contained Operators are not linear
'''
if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True):
raise ValueError('Not all operators in Block are linear.')
- shape = self.get_output_shape(x.shape, adjoint=True)
+ if not isinstance (x, BlockDataContainer):
+ x_b = BlockDataContainer(x)
+ else:
+ x_b = x
+ shape = self.get_output_shape(x_b.shape, adjoint=True)
res = []
for row in range(self.shape[1]):
for col in range(self.shape[0]):
if col == 0:
- prod = self.get_item(row, col).adjoint(x.get_item(col))
+ prod = self.get_item(row, col).adjoint(x_b.get_item(col))
else:
- prod += self.get_item(row, col).adjoint(x.get_item(col))
+ prod += self.get_item(row, col).adjoint(x_b.get_item(col))
res.append(prod)
return BlockDataContainer(*res, shape=shape)
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 999975c..24c4e4b 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -32,16 +32,21 @@ class FiniteDiff(Operator):
self.direction = direction
self.bnd_cond = bnd_cond
+ # Domain Geometry = Range Geometry if not stated
if self.gm_range is None:
self.gm_range = self.gm_domain
-
- if self.direction + 1 > len(gm_domain):
+ # check direction and "length" of geometry
+ if self.direction + 1 > len(self.gm_domain.shape):
raise ValueError('Gradient directions more than geometry domain')
-
+
+ #self.voxel_size = kwargs.get('voxel_size',1)
+ # this wrongly assumes a homogeneous voxel size
+ self.voxel_size = self.gm_domain.voxel_size_x
+
+
def direct(self, x, out=None):
-
-# x_asarr = x.as_array()
- x_asarr = x
+
+ x_asarr = x.as_array()
x_sz = len(x.shape)
if out is None:
@@ -157,13 +162,13 @@ class FiniteDiff(Operator):
else:
raise NotImplementedError
- res = out
- return res
+ res = out/self.voxel_size
+ return type(x)(res)
def adjoint(self, x, out=None):
-# x_asarr = x.as_array()
- x_asarr = x
+ x_asarr = x.as_array()
+ #x_asarr = x
x_sz = len(x.shape)
if out is None:
@@ -294,19 +299,20 @@ class FiniteDiff(Operator):
else:
raise NotImplementedError
- res = out
- return res
+ res = out/self.voxel_size
+ return type(x)(-res)
def range_geometry(self):
+ '''Returns the range geometry'''
return self.gm_range
def domain_geometry(self):
- '''currently is a tuple'''
+ '''Returns the domain geometry'''
return self.gm_domain
def norm(self):
x0 = self.gm_domain.allocate()
- x0 = np.random.random_sample(x0.shape)
+ x0.fill( np.random.random_sample(x0.shape) )
self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
return self.s1
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 2eb77ce..d0d0f43 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -27,6 +27,7 @@ class Gradient(Operator):
if self.gm_range is None:
+ #FIXME this should be a BlockGeometry
self.gm_range = ((len(self.gm_domain),)+self.gm_domain)
# Kwargs Default options
@@ -39,9 +40,16 @@ class Gradient(Operator):
def direct(self, x, out=None):
- tmp = np.zeros(self.gm_range)
+ #tmp = np.zeros(self.gm_range)
+ tmp = self.gm_range.allocate()
for i in range(len(self.gm_domain)):
- tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i]
+ #tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i]
+ if self.correlation == 'Space':
+ if i == 0 :
+ i+=1
+ tmp[i].fill(
+ FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i]
+ )
# return type(x)(tmp)
return type(x)(tmp)