From c6312710a26e12875af7961f047ef203a541dc68 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Wed, 31 Jan 2018 13:13:59 +0000
Subject: Working chainable DataSetProcessors

---
 Wrappers/Python/ccpi/common.py       | 403 -----------------------------------
 Wrappers/Python/ccpi/framework.py    |  69 +++++-
 Wrappers/Python/test/regularizers.py | 178 ++++++++++++++--
 3 files changed, 227 insertions(+), 423 deletions(-)
 delete mode 100644 Wrappers/Python/ccpi/common.py
(limited to 'Wrappers/Python')
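Chainable here means that a processor's input can be either a DataSet (via setInput) or another processor (via setInputProcessor), and getOutput()/process() produce a new DataSet. A minimal sketch of the pattern this patch introduces, where processor_a and processor_b stand for any concrete subclasses such as the SBTV and FGPTV classes added below (the names and the dataset variable are illustrative, not part of the patch):

    processor_a.setInput(dataset)               # dataset: a 2D or 3D DataSet
    processor_b.setInputProcessor(processor_a)  # chain the two processors
    result = processor_b.getOutput()            # runs processor_a, then processor_b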
diff --git a/Wrappers/Python/ccpi/common.py b/Wrappers/Python/ccpi/common.py
deleted file mode 100644
index e2816db..0000000
--- a/Wrappers/Python/ccpi/common.py
+++ /dev/null
@@ -1,403 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import abc
-import numpy
-import os
-import sys
-import time
-import vtk
-
-if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
-    ABC = abc.ABC
-else:
-    ABC = abc.ABCMeta('ABC', (), {})
-
-def find_key(dic, val):
-    """return the key of dictionary dic given the value"""
-    return [k for k, v in dic.items() if v == val][0]
-
-class CCPiBaseClass(ABC):
-    def __init__(self, **kwargs):
-        self.acceptedInputKeywords = []
-        self.pars = {}
-        self.debug = True
-        # add keyworded arguments as accepted input keywords and add to the
-        # parameters
-        for key, value in kwargs.items():
-            self.acceptedInputKeywords.append(key)
-            #print ("key {0}".format(key))
-            #self.setParameter(key.__name__=value)
-            self.setParameter(**{key:value})
-
-    def setParameter(self, **kwargs):
-        '''set named parameter for the reconstructor engine
-
-        raises Exception if the named parameter is not recognized
-
-        '''
-        for key , value in kwargs.items():
-            if key in self.acceptedInputKeywords:
-                self.pars[key] = value
-            else:
-                raise KeyError('Wrong parameter "{0}" for {1}'.format(key,
-                               self.__class__.__name__ ))
-    # setParameter
-
-    def getParameter(self, key):
-        if type(key) is str:
-            if key in self.acceptedInputKeywords:
-                return self.pars[key]
-            else:
-                raise KeyError('Unrecongnised parameter: {0} '.format(key) )
-        elif type(key) is list:
-            outpars = []
-            for k in key:
-                outpars.append(self.getParameter(k))
-            return outpars
-        else:
-            raise Exception('Unhandled input {0}' .format(str(type(key))))
-    #getParameter
-
-    def log(self, msg):
-        if self.debug:
-            print ("{0}: {1}".format(self.__class__.__name__, msg))
-
-class DataSet():
-    '''Generic class to hold data'''
-
-    def __init__ (self, array, deep_copy=True, dimension_labels=None,
-                  **kwargs):
-        '''Holds the data'''
-
-        self.shape = numpy.shape(array)
-        self.number_of_dimensions = len (self.shape)
-        self.dimension_labels = {}
-
-        if dimension_labels is not None and \
-           len (dimension_labels) == self.number_of_dimensions:
-            for i in range(self.number_of_dimensions):
-                self.dimension_labels[i] = dimension_labels[i]
-        else:
-            for i in range(self.number_of_dimensions):
-                self.dimension_labels[i] = 'dimension_{0:02}'.format(i)
-
-        if type(array) == numpy.ndarray:
-            if deep_copy:
-                self.array = array[:]
-            else:
-                self.array = array
-        else:
-            raise TypeError('Array must be NumpyArray')
-
-    def as_array(self, dimensions=None):
-        '''Returns the DataSet as Numpy Array
-
-        Returns the pointer to the array if dimensions is not set.
-        If dimensions is set, it first creates a new DataSet with the subset
-        and then it returns the pointer to the array'''
-        if dimensions is not None:
-            return self.subset(dimensions).as_array()
-        return self.array
-
-    def subset(self, dimensions=None):
-        '''Creates a DataSet containing a subset of self according to the
-        labels in dimensions'''
-        if dimensions is None:
-            return self.array
-        else:
-            # check that all the requested dimensions are in the array
-            # this is done by checking the dimension_labels
-            proceed = True
-            unknown_key = ''
-            # axis_order contains the order of the axis that the user wants
-            # in the output DataSet
-            axis_order = []
-            if type(dimensions) == list:
-                for dl in dimensions:
-                    if dl not in self.dimension_labels.values():
-                        proceed = False
-                        unknown_key = dl
-                        break
-                    else:
-                        axis_order.append(find_key(self.dimension_labels, dl))
-                if not proceed:
-                    raise KeyError('Unknown key specified {0}'.format(dl))
-
-                # slice away the unwanted data from the array
-                unwanted_dimensions = self.dimension_labels.copy()
-                left_dimensions = []
-                for ax in sorted(axis_order):
-                    this_dimension = unwanted_dimensions.pop(ax)
-                    left_dimensions.append(this_dimension)
-                #print ("unwanted_dimensions {0}".format(unwanted_dimensions))
-                #print ("left_dimensions {0}".format(left_dimensions))
-                #new_shape = [self.shape[ax] for ax in axis_order]
-                #print ("new_shape {0}".format(new_shape))
-                command = "self.array"
-                for i in range(self.number_of_dimensions):
-                    if self.dimension_labels[i] in unwanted_dimensions.values():
-                        command = command + "[0]"
-                    else:
-                        command = command + "[:]"
-                #print ("command {0}".format(command))
-                cleaned = eval(command)
-                # cleaned has collapsed dimensions in the same order of
-                # self.array, but we want it in the order stated in the
-                # "dimensions".
-                # create axes order for numpy.transpose
-                axes = []
-                for key in dimensions:
-                    #print ("key {0}".format( key))
-                    for i in range(len( left_dimensions )):
-                        ld = left_dimensions[i]
-                        #print ("ld {0}".format( ld))
-                        if ld == key:
-                            axes.append(i)
-                #print ("axes {0}".format(axes))
-
-                cleaned = numpy.transpose(cleaned, axes).copy()
-
-                return DataSet(cleaned , True, dimensions)
-
-    def fill(self, array):
-        '''fills the internal numpy array with the one provided'''
-        if numpy.shape(array) != numpy.shape(self.array):
-            raise ValueError('Cannot fill with the provided array.' + \
-                             'Expecting {0} got {1}'.format(
-                                 numpy.shape(self.array),
-                                 numpy.shape(array)))
-        self.array = array[:]
-
-
-class SliceData(DataSet):
-    '''DataSet for holding 2D images'''
-    def __init__(self, array, deep_copy=True, dimension_labels=None,
-                 **kwargs):
-
-        if type(array) == DataSet:
-            # if the array is a DataSet get the info from there
-            if array.number_of_dimensions != 2:
-                raise ValueError('Number of dimensions are != 2: {0}'\
-                                 .format(array.number_of_dimensions))
-
-            DataSet.__init__(self, array.as_array(), deep_copy,
-                             array.dimension_labels, **kwargs)
-        elif type(array) == numpy.ndarray:
-            if dimension_labels is None:
-                dimension_labels = ['horizontal_x' , 'horizontal_y' , 'vertical']
-            shape = numpy.shape(array)
-            ndims = len(shape)
-            if ndims != 3:
-                raise ValueError('Number of dimensions are != 2: {0}'.format(ndims))
-
-            DataSet.__init__(self, array, deep_copy, dimension_labels, **kwargs)
-
-        # Metadata
-        self.origin = [0,0]
-        self.spacing = [1,1]
-
-        # load metadata from kwargs if present
-        for key, value in kwargs.items():
-            if key == 'origin' :
-                if type(value) == list and len (value) == 2:
-                    self.origin = value
-            if key == 'spacing' :
-                if type(value) == list and len (value) == 2:
-                    self.spacing = value
-
-    def rotate(self, center_of_rotation, angle):
-        pass
-
-
-
-class VolumeData(DataSet):
-    '''DataSet for holding 3D images'''
-    def __init__(self, array, deep_copy=True, dimension_labels=None,
-                 **kwargs):
-
-        if type(array) == DataSet:
-            # if the array is a DataSet get the info from there
-            if array.number_of_dimensions != 3:
-                raise ValueError('Number of dimensions are != 3: {0}'\
-                                 .format(array.number_of_dimensions))
-
-            DataSet.__init__(self, array.as_array(), deep_copy,
-                             array.dimension_labels, **kwargs)
-        elif type(array) == numpy.ndarray:
-            if dimension_labels is None:
-                dimension_labels = ['horizontal_x' , 'horizontal_y' , 'vertical']
-            shape = numpy.shape(array)
-            ndims = len(shape)
-            if ndims != 3:
-                raise ValueError('Number of dimensions are != 3: {0}'.format(ndims))
-
-            DataSet.__init__(self, array, deep_copy, dimension_labels, **kwargs)
-
-        # Metadata
-        self.origin = [0,0,0]
-        self.spacing = [1,1,1]
-
-        # load metadata from kwargs if present
-        for key, value in kwargs.items():
-            if key == 'origin' :
-                if type(value) == list and len (value) == 3:
-                    self.origin = value
-            if key == 'spacing' :
-                if type(value) == list and len (value) == 3:
-                    self.spacing = value
-
-class DataSetProcessor(CCPiBaseClass):
-    '''Abstract class for a DataSetProcessor'''
-
-    def __init__(self, number_of_inputs, number_of_outputs, **kwargs):
-        kwargs['number_of_inputs'] = number_of_inputs
-        kwargs['number_of_outputs'] = number_of_outputs
-
-        CCPiBaseClass.__init__(self, **kwargs)
-
-
-
-    def setInput(self, **inData):
-        '''set the input data for the Processor
-
-        this calls the setParameter method'''
-        self.setParameter(**inData)
-
-    def getOutput(self):
-        raise NotImplementedError('The getOutput method is not implemented!')
-
-    def apply(self):
-        raise NotImplementedError('The apply method is not implemented!')
-
-
-
-class AX(DataSetProcessor):
-    '''Example DataSetProcessor
-    The AXPY routines perform a vector multiplication operation defined as
-
-    y := a*x
-    where:
-
-    a is a scalar
-
-    x a DataSet.
-    '''
-
-    def __init__(self, scalar, input_dataset):
-        kwargs = {'scalar':scalar,
-                  'input_dataset':input_dataset,
-                  'output_dataset': None}
-        DataSetProcessor.__init__(self, 2, 1, **kwargs)
-
-
-
-    def apply(self):
-        a, x = self.getParameter(['scalar' , 'input_dataset' ])
-
-        y = DataSet( a * x.as_array() , True,
-                     dimension_labels=x.dimension_labels )
-        self.setParameter(output_dataset=y)
-
-    def getOutput(self):
-        return self.getParameter( 'output_dataset' )
-
-
-class PixelByPixelDataSetProcessor(DataSetProcessor):
-    '''Example DataSetProcessor
-
-    This processor applies a python function to each pixel of the DataSet
-
-    f is a python function
-
-    x a DataSet.
-    '''
-
-    def __init__(self, pyfunc, input_dataset):
-        kwargs = {'pyfunc':pyfunc,
-                  'input_dataset':input_dataset,
-                  'output_dataset': None}
-        DataSetProcessor.__init__(self, 2, 1, **kwargs)
-
-
-
-    def apply(self):
-        pyfunc, x = self.getParameter(['pyfunc' , 'input_dataset' ])
-
-        eval_func = numpy.frompyfunc(pyfunc,1,1)
-
-
-        y = DataSet( eval_func( x.as_array() ) , True,
-                     dimension_labels=x.dimension_labels )
-        self.setParameter(output_dataset=y)
-
-    def getOutput(self):
-        return self.getParameter( 'output_dataset' )
-
-if __name__ == '__main__':
-    shape = (2,3,4,5)
-    size = shape[0]
-    for i in range(1, len(shape)):
-        size = size * shape[i]
-    a = numpy.asarray([i for i in range( size )])
-    a = numpy.reshape(a, shape)
-    ds = DataSet(a, False, ['X', 'Y','Z' ,'W'])
-    print ("ds label {0}".format(ds.dimension_labels))
-    subset = ['W' ,'X']
-    b = ds.subset( subset )
-    print ("b label {0} shape {1}".format(b.dimension_labels,
-           numpy.shape(b.as_array())))
-    c = ds.subset(['Z','W','X'])
-
-    # Create a VolumeData sharing the array with c
-    volume0 = VolumeData(c.as_array(), False, dimensions = c.dimension_labels)
-    volume1 = VolumeData(c, False)
-
-    print ("volume0 {0} volume1 {1}".format(id(volume0.array),
-           id(volume1.array)))
-
-    # Create a VolumeData copying the array from c
-    volume2 = VolumeData(c.as_array(), dimensions = c.dimension_labels)
-    volume3 = VolumeData(c)
-
-    print ("volume2 {0} volume3 {1}".format(id(volume2.array),
-           id(volume3.array)))
-
-    # single number DataSet
-    sn = DataSet(numpy.asarray([1]))
-
-    ax = AX(scalar = 2 , input_dataset=c)
-    ax.apply()
-    print ("ax in {0} out {1}".format(c.as_array().flatten(),
-           ax.getOutput().as_array().flatten()))
-    axm = AX(scalar = 0.5 , input_dataset=ax.getOutput())
-    axm.apply()
-    print ("axm in {0} out {1}".format(c.as_array(), axm.getOutput().as_array()))
-
-    # create a PixelByPixelDataSetProcessor
-
-    #define a python function which will take only one input (the pixel value)
-    pyfunc = lambda x: -x if x > 20 else x
-    clip = PixelByPixelDataSetProcessor(pyfunc,c)
-    clip.apply()
-
-    print ("clip in {0} out {1}".format(c.as_array(), clip.getOutput().as_array()))
-
-
-
-
\ No newline at end of file
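For contrast with the new attribute-style processors below, the processors removed here were configured entirely through constructor arguments and setParameter, as the deleted __main__ block showed. A condensed sketch of that older pattern, assuming c is any DataSet (the scalar value is illustrative):

    # old-style processor from the removed common.py
    ax = AX(scalar = 2, input_dataset = c)   # configured at construction
    ax.apply()                               # computes y = scalar * x
    y = ax.getOutput()                       # returns the stored output DataSet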
diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py
index 5135c87..ba24bef 100644
--- a/Wrappers/Python/ccpi/framework.py
+++ b/Wrappers/Python/ccpi/framework.py
@@ -296,7 +296,7 @@ class InstrumentGeometry(CCPiBaseClass):
 
 
 
-class DataSetProcessor(CCPiBaseClass):
+class DataSetProcessor1(CCPiBaseClass):
     '''Abstract class for a DataSetProcessor
 
     inputs: dictionary of inputs
@@ -355,7 +355,7 @@ class DataSetProcessor(CCPiBaseClass):
 
 
 
-class AX(DataSetProcessor):
+class AX(DataSetProcessor1):
     '''Example DataSetProcessor
     The AXPY routines perform a vector multiplication operation defined as
 
@@ -374,7 +374,7 @@ class AX(DataSetProcessor):
                   }
         for key, value in wargs.items():
             kwargs[key] = value
-        DataSetProcessor.__init__(self, **kwargs)
+        DataSetProcessor1.__init__(self, **kwargs)
 
 
 
@@ -388,7 +388,7 @@ class AX(DataSetProcessor):
 
 
 
-class PixelByPixelDataSetProcessor(DataSetProcessor):
+class PixelByPixelDataSetProcessor(DataSetProcessor1):
     '''Example DataSetProcessor
 
     This processor applies a python function to each pixel of the DataSet
@@ -402,7 +402,7 @@ class PixelByPixelDataSetProcessor(DataSetProcessor):
         kwargs = {'pyfunc':pyfunc,
                   'input_dataset':input_dataset,
                   'output_dataset': None}
-        DataSetProcessor.__init__(self, **kwargs)
+        DataSetProcessor1.__init__(self, **kwargs)
 
 
 
@@ -416,6 +416,61 @@ class PixelByPixelDataSetProcessor(DataSetProcessor):
                      dimension_labels=x.dimension_labels )
         return y
 
+class DataSetProcessor():
+    '''Defines a generic DataSet processor
+
+    accepts DataSet as inputs and
+    outputs DataSet
+    additional attributes can be defined with __setattr__
+    '''
+
+    def __init__(self):
+        pass
+
+    def __setattr__(self, name, value):
+        if name == 'input':
+            self.setInput(value)
+        elif name in self.__dict__.keys():
+            self.__dict__[name] = value
+        else:
+            raise KeyError('Attribute {0} not found'.format(name))
+        #pass
+
+    def setInput(self, dataset):
+        print('Setting input as {0}...'.format(dataset))
+        if issubclass(type(dataset), DataSet):
+            if self.checkInput(dataset):
+                self.__dict__['input'] = dataset
+        else:
+            raise TypeError("Input type mismatch: got {0} expecting {1}"\
+                            .format(type(dataset), DataSet))
+
+    def checkInput(self, dataset):
+        '''Checks parameters of the input DataSet
+
+        Should raise an Error if the DataSet does not match expectation, e.g.
+        if the expected input DataSet is 3D and the Processor expects 2D.
+        '''
+        raise NotImplementedError('Implement basic checks for input DataSet')
+
+    def getOutput(self):
+        if None in self.__dict__.values():
+            raise ValueError('Not all parameters have been passed')
+        return self.process()
+
+    def setInputProcessor(self, processor):
+        print('Setting input as {0}...'.format(processor))
+        if issubclass(type(processor), DataSetProcessor):
+            self.__dict__['input'] = processor
+        else:
+            raise TypeError("Input type mismatch: got {0} expecting {1}"\
+                            .format(type(processor), DataSetProcessor))
+
+
+    def process(self):
+        raise NotImplementedError('process must be implemented')
+
+
 
 if __name__ == '__main__':
     shape = (2,3,4,5)
@@ -472,7 +527,9 @@ if __name__ == '__main__':
 
     print ("clip in {0} out {1}".format(c.as_array(), clip.getOutput().as_array()))
 
-
+    dsp = DataSetProcessor()
+    dsp.setInput(ds)
+    dsp.input = a
     # pipeline
     # Pipeline
    # Pipeline.setProcessor(0, ax)
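The new DataSetProcessor base class above keeps its state directly in __dict__ (its __setattr__ only accepts 'input' or names that already exist) and defers validation and computation to checkInput and process. A minimal sketch of a custom processor built on it, assuming ccpi.framework exposes DataSet and DataSetProcessor as in this patch; the Scale class, its factor attribute, and the numbers are illustrative and not part of the patch:

    from ccpi.framework import DataSet, DataSetProcessor

    class Scale(DataSetProcessor):
        '''Hypothetical processor: multiplies a DataSet by a scalar factor'''
        def __init__(self):
            # populate __dict__ directly, as SBTV/FGPTV do, so that
            # __setattr__ later accepts these attribute names
            attributes = {'factor': 1.0, 'input': None}
            for key, value in attributes.items():
                self.__dict__[key] = value

        def checkInput(self, dataset):
            # accept any DataSet; a real processor would check dimensions here
            return True

        def process(self):
            # the input may be a DataSet or another processor in a chain
            if issubclass(type(self.input), DataSetProcessor):
                dsi = self.input.getOutput()
            else:
                dsi = self.input
            return DataSet(self.factor * dsi.as_array(), True,
                           dimension_labels=dsi.dimension_labels)

    # usage: behaves like SBTV/FGPTV, so it can sit anywhere in a chain
    # s = Scale(); s.factor = 0.5; s.setInput(ds); half = s.getOutput()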
diff --git a/Wrappers/Python/test/regularizers.py b/Wrappers/Python/test/regularizers.py
index 003340c..4ac8d28 100644
--- a/Wrappers/Python/test/regularizers.py
+++ b/Wrappers/Python/test/regularizers.py
@@ -13,9 +13,9 @@ import timeit
 from ccpi.filters.cpu_regularizers_boost import SplitBregman_TV , FGP_TV ,\
                                                 LLT_model, PatchBased_Regul ,\
                                                 TGV_PD
-from ccpi.framework import DataSetProcessor, DataSet
+from ccpi.framework import DataSetProcessor, DataSetProcessor1, DataSet
 
-class SplitBregmanTVRegularizer(DataSetProcessor):
+class SplitBregmanTVRegularizer(DataSetProcessor1):
     '''Regularizers DataSetProcessor
     '''
 
@@ -32,7 +32,7 @@ class SplitBregmanTVRegularizer(DataSetProcessor):
                   }
         for key, value in wargs.items():
             kwargs[key] = value
-        DataSetProcessor.__init__(self, **kwargs)
+        DataSetProcessor1.__init__(self, **kwargs)
 
 
 
@@ -50,7 +50,7 @@ class SplitBregmanTVRegularizer(DataSetProcessor):
         #self.setParameter(output_dataset=y)
         return y
 
-class FGPTVRegularizer(DataSetProcessor):
+class FGPTVRegularizer(DataSetProcessor1):
     '''Regularizers DataSetProcessor
     '''
 
@@ -67,7 +67,7 @@ class FGPTVRegularizer(DataSetProcessor):
                   }
         for key, value in wargs.items():
             kwargs[key] = value
-        DataSetProcessor.__init__(self, **kwargs)
+        DataSetProcessor1.__init__(self, **kwargs)
 
 
 
@@ -93,7 +93,106 @@ class FGPTVRegularizer(DataSetProcessor):
         if issubclass(type(other) , DataSetProcessor):
             self.setParameter(input = other.getOutput()[0])
 
-
+class SBTV(DataSetProcessor):
+    '''Regularizers DataSetProcessor
+    '''
+
+
+
+    def __init__(self):
+        attributes = {'regularization_parameter':None,
+                      'number_of_iterations': 35,
+                      'tolerance_constant': 0.0001,
+                      'TV_penalty':0,
+                      'input' : None
+                      }
+        for key, value in attributes.items():
+            self.__dict__[key] = value
+
+    def checkInput(self, dataset):
+        '''Checks number of dimensions input DataSet
+
+        Expected input is 2D or 3D
+        '''
+        if dataset.number_of_dimensions == 2 or \
+           dataset.number_of_dimensions == 3:
+            return True
+        else:
+            raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
+                             .format(dataset.number_of_dimensions))
+
+    def process(self):
+        '''Executes the processor
+
+        Basic checks are run in here
+        '''
+
+        if issubclass(type(self.input), DataSetProcessor):
+            dsi = self.input.getOutput()[0]
+        else:
+            dsi = self.input
+        if None in self.__dict__.values():
+            raise ValueError('Not all parameters have been passed')
+        out = SplitBregman_TV (dsi.as_array(),
+                               self.regularization_parameter,
+                               self.number_of_iterations,
+                               self.tolerance_constant,
+                               self.TV_penalty)
+        print (type(out))
+        y = DataSet( out[0] , False )
+        #self.setParameter(output_dataset=y)
+        return y
+
+class FGPTV(DataSetProcessor):
+    '''Regularizers DataSetProcessor
+    '''
+
+
+
+    def __init__(self):
+        attributes = {'regularization_parameter':None,
+                      'number_of_iterations': 35,
+                      'tolerance_constant': 0.0001,
+                      'TV_penalty':0,
+                      'input' : None
+                      }
+        for key, value in attributes.items():
+            self.__dict__[key] = value
+
+    def checkInput(self, dataset):
+        '''Checks number of dimensions input DataSet
+
+        Expected input is 2D or 3D
+        '''
+        if dataset.number_of_dimensions == 2 or \
+           dataset.number_of_dimensions == 3:
+            return True
+        else:
+            raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
+                             .format(dataset.number_of_dimensions))
+
+    def process(self):
+        '''Executes the processor
+
+        Basic checks are run in here
+        '''
+
+        if issubclass(type(self.input), DataSetProcessor):
+            dsi = self.input.getOutput()
+        else:
+            dsi = self.input
+        if None in self.__dict__.values():
+            raise ValueError('Not all parameters have been passed')
+        out = FGP_TV (dsi.as_array(),
+                      self.regularization_parameter,
+                      self.number_of_iterations,
+                      self.tolerance_constant,
+                      self.TV_penalty)
+        print (type(out))
+        y = DataSet( out[0] , False )
+        #self.setParameter(output_dataset=y)
+        return y
+
 if __name__ == '__main__':
     filename = os.path.join(".." , ".." , ".." , ".." ,
                             "CCPi-FISTA_Reconstruction", "data" ,
@@ -192,13 +291,18 @@ if __name__ == '__main__':
                          #cmap="gray"
                          )
-    reg3 = FGPTVRegularizer(reg,
-                            pars['regularization_parameter'],
-                            pars['number_of_iterations'],
-                            pars['tolerance_constant'],
-                            pars['TV_penalty'],
-                            hold_input=False, hold_output=True)
-    chain = reg3.getOutput()
-
+
+# 'regularization_parameter':40 , \
+# 'number_of_iterations' :350 ,\
+# 'tolerance_constant':0.01 , \
+# 'TV_penalty': 0
+    reg3 = SBTV()
+    reg3.number_of_iterations = 350
+    reg3.tolerance_constant = 0.01
+    reg3.regularization_parameter = 40
+    reg3.TV_penalty = 0
+    reg3.setInput(lena)
+    dataprocessoroutput = reg3.getOutput()
 
     #txtstr = printParametersToString(pars)
     #txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
@@ -211,7 +315,53 @@ if __name__ == '__main__':
     # these are matplotlib.patch.Patch properties
     props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
     # place a text box in upper left in axes coords
-    a.text(0.05, 0.95, 'chain', transform=a.transAxes, fontsize=14,
+    a.text(0.05, 0.95, 'SBTV', transform=a.transAxes, fontsize=14,
+           verticalalignment='top', bbox=props)
+    imgplot = plt.imshow(dataprocessoroutput.as_array(),\
+                         #cmap="gray"
+                         )
+    reg4 = FGPTV()
+    reg4.number_of_iterations = 350
+    reg4.tolerance_constant = 0.01
+    reg4.regularization_parameter = 40
+    reg4.TV_penalty = 0
+    reg4.setInput(lena)
+    dataprocessoroutput2 = reg4.getOutput()
+
+    #txtstr = printParametersToString(pars)
+    #txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
+    #print (txtstr)
+
+
+    a=fig.add_subplot(2,3,5)
+
+
+    # these are matplotlib.patch.Patch properties
+    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
+    # place a text box in upper left in axes coords
+    a.text(0.05, 0.95, 'FGPTV', transform=a.transAxes, fontsize=14,
+           verticalalignment='top', bbox=props)
+    imgplot = plt.imshow(dataprocessoroutput2.as_array(),\
+                         #cmap="gray"
+                         )
+
+
+    #reg4.input = None
+    reg4.setInputProcessor(reg3)
+    chain = reg4.process()
+
+    #txtstr = printParametersToString(pars)
+    #txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
+    #print (txtstr)
+
+
+    a=fig.add_subplot(2,3,6)
+
+
+    # these are matplotlib.patch.Patch properties
+    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
+    # place a text box in upper left in axes coords
+    a.text(0.05, 0.95, 'SBTV + FGPTV', transform=a.transAxes, fontsize=14,
            verticalalignment='top', bbox=props)
     imgplot = plt.imshow(chain.as_array(),\
                          #cmap="gray"
                          )
-- 
cgit v1.2.3
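The test script above exercises the new processors in three steps: SBTV on its own, FGPTV on its own, and FGPTV chained after SBTV through setInputProcessor. A condensed sketch of that chained pipeline, assuming a 2D DataSet named lena as loaded earlier in the script (parameter values are the ones the script uses):

    reg3 = SBTV()
    reg3.regularization_parameter = 40
    reg3.number_of_iterations = 350
    reg3.tolerance_constant = 0.01
    reg3.TV_penalty = 0
    reg3.setInput(lena)                  # lena: 2D DataSet built from the test image

    reg4 = FGPTV()
    reg4.regularization_parameter = 40
    reg4.number_of_iterations = 350
    reg4.tolerance_constant = 0.01
    reg4.TV_penalty = 0
    reg4.setInputProcessor(reg3)         # reg4 pulls its input from reg3's output
    chain = reg4.process()               # SplitBregman_TV output run through FGP_TV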