1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
|
"""!
@brief Double-layer oscillatory network with phase oscillator for image segmentation.
@details Implementation based on paper @cite inproceedings::nnet::syncsegm::1.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import warnings
from math import floor
try:
from PIL import Image
except Exception as error_instance:
warnings.warn("Impossible to import PIL (please, install 'PIL'), pyclustering's visualization "
"functionality is partially not available (details: '%s')." % str(error_instance))
from pyclustering.cluster.syncnet import syncnet
from pyclustering.nnet import solve_type, initial_type
from pyclustering.nnet.sync import sync_visualizer
from pyclustering.utils import read_image
class syncsegm_visualizer:
    """!
    @brief Result visualizer of double-layer oscillatory network 'syncsegm'.

    """

    @staticmethod
    def show_first_layer_dynamic(analyser):
        """!
        @brief Shows output dynamic of the first layer.

        @param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.

        """
        first_layer_analyser = analyser.get_first_layer_analyser()
        sync_visualizer.show_output_dynamic(first_layer_analyser)

    @staticmethod
    def show_second_layer_dynamic(analyser):
        """!
        @brief Shows output dynamic of the second layer.

        @param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.

        """
        # Each second-layer entry is a dict with 'color_segment' and 'analyser' keys;
        # only the analysers themselves are needed for visualization.
        dynamic_analysers = [entry['analyser'] for entry in analyser.get_second_layer_analysers()]
        sync_visualizer.show_output_dynamics(dynamic_analysers)
class syncsegm_analyser:
    """!
    @brief Performs analysis of output dynamic of the double-layer oscillatory network 'syncsegm' to extract information about segmentation results.

    """

    def __init__(self, color_analyser, object_segment_analysers = None):
        """!
        @brief Constructor of the analyser.

        @param[in] color_analyser (list): Analyser of coloring segmentation results of the first layer.
        @param[in] object_segment_analysers (list): Analysers of objects on image segments - results of the second layer.

        """
        self.__color_analyser = color_analyser
        self.__object_segment_analysers = object_segment_analysers

    def get_first_layer_analyser(self):
        """!
        @brief Returns analyser of coloring segmentation of the first layer.

        """
        return self.__color_analyser

    def get_second_layer_analysers(self):
        """!
        @brief Returns analysers of object segmentation of the second layer.

        """
        return self.__object_segment_analysers

    def allocate_colors(self, eps = 0.01, noise_size = 1):
        """!
        @brief Allocates color segments.

        @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Threshold that defines noise - segments size (in pixels) that is less then the threshold is considered as a noise.

        @return (list) Color segments where each color segment consists of indexes of pixels that forms color segment.

        """
        color_clusters = self.__color_analyser.allocate_clusters(eps)
        # Segments not larger than the noise threshold are discarded.
        return [segment for segment in color_clusters if len(segment) > noise_size]

    def allocate_objects(self, eps = 0.01, noise_size = 1):
        """!
        @brief Allocates object segments.

        @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Threshold that defines noise - segments size (in pixels) that is less then the threshold is considered as a noise.

        @return (list) Object segments where each object segment consists of indexes of pixels that forms object segment.

        """
        if self.__object_segment_analysers is None:
            return []

        collected_segments = []
        for entry in self.__object_segment_analysers:
            # Each entry pairs the pixel indexes of a color segment with the
            # analyser that segmented objects inside that color segment.
            collected_segments += entry['analyser'].allocate_clusters(eps, entry['color_segment'])

        return [segment for segment in collected_segments if len(segment) > noise_size]
class syncsegm:
    """!
    @brief Class represents segmentation algorithm syncsegm.
    @details syncsegm is a bio-inspired algorithm that is based on double-layer oscillatory network that uses modified Kuramoto model.
              Algorithm extracts colors and colored objects. It uses only CCORE (C++ implementation of pyclustering) parts to implement the algorithm.

              CCORE option is True by default to use sync network in the pyclustering core - C/C++ shared library for processing that significantly increases performance.

              Example:
              @code
                  # create oscillatory for image segmentaion - extract colors (radius 128) and objects (radius 4),
                  # and ignore noise (segments with size that is less than 10 pixels)
                  algorithm = syncsegm(128, 4, 10);

                  # extract segments (colors and objects)
                  analyser = algorithm(path_to_file);

                  # obtain segmentation results (only colors - from the first layer)
                  color_segments = analyser.allocate_colors(0.01, 10);
                  draw_image_mask_segments(path_to_file, color_segments);

                  # obtain segmentation results (objects - from the second layer)
                  object_segments = analyser.allocate_objects(0.01, 10);
                  draw_image_mask_segments(path_to_file, object_segments);
              @endcode

    """

    def __init__(self, color_radius, object_radius, noise_size = 0, ccore = True):
        """!
        @brief Contructor of the oscillatory network SYNC for cluster analysis.

        @param[in] color_radius (double): Radius of color connectivity (color similarity) for the first layer.
        @param[in] object_radius (double): Radius of object connectivity (object similarity) for the second layer,
                    if 'None' then object segmentation is not performed (only color segmentation).
        @param[in] noise_size (double): Size of segment that should be considered as a noise and ignored by the second layer.
        @param[in] ccore (bool): If 'True' then C/C++ implementation is used to increase performance.

        """
        self.__color_radius = color_radius
        self.__object_radius = object_radius
        self.__noise_size = noise_size

        # Default local synchronization orders; overwritten by process().
        self.__order_color = 0.9995
        self.__order_object = 0.999

        self.__network = None
        self.__ccore = ccore

    def process(self, image_source, collect_dynamic = False, order_color = 0.9995, order_object = 0.999):
        """!
        @brief Performs image segmentation.

        @param[in] image_source (string): Path to image file that should be processed.
        @param[in] collect_dynamic (bool): If 'True' then whole dynamic of each layer of the network is collected.
        @param[in] order_color (double): Local synchronization order for the first layer - coloring segmentation.
        @param[in] order_object (double): Local synchronization order for the second layer - object segmentation.

        @return (syncsegm_analyser) Analyser of segmentation results by the network.

        """
        self.__order_color = order_color
        self.__order_object = order_object

        data = read_image(image_source)
        color_analyser = self.__analyse_colors(data, collect_dynamic)

        # Object segmentation (second layer) is optional.
        if self.__object_radius is None:
            return syncsegm_analyser(color_analyser, None)

        object_segment_analysers = self.__analyse_objects(image_source, color_analyser, collect_dynamic)
        return syncsegm_analyser(color_analyser, object_segment_analysers)

    def __analyse_colors(self, image_data, collect_dynamic):
        """!
        @brief Performs color segmentation by the first layer.

        @param[in] image_data (array_like): Image sample as a array-like structure.
        @param[in] collect_dynamic (bool): If 'True' then whole dynamic of the first layer of the network is collected.

        @return (syncnet_analyser) Analyser of color segmentation results of the first layer.

        """
        network = syncnet(image_data, self.__color_radius, initial_phases = initial_type.RANDOM_GAUSSIAN, ccore = self.__ccore)
        return network.process(self.__order_color, solve_type.FAST, collect_dynamic)

    def __analyse_objects(self, image_source, color_analyser, collect_dynamic):
        """!
        @brief Performs object segmentation by the second layer.

        @param[in] image_source (string): Path to image file that should be processed.
        @param[in] color_analyser (syncnet_analyser): Analyser of color segmentation results.
        @param[in] collect_dynamic (bool): If 'True' then whole dynamic of the first layer of the network is collected.

        @return (map) Analysers of object segments.

        """
        # Context manager guarantees the image file is closed even if a layer fails.
        with Image.open(image_source) as pointer_image:
            image_size = pointer_image.size

        object_analysers = []
        color_segments = color_analyser.allocate_clusters()

        for segment in color_segments:
            object_analyser = self.__analyse_color_segment(image_size, segment, collect_dynamic)
            if object_analyser is not None:
                object_analysers.append({'color_segment': segment, 'analyser': object_analyser})

        return object_analysers

    def __analyse_color_segment(self, image_size, color_segment, collect_dynamic):
        """!
        @brief Performs object segmentation of separate segment.

        @param[in] image_size (list): Image size presented as a [width x height].
        @param[in] color_segment (list): Image segment that should be processed.
        @param[in] collect_dynamic (bool): If 'True' then whole dynamic of the second layer of the network is collected.

        @return (syncnet_analyser) Analyser of object segmentation results of the second layer,
                 or 'None' if the segment is smaller than the noise threshold.

        """
        coordinates = self.__extract_location_coordinates(image_size, color_segment)
        if len(coordinates) < self.__noise_size:
            return None

        # BUGFIX: was hard-coded 'ccore = True', ignoring the flag passed to the
        # constructor; now the second layer honors the same setting as the first.
        network = syncnet(coordinates, self.__object_radius, initial_phases = initial_type.EQUIPARTITION, ccore = self.__ccore)
        return network.process(self.__order_object, solve_type.FAST, collect_dynamic)

    def __extract_location_coordinates(self, image_size, color_segment):
        """!
        @brief Extracts coordinates of specified image segment.

        @param[in] image_size (list): Image size presented as a [width x height].
        @param[in] color_segment (list): Image segment whose coordinates should be extracted.

        @return (list) Coordinates of each pixel.

        """
        # Flat pixel indexes are row-major: index = y * width + x.
        # divmod performs exact integer arithmetic (the former
        # floor(index / width) relied on float division).
        coordinates = []
        for index in color_segment:
            y, x = divmod(index, image_size[0])
            coordinates.append([x, y])

        return coordinates
|