# Copyright (c) 2009 Leif Johnson <leif@leifjohnson.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Basic self-organizing map implementation.

This module contains the following Kohonen map implementations:

- Map. A standard rectangular N-dimensional Kohonen map.

- Gas. A vector quantizer that does not have a fixed topology. Neurons in a
  gas are sorted for updates based on their distance from the cue, with the
  sort order defining a topology for each cue presentation.

- GrowingGas. A Gas-based quantizer that can add neurons dynamically to
  explain high-error areas of the input space.

- Filter. A wrapper over an underlying Map instance that maintains an explicit
  estimate of the likelihood of each neuron.

These are tested using the kohonen_test.py file in this source distribution.

Because they have a grid topology, Map objects have some cool visualization
options, including Map.neuron_heatmap and Map.distance_heatmap. These require
the Python Imaging Library (PIL).

There are also some small utility classes:

- DistanceMetric. A callable that takes two arrays with the same shapes and
  returns a 2D matrix of elementwise distances between the inputs. Basically,
  this class is used to calculate the distance between a cue and each neuron
  in a Kohonen Map.

- EuclideanMetric. A callable that calculates the Euclidean distance between
  a cue and each neuron in a Kohonen Map.

- etc.

- Timeseries. A callable that takes no arguments and returns a value that
  might vary over time. Each call to the function will generally return a
  unique value (though this is not necessary).

- ExponentialTimeseries. A callable that takes no arguments and returns an
  exponentially decreasing (or increasing) series of values, depending on
  the parameters passed in at construction time.

- etc.

These utility objects are generally used to regulate the learning parameters
in Kohonen Map objects.
'''
import numpy
from numpy import random as rng
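
# A minimal usage sketch (`cues` and the sizes below are illustrative
# assumptions, not part of the API):
#
#   params = Parameters(dimension=3, shape=(10, 10))
#   som = Map(params)
#   som.reset()
#   for cue in cues:            # cues: an iterable of length-3 numpy arrays
#       som.learn(cue)
#   img = som.neuron_heatmap()  # requires PIL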

class DistanceMetric(object):
    '''A class that implements a distance metric for a self-organizing map.'''

    def __call__(self, x, y):
        '''Return the distances from x to y, along the last array index.'''
        raise NotImplementedError

class CosineMetric(DistanceMetric):
    '''Implements the cosine distance.'''

    def __call__(self, x, y):
        nx = numpy.sqrt(numpy.sum(x * x, axis=-1))
        ny = numpy.sqrt(numpy.sum(y * y, axis=-1))
        # the cosine similarity is 1 when the args are parallel, 0 when they
        # are orthogonal, and -1 when they are opposite. we want a distance
        # that we minimize to find the "winner", so we negate the similarity
        # and add 1 to keep the result nonnegative.
        return 1 - numpy.sum(x * y, axis=-1) / nx / ny

class EuclideanMetric(DistanceMetric):
    '''Implements the euclidean distance (L-2 norm).'''

    def __call__(self, x, y):
        d = x - y
        return numpy.sqrt(numpy.sum(d * d, axis=-1))


class ManhattanMetric(DistanceMetric):
    '''Implements the manhattan distance (L-1 norm).'''

    def __call__(self, x, y):
        return numpy.sum(numpy.abs(x - y), axis=-1)

class WeightedEuclideanMetric(EuclideanMetric):
    '''Implements a standard euclidean distance with weighted dimensions.'''

    def __init__(self, weights):
        self._weights = numpy.array(weights)

    def __call__(self, x, y):
        d = x - y
        w = numpy.resize(self._weights, d.shape)
        return numpy.sqrt(numpy.sum(w * d * d, axis=-1))
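
# An illustrative sketch of how these metrics are called (Map.distances
# resizes the cue to match the neurons array, so both arguments share a
# shape): with neurons of shape (5, 5, 3), the metric collapses the last
# (vector) axis and returns a (5, 5) array holding one distance per neuron.
#
#   metric = WeightedEuclideanMetric([1.0, 1.0, 0.1])   # illustrative weights
#   distances = metric(numpy.resize(cue, neurons.shape), neurons)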

class Timeseries(object):
    '''Represents some sort of value that changes over time.'''

    def __init__(self):
        '''Set up this timeseries.'''
        super(Timeseries, self).__init__()
        self.ticks = 0

    def __call__(self):
        '''Call this timeseries.'''
        t = self.ticks
        self.ticks += 1
        return t

    def reset(self):
        '''Reset the time for this series.'''
        self.ticks = 0

class ConstantTimeseries(Timeseries):
    '''This timeseries just returns a constant value.'''

    def __init__(self, k=1):
        '''Set up this series with a constant value.'''
        super(ConstantTimeseries, self).__init__()
        self.k = k

    def __call__(self):
        '''Return the constant.'''
        return self.k

class ExponentialTimeseries(Timeseries):
    '''Represents an exponential decay (or growth) process.'''

    def __init__(self, rate=-1, initial=1, final=0):
        '''Create a new exponential timeseries object.'''
        super(ExponentialTimeseries, self).__init__()
        self.initial = initial - final
        self.rate = rate
        self.final = final
        self.last = initial

    def __call__(self):
        '''Return the next value in an exponentially-decaying series.'''
        super(ExponentialTimeseries, self).__call__()
        self.last = self.final + self.initial * numpy.exp(self.rate * self.ticks)
        return self.last
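
# After t calls, an ExponentialTimeseries yields final + (initial - final) *
# exp(rate * t). For example, ExponentialTimeseries(-1e-3, 1, 0.2) starts near
# 1.0 and decays toward 0.2, reaching roughly 0.2 + 0.8 * exp(-1) == 0.494
# after 1000 calls.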

class Parameters(object):
    '''We are plain old data holding self-organizing map parameters.'''

    def __init__(self,
                 dimension=None,
                 shape=None,
                 metric=None,
                 learning_rate=None,
                 neighborhood_size=None,
                 noise_variance=None):
        '''This class holds standard parameters for self-organizing maps.

        dimension: The length of a neuron vector in a Map or a Gas.

        shape: The shape of the neuron topology in whatever Map or Gas we are
          building.

        metric: The distance metric to use when comparing cues to neurons in
          the map. Defaults to EuclideanMetric.

        learning_rate: This parameter determines the time course of the
          learning rate for a Map. It should be a callable that takes no
          arguments and returns a floating point value for the learning rate.

          If this parameter is None, a default learning rate series will be
          used, equivalent to ExponentialTimeseries(-1e-3, 1, 0.2).

          If this parameter is a numeric value, it will be used as the
          constant value for the learning rate: ConstantTimeseries(value).

        neighborhood_size: Like the learning rate, this parameter determines
          the time course of the neighborhood size. It should be a callable
          that takes no arguments and returns a neighborhood size for storing
          each cue.

          If this is None, a default neighborhood size series will be used.
          The initial size will be the maximum of the dimensions given in
          shape, and the decay will be -1e-3:
          ExponentialTimeseries(-1e-3, max(shape), 1).

          If this is a numeric value, it will be used as a constant
          neighborhood size: ConstantTimeseries(value).

        noise_variance: Like the learning rate and neighborhood size, this
          should be a callable that takes no arguments and returns a noise
          variance value.

          If this is None, no noise will be included in the created Maps.

          If this parameter is a number, it will be used as a constant noise
          variance: ConstantTimeseries(value).
        '''
        assert dimension is not None
        self.dimension = dimension

        assert shape is not None
        self.shape = shape

        self.metric = metric or EuclideanMetric()

        ET = ExponentialTimeseries
        CT = ConstantTimeseries

        self.learning_rate = learning_rate
        if isinstance(learning_rate, (float, int)):
            self.learning_rate = CT(learning_rate)
        if learning_rate is None:
            self.learning_rate = ET(-1e-3, 1, 0.2)

        self.neighborhood_size = neighborhood_size
        if isinstance(neighborhood_size, (float, int)):
            self.neighborhood_size = CT(neighborhood_size)
        if neighborhood_size is None:
            self.neighborhood_size = ET(-1e-3, max(shape), 1)

        self.noise_variance = noise_variance
        if isinstance(noise_variance, (float, int)):
            self.noise_variance = CT(noise_variance)
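
# A construction sketch (the values here are illustrative, not recommended
# settings): numeric arguments become ConstantTimeseries, callables are used
# as-is.
#
#   params = Parameters(dimension=3,
#                       shape=(10, 10),
#                       learning_rate=0.05,
#                       neighborhood_size=ExponentialTimeseries(-1e-2, 10, 1))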

def heatmap(raw, axes=(0, 1), lower=None, upper=None):
    '''Create a heat map image from the given raw matrix.

    raw: An array of values to use for the image pixels.
    axes: The axes in the array that we want to preserve for the final image.
      All other axes will be summed away.
    lower: If given, clip values in the matrix to this lower limit. If not
      given, raw.min() will be used.
    upper: If given, clip values in the matrix to this upper limit. If not
      given, raw.max() will be used.

    Returns an annotated Image object (as returned from _image).
    '''
    assert len(axes) == 2
    for ax in xrange(len(raw.shape) - 1, -1, -1):
        if ax in axes:
            continue
        raw = raw.sum(axis=ax)
    # pad auto-computed bounds outward slightly so extreme values stay in range.
    l = lower
    if l is None:
        l = raw.min()
        l *= l < 0 and 1.01 or 0.99
    u = upper
    if u is None:
        u = raw.max()
        u *= u > 0 and 1.01 or 0.99
    return _image(raw, l, u)

def colormap(raw, axes=(0, 1, 2), layers=(0, 1, 2)):
    '''Create an RGB image using the given layers of a 3D raw values matrix.

    raw: An array of raw values to use for the image.
    axes: The axes in the array that we want to preserve for the final image.
      All other axes will be summed away.
    layers: The indices of the third preserved axis that we should use for the
      red, green, and blue channels in the output image.

    Raw values will be scaled along each layer to lie in [lower, upper], where
    lower (upper) is the global lower (upper) bound of all values in each of
    the raw layers.

    Returns an Image object, as in the heatmap() function.
    '''
    assert len(axes) == len(layers) == 3
    for ax in xrange(len(raw.shape) - 1, -1, -1):
        if ax in axes:
            continue
        raw = raw.sum(axis=ax)
    u = -numpy.inf
    l = numpy.inf
    for i in layers:
        v = raw[:, :, i]
        l = min(l, v.min())
        u = max(u, v.max())
    # pad the bounds outward slightly so extreme values stay in range.
    l *= l < 0 and 1.01 or 0.99
    u *= u > 0 and 1.01 or 0.99
    return _image(raw[:, :, layers], l, u, 'RGB')

def _image(values, lower, upper, format='L'):
    '''Create a PIL image using the given 2D array of values.

    Pixel values in the range [lower, upper] are scaled linearly to [0, 1]
    before creating the image.

    Returns an Image object annotated with the lower and upper bounds that
    were used to scale the values to convert them to pixels.
    '''
    from PIL import Image
    ratios = (values - lower) / (upper - lower)
    img = Image.fromarray(numpy.array(256 * ratios, numpy.uint8), format)
    img.lower_bound = lower
    img.upper_bound = upper
    return img

def _zeros(shape, dtype='d'):
    '''Get a blank (all-zero) matrix with a certain shape.'''
    return numpy.zeros(shape, dtype=dtype)


def itershape(shape):
    '''Given a shape tuple, iterate over all indices in that shape.'''
    if not shape:
        yield ()
        return
    for i in xrange(shape[0]):
        for z in itershape(shape[1:]):
            yield (i, ) + z

def argsample(pdf, n=1):
    '''Return n indices drawn proportionally from a discrete mass vector.'''
    assert (pdf >= 0).all(), 'cannot sample from %r!' % pdf
    cdf = pdf.cumsum()
    return numpy.searchsorted(cdf, rng.uniform(0, cdf[-1], n))


def sample(pdf, n=1):
    '''Return n samples drawn proportionally from a discrete mass vector.'''
    assert len(pdf.shape) == 1
    return pdf[argsample(pdf, n)]
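
# An illustrative sketch: with most of the mass on the last index, most draws
# return that index.
#
#   mass = numpy.array([0.1, 0.1, 0.8])
#   argsample(mass, 4)   # e.g. array([2, 2, 0, 2])
#   sample(mass, 2)      # e.g. array([ 0.8,  0.8])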

class Map(object):
    '''Basic implementation of a rectangular N-dimensional self-organizing map.

    A Self-Organizing or Kohonen Map (henceforth just Map) is a group of
    lightweight processing units called neurons, which are here implemented
    as vectors of real numbers. Neurons in a Map are arranged in a specific
    topology, so that a given neuron is connected to a small, specific subset
    of the overall neurons in the Map. In addition, the Map uses a distance
    metric (e.g., Euclidean distance) for computing similarity between
    neurons and cue vectors, as described below.

    The Map accepts cues (vectors of real numbers) as inputs. In standard Map
    usage, cues represent some data point of interest. Normally, applications
    of Maps use input vectors like the activation patterns for an array of
    sensors, term frequency vectors for a document, etc. Cues are stored in
    the Map as follows: first, a "winner" neuron w is chosen from the Map,
    and, second, the neurons in the Map topologically near w are altered so
    that they become closer to the cue. Each of these steps is described
    briefly below.

    For the first step, the Map computes the distance between the cue and
    each of the Map neurons using its metric. The neuron closest to the cue
    under this metric is declared the "winner" w. Alternatively, the winner
    can be selected probabilistically based on the overall distance landscape.

    Next, the Map alters the neurons in the neighborhood of w, normally using
    some function of the difference between the cue and the neuron being
    modified. The weight of the alteration decreases exponentially as the
    topological distance from w increases. The learning rule for a neuron n is

      n += eta * exp(-d**2 / sigma**2) * (c - n)

    where eta is the learning rate, sigma is called the neighborhood size, d
    is the topological distance between n and w, and c is the cue vector
    being stored in the map. Eta and sigma normally decrease in value over
    time, to take advantage of the empirical machine learning benefits of
    simulated annealing.

    The storage mechanism in a Map has the effect of grouping cues with
    similar characteristics into similar areas of the Map. Because the winner
    (and its neighborhood) are altered to look more like the cues that they
    capture, the winner for a given cue will tend to win similar inputs in
    the future. This tends to cluster similar Map inputs, and can lead to
    interesting data organization patterns.
    '''
    def __init__(self, params):
        '''Initialize this Map.'''
        self._shape = params.shape
        self.dimension = params.dimension
        self.neurons = _zeros(self.shape + (self.dimension, ))

        self._metric = params.metric

        self._learning_rate = params.learning_rate
        self._neighborhood_size = params.neighborhood_size
        self._noise_variance = params.noise_variance

        # precompute a neighborhood mask for performing fast storage updates.
        # this mask has the same dimensionality as self.shape, but is almost
        # twice the size along each axis. the maximum value in the mask is 1,
        # occurring in the center. values decrease in a gaussian fashion from
        # the center.
        S = tuple(2 * size - 1 for size in self.shape)
        self._neighborhood_mask = _zeros(S)
        for coords in itershape(S):
            z = 0
            for axis, offset in enumerate(coords):
                d = offset + 1 - self.shape[axis]
                z += d * d
            # use a float divisor to avoid (python 2) integer division.
            self._neighborhood_mask[coords] = numpy.exp(-z / 2.0)
    @property
    def shape(self):
        return self._shape

    def neuron(self, coords):
        '''Get the current state of a specific neuron.'''
        return self.neurons[coords]

    def reset(self, f=None):
        '''Reset the neurons and timeseries in the Map.

        f: A callable that takes a neuron coordinate and returns a value for
          that neuron. Defaults to random values from the standard normal.
        '''
        self._learning_rate.reset()
        self._neighborhood_size.reset()
        if f is None:
            self.neurons = rng.randn(*self.neurons.shape)
        else:
            for z in itershape(self.shape):
                self.neurons[z] = f(z)
    def weights(self, distances):
        '''Get an array of learning weights to use for storing a cue.'''
        i = self.smallest(distances)
        z = []
        for axis, coord in enumerate(self.flat_to_coords(i)):
            offset = self.shape[axis] - coord - 1
            z.append(slice(offset, offset + self.shape[axis]))
        # slice a shape-sized window out of the precomputed mask, centered on
        # the winner, and sharpen it by the current neighborhood size.
        sigma = self._neighborhood_size()
        return self._neighborhood_mask[tuple(z)] ** (1.0 / sigma / sigma)
    def distances(self, cue):
        '''Get the distance of each neuron in the Map to a particular cue.'''
        z = numpy.resize(cue, self.neurons.shape)
        return self._metric(z, self.neurons)

    def flat_to_coords(self, i):
        '''Given a flattened index, convert it to a coordinate tuple.'''
        coords = []
        for limit in reversed(self.shape[1:]):
            i, j = divmod(i, limit)
            coords.append(j)
        coords.append(i)
        return tuple(reversed(coords))
    def winner(self, cue):
        '''Get the coordinates of the most similar neuron to the given cue.

        Returns a flat index; use flat_to_coords to convert this to a neuron
        index.
        '''
        return self.smallest(self.distances(cue))

    def sample(self, n):
        '''Get a sample of n neuron coordinates from the map.

        The returned values will be flat indices; use flat_to_coords to
        convert them to neuron indices.
        '''
        # randint's upper bound is exclusive, so every neuron index in
        # [0, count) can be drawn.
        return rng.randint(0, self.neurons.size / self.dimension, n)

    def smallest(self, distances):
        '''Get the index of the smallest element in the given distances array.

        Returns a flat index; use flat_to_coords to convert this to a neuron
        index.
        '''
        assert distances.shape == self.shape
        return distances.argmin()
    def learn(self, cue, weights=None, distances=None):
        '''Add a new cue vector to the Map, moving neurons as needed.'''
        if weights is None:
            if distances is None:
                distances = self.distances(cue)
            weights = self.weights(distances)
        assert weights.shape == self.shape
        # add a trailing axis so the weights broadcast across neuron vectors.
        weights.shape += (1, )
        delta = numpy.resize(cue, self.neurons.shape) - self.neurons
        eta = self._learning_rate()
        self.neurons += eta * weights * delta
        if self._noise_variance:
            self.neurons += rng.normal(
                0, self._noise_variance(), self.neurons.shape)

    def neuron_heatmap(self, axes=(0, 1), lower=None, upper=None):
        '''Return an image representation of this Map.'''
        return heatmap(self.neurons, axes, lower, upper)

    def distance_heatmap(self, cue, axes=(0, 1), lower=None, upper=None):
        '''Return an image representation of the distance to a cue.'''
        return heatmap(self.distances(cue), axes, lower, upper)
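
# A single-step sketch of the storage procedure described above (the sizes
# and the cue are illustrative):
#
#   som = Map(Parameters(dimension=2, shape=(4, 4)))
#   som.reset()
#   cue = numpy.array([0.5, -0.5])
#   d = som.distances(cue)     # (4, 4) array of metric distances
#   w = som.weights(d)         # gaussian bump centered on the winner
#   som.learn(cue, weights=w)  # neurons move toward the cue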

class Gas(Map):
    '''A neural Gas is a topologically unordered collection of neurons.

    Learning takes place in the Gas by ordering the neurons according to
    their distance from each cue that is presented. Neurons are updated using
    this sorted order, with an exponentially decreasing weight for neurons
    that are further (in sort order) from the cue.
    '''

    def __init__(self, params):
        '''Initialize this Gas. A Gas must have a 1D shape.'''
        super(Gas, self).__init__(params)
        assert len(params.shape) == 1
        self.N = params.shape[0]
    def weights(self, distances):
        # this is slightly different from a traditional gas, which uses a
        # linear negative exponential for update weights:
        #
        #   return numpy.exp(-ranks / sigma)
        #
        # quadratic weights more closely match the standard kohonen behavior.
        # note the double argsort: argsort alone yields the indices that would
        # sort the array, while argsort of argsort yields each neuron's rank.
        ranks = distances.argsort().argsort()
        z = ranks / self._neighborhood_size()
        return numpy.exp(-z * z)
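
# A Gas is constructed like a Map, but with a 1D shape giving the number of
# neurons (an illustrative sketch):
#
#   gas = Gas(Parameters(dimension=3, shape=(50, )))
#   gas.reset()
#   gas.learn(numpy.array([0.1, 0.2, 0.3]))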

def _array_without(a, i):
    '''Remove the ith row and column from 2D square array a.'''
    if i == 0:
        return a[1:, 1:].copy()
    if i == a.shape[0] - 1:
        return a[:-1, :-1].copy()
    return numpy.hstack((numpy.vstack((a[:i, :i], a[i+1:, :i])),
                         numpy.vstack((a[:i, i+1:], a[i+1:, i+1:]))))


def _vector_without(v, i):
    '''Remove the ith element (the ith row, for a 2D array) from v.'''
    if i == 0:
        return v[1:].copy()
    if i == v.shape[0] - 1:
        return v[:-1].copy()
    return numpy.concatenate((v[:i], v[i+1:]))

class GrowingGasParameters(Parameters):
    '''Parameters for Growing Neural Gases.'''

    def __init__(self,
                 growth_interval=2,
                 max_connection_age=5,
                 error_decay=0.99,
                 neighbor_error_decay=0.99,
                 **kwargs):
        super(GrowingGasParameters, self).__init__(**kwargs)

        self.growth_interval = growth_interval
        self.max_connection_age = max_connection_age

        self.error_decay = error_decay
        self.neighbor_error_decay = neighbor_error_decay

class GrowingGas(Gas):
    '''A Growing Neural Gas uses a variable number of variable-topology neurons.

    In essence, a GNG is similar to a standard Gas, but there is additional
    logic in this class for adding new neurons to better explain areas of the
    sample space that currently have large error.
    '''

    def __init__(self, params):
        '''Initialize a new Growing Gas with parameters.'''
        # set the size before calling the superclass constructor, since the
        # shape property (and thus the initial neurons array) depends on it.
        self._size = 2

        super(GrowingGas, self).__init__(params)

        self._growth_interval = params.growth_interval
        self._max_connection_age = params.max_connection_age

        self._error_decay = params.error_decay
        self._neighbor_error_decay = params.neighbor_error_decay

        self._errors = _zeros(self.shape)
        # connection ages between each pair of neurons; -1 means unconnected.
        self._connections = _zeros((self._size, self._size), '=i2') - 1

        self._cue_count = 0

    @property
    def shape(self):
        return (self._size, )
    def neighbors(self, i):
        '''Get the connection ages from neuron i to every other neuron.'''
        return self._connections[i]

    def _connect(self, a, b):
        self._set_connection(a, b, 0)

    def _age_connection(self, a, b):
        self._set_connection(a, b, self._connections[a, b] + 1)

    def _disconnect(self, a, b):
        self._set_connection(a, b, -1)

    def _set_connection(self, a, b, age):
        self._connections[a, b] = self._connections[b, a] = age
    def learn(self, cue, weights=None, distances=None):
        '''Store a cue in the gas.'''
        distances = self.distances(cue)

        # find the two closest neurons. connect them. add error to the winner.
        w = distances.argmin()
        d = distances[w]
        self._errors[w] += d * d
        distances[w] = 1 + distances.max()
        self._connect(w, distances.argmin())

        # move the winner and all of its neighbors toward the cue.
        eta = self._learning_rate()
        def adjust(i):
            self.neurons[i] += eta * (cue - self.neurons[i])
        adjust(w)
        for j, age in enumerate(self.neighbors(w)):
            # '=i2' ages are signed 16-bit, so cap at 32767 to avoid overflow.
            if 0 <= age < 32767:
                adjust(j)
                self._age_connection(w, j)

        # add noise.
        if self._noise_variance:
            self.neurons += rng.normal(
                0, self._noise_variance(), self.neurons.shape)

        # manipulate the gas topology by pruning and growing as needed.
        self._prune()
        self._cue_count += 1
        if (self._cue_count % self._growth_interval == 0 and
            self._size < self.N):
            self._grow()

        # decrease unit error.
        self._errors *= self._error_decay
    def _prune(self):
        '''Remove old connections, and prune any disconnected neurons.'''
        mask = numpy.where(self._connections > self._max_connection_age)
        if self._size == 2 or len(mask[0]) == 0:
            return

        # remove connections older than max_connection_age (set to -1).
        self._connections[mask] = -1

        # remove neurons that were disconnected after removing connections.
        indices, = numpy.where((self._connections < 0).all(axis=0))
        for i in indices[::-1]:
            self.neurons = _vector_without(self.neurons, i)
            self._errors = _vector_without(self._errors, i)
            self._connections = _array_without(self._connections, i)
            self._size -= 1
    def _grow(self):
        '''Add a single neuron between two high-error neurons.'''
        # identify the neuron with max error, and its max-error neighbor.
        q = self._errors.argmax()
        f = (self._errors * (self.neighbors(q) >= 0)).argmax()
        r = self._size

        # allocate a new neurons array.
        neurons = _zeros((r + 1, self.dimension))
        neurons[:r] = self.neurons
        self.neurons = neurons
        self.neurons[r] = (self.neurons[q] + self.neurons[f]) / 2

        # insert the new node between the two old nodes, connecting it to both.
        self._disconnect(q, f)
        conn = _zeros((r + 1, r + 1), '=i2') - 1
        conn[:r, :r] = self._connections
        self._connections = conn
        self._connect(q, r)
        self._connect(r, f)

        # update error for the new and old neurons.
        self._errors = numpy.concatenate((self._errors, [0]))
        self._errors[f] *= self._neighbor_error_decay
        self._errors[q] *= self._neighbor_error_decay
        self._errors[r] = (self._errors[f] + self._errors[q]) / 2

        self._size += 1
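
# A construction sketch (the values are illustrative): a gas of 2-d neurons
# that may grow to at most 100 units, growing every 5 cues:
#
#   params = GrowingGasParameters(dimension=2, shape=(100, ), growth_interval=5)
#   gng = GrowingGas(params)
#   gng.reset()
#   for cue in cues:   # cues: an iterable of length-2 arrays
#       gng.learn(cue)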

class Filter(object):
    '''A Filter is an estimate of the probability density of the inputs.'''

    def __init__(self, map, history=None):
        '''Initialize this Filter with an underlying Map implementation.

        history: A callable that returns values in the open interval (0, 1).
          These values determine how much new cues influence the activation
          state of the Filter.

          A 0 value would mean that no history is preserved (i.e. each new
          cue stored in the Filter completely determines the activity of the
          Filter), while a 1 value would mean that new cues have no impact on
          the activity of the Filter (i.e. the initial activity is the only
          activity that is ever used).
        '''
        self.map = map
        # start with a uniform activity distribution over the neurons.
        self.activity = _zeros(self.map.shape) + 1
        self.activity /= self.activity.sum()
        if history is None:
            history = ConstantTimeseries(0.7)
        self._history = history
    @property
    def shape(self):
        return self.map.shape

    def neuron(self, coords):
        return self.map.neuron(coords)

    def reset(self, f=None):
        return self.map.reset(f=f)

    def distances(self, cue):
        return self.map.distances(cue)

    def flat_to_coords(self, i):
        return self.map.flat_to_coords(i)

    def winner(self, cue):
        return self.map.winner(cue)

    def smallest(self, distances):
        return self.map.smallest(distances)

    def weights(self, distances):
        # bias learning toward under-used neurons by damping the weights of
        # highly active ones.
        return self.map.weights(distances) * (1 - self.activity)

    def sample(self, n):
        # sample neuron indices according to the estimated input density.
        return argsample(self.activity, n)
    def learn(self, cue, **kwargs):
        d = self.distances(cue)
        # weight each neuron by its rank in the distance ordering (note the
        # double argsort, which converts sort indices into ranks).
        p = numpy.exp(-d.argsort().argsort())
        l = self._history()
        self.activity = l * self.activity + (1 - l) * p / p.sum()
        self.map.learn(cue, **kwargs)
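

if __name__ == '__main__':
    # A small smoke test, not part of the original API: train a 5x5 map on
    # random 3-d cues, then wrap it in a Filter and keep training. The sizes
    # and the uniform cue distribution are arbitrary choices for illustration.
    params = Parameters(dimension=3, shape=(5, 5))
    som = Map(params)
    som.reset()
    for _ in xrange(100):
        som.learn(rng.rand(3))
    print 'winner for a random cue:', som.flat_to_coords(som.winner(rng.rand(3)))

    filt = Filter(som)
    for _ in xrange(100):
        filt.learn(rng.rand(3))
    print 'filter activity sums to:', filt.activity.sum()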