/scipy/ndimage/filters.py
http://github.com/scipy/scipy · Python
- # Copyright (C) 2003-2005 Peter J. Verveer
- #
- # Redistribution and use in source and binary forms, with or without
- # modification, are permitted provided that the following conditions
- # are met:
- #
- # 1. Redistributions of source code must retain the above copyright
- # notice, this list of conditions and the following disclaimer.
- #
- # 2. Redistributions in binary form must reproduce the above
- # copyright notice, this list of conditions and the following
- # disclaimer in the documentation and/or other materials provided
- # with the distribution.
- #
- # 3. The name of the author may not be used to endorse or promote
- # products derived from this software without specific prior
- # written permission.
- #
- # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- from collections.abc import Iterable
- import warnings
- import numpy
- import operator
- from numpy.core.multiarray import normalize_axis_index
- from . import _ni_support
- from . import _nd_image
- from . import _ni_docstrings
- __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
- 'prewitt', 'sobel', 'generic_laplace', 'laplace',
- 'gaussian_laplace', 'generic_gradient_magnitude',
- 'gaussian_gradient_magnitude', 'correlate', 'convolve',
- 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
- 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
- 'rank_filter', 'median_filter', 'percentile_filter',
- 'generic_filter1d', 'generic_filter']
- def _invalid_origin(origin, lenw):
- return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
- @_ni_docstrings.docfiller
- def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
- cval=0.0, origin=0):
- """Calculate a 1-D correlation along the given axis.
- The lines of the array along the given axis are correlated with the
- given weights.
- Parameters
- ----------
- %(input)s
- weights : array
- 1-D sequence of numbers.
- %(axis)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin)s
- Examples
- --------
- >>> from scipy.ndimage import correlate1d
- >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
- array([ 8, 26, 8, 12, 7, 28, 36, 9])
- """
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- output = _ni_support._get_output(output, input)
- weights = numpy.asarray(weights, dtype=numpy.float64)
- if weights.ndim != 1 or weights.shape[0] < 1:
- raise RuntimeError('no filter weights given')
- if not weights.flags.contiguous:
- weights = weights.copy()
- axis = normalize_axis_index(axis, input.ndim)
- if _invalid_origin(origin, len(weights)):
- raise ValueError('Invalid origin; origin must satisfy '
- '-(len(weights) // 2) <= origin <= '
- '(len(weights)-1) // 2')
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.correlate1d(input, weights, axis, output, mode, cval,
- origin)
- return output
- @_ni_docstrings.docfiller
- def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
- cval=0.0, origin=0):
- """Calculate a 1-D convolution along the given axis.
- The lines of the array along the given axis are convolved with the
- given weights.
- Parameters
- ----------
- %(input)s
- weights : ndarray
- 1-D sequence of numbers.
- %(axis)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin)s
- Returns
- -------
- convolve1d : ndarray
- Convolved array with same shape as input
- Examples
- --------
- >>> from scipy.ndimage import convolve1d
- >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
- array([14, 24, 4, 13, 12, 36, 27, 0])
- """
- weights = weights[::-1]
- origin = -origin
- if not len(weights) & 1:
- origin -= 1
- return correlate1d(input, weights, axis, output, mode, cval, origin)
- def _gaussian_kernel1d(sigma, order, radius):
- """
- Computes a 1-D Gaussian convolution kernel.
- """
- if order < 0:
- raise ValueError('order must be non-negative')
- exponent_range = numpy.arange(order + 1)
- sigma2 = sigma * sigma
- x = numpy.arange(-radius, radius+1)
- phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
- phi_x = phi_x / phi_x.sum()
- if order == 0:
- return phi_x
- else:
- # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
- # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
- # p'(x) = -1 / sigma ** 2
- # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
- # coefficients of q(x)
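- # For example, a single application with order=1 gives q(x) = -x / sigma**2,
- # so the returned kernel is -x / sigma**2 * phi(x), the first derivative of
- # the Gaussian.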
- q = numpy.zeros(order + 1)
- q[0] = 1
- D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
- P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
- Q_deriv = D + P
- for _ in range(order):
- q = Q_deriv.dot(q)
- q = (x[:, None] ** exponent_range).dot(q)
- return q * phi_x
- @_ni_docstrings.docfiller
- def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
- mode="reflect", cval=0.0, truncate=4.0):
- """1-D Gaussian filter.
- Parameters
- ----------
- %(input)s
- sigma : scalar
- standard deviation for Gaussian kernel
- %(axis)s
- order : int, optional
- An order of 0 corresponds to convolution with a Gaussian
- kernel. A positive order corresponds to convolution with
- that derivative of a Gaussian.
- %(output)s
- %(mode)s
- %(cval)s
- truncate : float, optional
- Truncate the filter at this many standard deviations.
- Default is 4.0.
- Returns
- -------
- gaussian_filter1d : ndarray
- Examples
- --------
- >>> from scipy.ndimage import gaussian_filter1d
- >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
- array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
- >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
- array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
- >>> import matplotlib.pyplot as plt
- >>> np.random.seed(280490)
- >>> x = np.random.randn(101).cumsum()
- >>> y3 = gaussian_filter1d(x, 3)
- >>> y6 = gaussian_filter1d(x, 6)
- >>> plt.plot(x, 'k', label='original data')
- >>> plt.plot(y3, '--', label='filtered, sigma=3')
- >>> plt.plot(y6, ':', label='filtered, sigma=6')
- >>> plt.legend()
- >>> plt.grid()
- >>> plt.show()
- """
- sd = float(sigma)
- # make the radius of the filter equal to truncate standard deviations
- lw = int(truncate * sd + 0.5)
- # Since we are calling correlate, not convolve, reverse the kernel
- weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
- return correlate1d(input, weights, axis, output, mode, cval, 0)
- @_ni_docstrings.docfiller
- def gaussian_filter(input, sigma, order=0, output=None,
- mode="reflect", cval=0.0, truncate=4.0):
- """Multidimensional Gaussian filter.
- Parameters
- ----------
- %(input)s
- sigma : scalar or sequence of scalars
- Standard deviation for Gaussian kernel. The standard
- deviations of the Gaussian filter are given for each axis as a
- sequence, or as a single number, in which case it is equal for
- all axes.
- order : int or sequence of ints, optional
- The order of the filter along each axis is given as a sequence
- of integers, or as a single number. An order of 0 corresponds
- to convolution with a Gaussian kernel. A positive order
- corresponds to convolution with that derivative of a Gaussian.
- %(output)s
- %(mode_multiple)s
- %(cval)s
- truncate : float
- Truncate the filter at this many standard deviations.
- Default is 4.0.
- Returns
- -------
- gaussian_filter : ndarray
- Returned array of same shape as `input`.
- Notes
- -----
- The multidimensional filter is implemented as a sequence of
- 1-D convolution filters. The intermediate arrays are
- stored in the same data type as the output. Therefore, for output
- types with a limited precision, the results may be imprecise
- because intermediate results may be stored with insufficient
- precision.
- Examples
- --------
- >>> from scipy.ndimage import gaussian_filter
- >>> a = np.arange(50, step=2).reshape((5,5))
- >>> a
- array([[ 0, 2, 4, 6, 8],
- [10, 12, 14, 16, 18],
- [20, 22, 24, 26, 28],
- [30, 32, 34, 36, 38],
- [40, 42, 44, 46, 48]])
- >>> gaussian_filter(a, sigma=1)
- array([[ 4, 6, 8, 9, 11],
- [10, 12, 14, 15, 17],
- [20, 22, 24, 25, 27],
- [29, 31, 33, 34, 36],
- [35, 37, 39, 40, 42]])
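- As a rough check of the separability note above (with a float input there
- is no intermediate rounding, so both routes should agree):
- >>> from scipy.ndimage import gaussian_filter1d
- >>> x = np.arange(25.).reshape(5, 5)
- >>> by_axis = gaussian_filter1d(gaussian_filter1d(x, 1., axis=0), 1., axis=1)
- >>> np.allclose(gaussian_filter(x, 1.), by_axis)
- True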
- >>> from scipy import misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = gaussian_filter(ascent, sigma=5)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- input = numpy.asarray(input)
- output = _ni_support._get_output(output, input)
- orders = _ni_support._normalize_sequence(order, input.ndim)
- sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
- modes = _ni_support._normalize_sequence(mode, input.ndim)
- axes = list(range(input.ndim))
- axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
- for ii in range(len(axes)) if sigmas[ii] > 1e-15]
- if len(axes) > 0:
- for axis, sigma, order, mode in axes:
- gaussian_filter1d(input, sigma, axis, order, output,
- mode, cval, truncate)
- input = output
- else:
- output[...] = input[...]
- return output
- @_ni_docstrings.docfiller
- def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
- """Calculate a Prewitt filter.
- Parameters
- ----------
- %(input)s
- %(axis)s
- %(output)s
- %(mode_multiple)s
- %(cval)s
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.prewitt(ascent)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- input = numpy.asarray(input)
- axis = normalize_axis_index(axis, input.ndim)
- output = _ni_support._get_output(output, input)
- modes = _ni_support._normalize_sequence(mode, input.ndim)
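- # Central difference [-1, 0, 1] along the requested axis, then a flat
- # [1, 1, 1] smoothing pass along every other axis.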
- correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
- axes = [ii for ii in range(input.ndim) if ii != axis]
- for ii in axes:
- correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
- return output
- @_ni_docstrings.docfiller
- def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
- """Calculate a Sobel filter.
- Parameters
- ----------
- %(input)s
- %(axis)s
- %(output)s
- %(mode_multiple)s
- %(cval)s
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.sobel(ascent)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- input = numpy.asarray(input)
- axis = normalize_axis_index(axis, input.ndim)
- output = _ni_support._get_output(output, input)
- modes = _ni_support._normalize_sequence(mode, input.ndim)
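- # Same structure as `prewitt`, but the cross-axis smoothing uses the
- # triangular [1, 2, 1] weights.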
- correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
- axes = [ii for ii in range(input.ndim) if ii != axis]
- for ii in axes:
- correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
- return output
- @_ni_docstrings.docfiller
- def generic_laplace(input, derivative2, output=None, mode="reflect",
- cval=0.0,
- extra_arguments=(),
- extra_keywords=None):
- """
- N-D Laplace filter using a provided second derivative function.
- Parameters
- ----------
- %(input)s
- derivative2 : callable
- Callable with the following signature::
- derivative2(input, axis, output, mode, cval,
- *extra_arguments, **extra_keywords)
- See `extra_arguments`, `extra_keywords` below.
- %(output)s
- %(mode_multiple)s
- %(cval)s
- %(extra_keywords)s
- %(extra_arguments)s
- """
- if extra_keywords is None:
- extra_keywords = {}
- input = numpy.asarray(input)
- output = _ni_support._get_output(output, input)
- axes = list(range(input.ndim))
- if len(axes) > 0:
- modes = _ni_support._normalize_sequence(mode, len(axes))
- derivative2(input, axes[0], output, modes[0], cval,
- *extra_arguments, **extra_keywords)
- for ii in range(1, len(axes)):
- tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
- *extra_arguments, **extra_keywords)
- output += tmp
- else:
- output[...] = input[...]
- return output
- @_ni_docstrings.docfiller
- def laplace(input, output=None, mode="reflect", cval=0.0):
- """N-D Laplace filter based on approximate second derivatives.
- Parameters
- ----------
- %(input)s
- %(output)s
- %(mode_multiple)s
- %(cval)s
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.laplace(ascent)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- def derivative2(input, axis, output, mode, cval):
- return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
- return generic_laplace(input, derivative2, output, mode, cval)
- @_ni_docstrings.docfiller
- def gaussian_laplace(input, sigma, output=None, mode="reflect",
- cval=0.0, **kwargs):
- """Multidimensional Laplace filter using Gaussian second derivatives.
- Parameters
- ----------
- %(input)s
- sigma : scalar or sequence of scalars
- The standard deviations of the Gaussian filter are given for
- each axis as a sequence, or as a single number, in which case
- it is equal for all axes.
- %(output)s
- %(mode_multiple)s
- %(cval)s
- Extra keyword arguments will be passed to gaussian_filter().
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> ascent = misc.ascent()
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
- >>> ax1.imshow(result)
- >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- input = numpy.asarray(input)
- def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
- order = [0] * input.ndim
- order[axis] = 2
- return gaussian_filter(input, sigma, order, output, mode, cval,
- **kwargs)
- return generic_laplace(input, derivative2, output, mode, cval,
- extra_arguments=(sigma,),
- extra_keywords=kwargs)
- @_ni_docstrings.docfiller
- def generic_gradient_magnitude(input, derivative, output=None,
- mode="reflect", cval=0.0,
- extra_arguments=(), extra_keywords=None):
- """Gradient magnitude using a provided gradient function.
- Parameters
- ----------
- %(input)s
- derivative : callable
- Callable with the following signature::
- derivative(input, axis, output, mode, cval,
- *extra_arguments, **extra_keywords)
- See `extra_arguments`, `extra_keywords` below.
- `derivative` can assume that `input` and `output` are ndarrays.
- Note that the output from `derivative` is modified inplace;
- be careful to copy important inputs before returning them.
- %(output)s
- %(mode_multiple)s
- %(cval)s
- %(extra_keywords)s
- %(extra_arguments)s
- """
- if extra_keywords is None:
- extra_keywords = {}
- input = numpy.asarray(input)
- output = _ni_support._get_output(output, input)
- axes = list(range(input.ndim))
- if len(axes) > 0:
- modes = _ni_support._normalize_sequence(mode, len(axes))
- derivative(input, axes[0], output, modes[0], cval,
- *extra_arguments, **extra_keywords)
- numpy.multiply(output, output, output)
- for ii in range(1, len(axes)):
- tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
- *extra_arguments, **extra_keywords)
- numpy.multiply(tmp, tmp, tmp)
- output += tmp
- # This allows the sqrt to work with a different default casting
- numpy.sqrt(output, output, casting='unsafe')
- else:
- output[...] = input[...]
- return output
- @_ni_docstrings.docfiller
- def gaussian_gradient_magnitude(input, sigma, output=None,
- mode="reflect", cval=0.0, **kwargs):
- """Multidimensional gradient magnitude using Gaussian derivatives.
- Parameters
- ----------
- %(input)s
- sigma : scalar or sequence of scalars
- The standard deviations of the Gaussian filter are given for
- each axis as a sequence, or as a single number, in which case
- it is equal for all axes.
- %(output)s
- %(mode_multiple)s
- %(cval)s
- Extra keyword arguments will be passed to gaussian_filter().
- Returns
- -------
- gaussian_gradient_magnitude : ndarray
- Filtered array. Has the same shape as `input`.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- input = numpy.asarray(input)
- def derivative(input, axis, output, mode, cval, sigma, **kwargs):
- order = [0] * input.ndim
- order[axis] = 1
- return gaussian_filter(input, sigma, order, output, mode,
- cval, **kwargs)
- return generic_gradient_magnitude(input, derivative, output, mode,
- cval, extra_arguments=(sigma,),
- extra_keywords=kwargs)
- def _correlate_or_convolve(input, weights, output, mode, cval, origin,
- convolution):
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- origins = _ni_support._normalize_sequence(origin, input.ndim)
- weights = numpy.asarray(weights, dtype=numpy.float64)
- wshape = [ii for ii in weights.shape if ii > 0]
- if len(wshape) != input.ndim:
- raise RuntimeError('filter weights array has incorrect shape.')
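- # Convolution mirrors the kernel along every axis and negates the origins,
- # with one extra sample of shift on axes of even length.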
- if convolution:
- weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
- for ii in range(len(origins)):
- origins[ii] = -origins[ii]
- if not weights.shape[ii] & 1:
- origins[ii] -= 1
- for origin, lenw in zip(origins, wshape):
- if _invalid_origin(origin, lenw):
- raise ValueError('Invalid origin; origin must satisfy '
- '-(weights.shape[k] // 2) <= origin[k] <= '
- '(weights.shape[k]-1) // 2')
- if not weights.flags.contiguous:
- weights = weights.copy()
- output = _ni_support._get_output(output, input)
- temp_needed = numpy.shares_memory(input, output)
- if temp_needed:
- # input and output arrays cannot share memory
- temp = output
- output = _ni_support._get_output(output.dtype, input)
- if not isinstance(mode, str) and isinstance(mode, Iterable):
- raise RuntimeError("A sequence of modes is not supported")
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.correlate(input, weights, output, mode, cval, origins)
- if temp_needed:
- temp[...] = output
- output = temp
- return output
- @_ni_docstrings.docfiller
- def correlate(input, weights, output=None, mode='reflect', cval=0.0,
- origin=0):
- """
- Multidimensional correlation.
- The array is correlated with the given kernel.
- Parameters
- ----------
- %(input)s
- weights : ndarray
- array of weights, same number of dimensions as input
- %(output)s
- %(mode)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- result : ndarray
- The result of correlation of `input` with `weights`.
- See Also
- --------
- convolve : Convolve an image with a kernel.
- Examples
- --------
- Correlation is the process of moving a filter mask, often referred to as a
- kernel, over the image and computing the sum of products at each location.
- >>> from scipy.ndimage import correlate
- >>> input_img = np.arange(25).reshape(5,5)
- >>> print(input_img)
- [[ 0 1 2 3 4]
- [ 5 6 7 8 9]
- [10 11 12 13 14]
- [15 16 17 18 19]
- [20 21 22 23 24]]
- Define a kernel (weights) for correlation. In this example, the kernel sums
- the center element and its four nearest neighbours (up, down, left and right).
- >>> weights = [[0, 1, 0],
- ... [1, 1, 1],
- ... [0, 1, 0]]
- We can calculate a correlation result:
- For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
- >>> correlate(input_img, weights)
- array([[ 6, 10, 15, 20, 24],
- [ 26, 30, 35, 40, 44],
- [ 51, 55, 60, 65, 69],
- [ 76, 80, 85, 90, 94],
- [ 96, 100, 105, 110, 114]])
- """
- return _correlate_or_convolve(input, weights, output, mode, cval,
- origin, False)
- @_ni_docstrings.docfiller
- def convolve(input, weights, output=None, mode='reflect', cval=0.0,
- origin=0):
- """
- Multidimensional convolution.
- The array is convolved with the given kernel.
- Parameters
- ----------
- %(input)s
- weights : array_like
- Array of weights, same number of dimensions as input
- %(output)s
- %(mode)s
- cval : scalar, optional
- Value to fill past edges of input if `mode` is 'constant'. Default
- is 0.0
- %(origin_multiple)s
- Returns
- -------
- result : ndarray
- The result of convolution of `input` with `weights`.
- See Also
- --------
- correlate : Correlate an image with a kernel.
- Notes
- -----
- Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
- W is the `weights` kernel,
- j is the N-D spatial index over :math:`W`,
- I is the `input` and k is the coordinate of the center of
- W, specified by `origin` in the input parameters.
- Examples
- --------
- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
- because in this case borders (i.e., where the `weights` kernel, centered
- on any one value, extends beyond an edge of `input`) are treated as zeros.
- >>> a = np.array([[1, 2, 0, 0],
- ... [5, 3, 0, 4],
- ... [0, 0, 0, 7],
- ... [9, 3, 0, 0]])
- >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
- >>> from scipy import ndimage
- >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
- array([[11, 10, 7, 4],
- [10, 3, 11, 11],
- [15, 12, 14, 7],
- [12, 3, 7, 0]])
- Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
- with 1.0's (and then extracting only the original region of the result).
- >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
- array([[13, 11, 8, 7],
- [11, 3, 11, 14],
- [16, 12, 14, 10],
- [15, 6, 10, 5]])
- With ``mode='reflect'`` (the default), outer values are reflected at the
- edge of `input` to fill in missing values.
- >>> b = np.array([[2, 0, 0],
- ... [1, 0, 0],
- ... [0, 0, 0]])
- >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
- >>> ndimage.convolve(b, k, mode='reflect')
- array([[5, 0, 0],
- [3, 0, 0],
- [1, 0, 0]])
- This includes diagonally at the corners.
- >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
- >>> ndimage.convolve(b, k)
- array([[4, 2, 0],
- [3, 2, 0],
- [1, 1, 0]])
- With ``mode='nearest'``, the single value nearest to an edge of `input` is
- repeated as many times as needed to match the overlapping `weights`.
- >>> c = np.array([[2, 0, 1],
- ... [1, 0, 0],
- ... [0, 0, 0]])
- >>> k = np.array([[0, 1, 0],
- ... [0, 1, 0],
- ... [0, 1, 0],
- ... [0, 1, 0],
- ... [0, 1, 0]])
- >>> ndimage.convolve(c, k, mode='nearest')
- array([[7, 0, 3],
- [5, 0, 2],
- [3, 0, 1]])
- """
- return _correlate_or_convolve(input, weights, output, mode, cval,
- origin, True)
- @_ni_docstrings.docfiller
- def uniform_filter1d(input, size, axis=-1, output=None,
- mode="reflect", cval=0.0, origin=0):
- """Calculate a 1-D uniform filter along the given axis.
- The lines of the array along the given axis are filtered with a
- uniform filter of given size.
- Parameters
- ----------
- %(input)s
- size : int
- length of uniform filter
- %(axis)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin)s
- Examples
- --------
- >>> from scipy.ndimage import uniform_filter1d
- >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
- array([4, 3, 4, 1, 4, 6, 6, 3])
- """
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- axis = normalize_axis_index(axis, input.ndim)
- if size < 1:
- raise RuntimeError('incorrect filter size')
- output = _ni_support._get_output(output, input)
- if (size // 2 + origin < 0) or (size // 2 + origin >= size):
- raise ValueError('invalid origin')
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
- origin)
- return output
- @_ni_docstrings.docfiller
- def uniform_filter(input, size=3, output=None, mode="reflect",
- cval=0.0, origin=0):
- """Multidimensional uniform filter.
- Parameters
- ----------
- %(input)s
- size : int or sequence of ints, optional
- The sizes of the uniform filter are given for each axis as a
- sequence, or as a single number, in which case the size is
- equal for all axes.
- %(output)s
- %(mode_multiple)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- uniform_filter : ndarray
- Filtered array. Has the same shape as `input`.
- Notes
- -----
- The multidimensional filter is implemented as a sequence of
- 1-D uniform filters. The intermediate arrays are stored
- in the same data type as the output. Therefore, for output types
- with a limited precision, the results may be imprecise because
- intermediate results may be stored with insufficient precision.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.uniform_filter(ascent, size=20)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- input = numpy.asarray(input)
- output = _ni_support._get_output(output, input)
- sizes = _ni_support._normalize_sequence(size, input.ndim)
- origins = _ni_support._normalize_sequence(origin, input.ndim)
- modes = _ni_support._normalize_sequence(mode, input.ndim)
- axes = list(range(input.ndim))
- axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
- for ii in range(len(axes)) if sizes[ii] > 1]
- if len(axes) > 0:
- for axis, size, origin, mode in axes:
- uniform_filter1d(input, int(size), axis, output, mode,
- cval, origin)
- input = output
- else:
- output[...] = input[...]
- return output
- @_ni_docstrings.docfiller
- def minimum_filter1d(input, size, axis=-1, output=None,
- mode="reflect", cval=0.0, origin=0):
- """Calculate a 1-D minimum filter along the given axis.
- The lines of the array along the given axis are filtered with a
- minimum filter of given size.
- Parameters
- ----------
- %(input)s
- size : int
- length along which to calculate 1D minimum
- %(axis)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin)s
- Notes
- -----
- This function implements the MINLIST algorithm [1]_, as described by
- Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
- the `input` length, regardless of filter size.
- References
- ----------
- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
- .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
- Examples
- --------
- >>> from scipy.ndimage import minimum_filter1d
- >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
- array([2, 0, 0, 0, 1, 1, 0, 0])
- """
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- axis = normalize_axis_index(axis, input.ndim)
- if size < 1:
- raise RuntimeError('incorrect filter size')
- output = _ni_support._get_output(output, input)
- if (size // 2 + origin < 0) or (size // 2 + origin >= size):
- raise ValueError('invalid origin')
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
- origin, 1)
- return output
- @_ni_docstrings.docfiller
- def maximum_filter1d(input, size, axis=-1, output=None,
- mode="reflect", cval=0.0, origin=0):
- """Calculate a 1-D maximum filter along the given axis.
- The lines of the array along the given axis are filtered with a
- maximum filter of given size.
- Parameters
- ----------
- %(input)s
- size : int
- Length along which to calculate the 1-D maximum.
- %(axis)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin)s
- Returns
- -------
- maximum1d : ndarray, None
- Maximum-filtered array with same shape as input.
- None if `output` is not None
- Notes
- -----
- This function implements the MAXLIST algorithm [1]_, as described by
- Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
- the `input` length, regardless of filter size.
- References
- ----------
- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
- .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
- Examples
- --------
- >>> from scipy.ndimage import maximum_filter1d
- >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
- array([8, 8, 8, 4, 9, 9, 9, 9])
- """
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- axis = normalize_axis_index(axis, input.ndim)
- if size < 1:
- raise RuntimeError('incorrect filter size')
- output = _ni_support._get_output(output, input)
- if (size // 2 + origin < 0) or (size // 2 + origin >= size):
- raise ValueError('invalid origin')
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
- origin, 0)
- return output
- def _min_or_max_filter(input, size, footprint, structure, output, mode,
- cval, origin, minimum):
- if (size is not None) and (footprint is not None):
- warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
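- # Use separable 1-D passes when only a size is given or the footprint is a
- # solid rectangle of True values; a structuring element or a sparse
- # footprint falls through to the general n-D min/max implementation.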
- if structure is None:
- if footprint is None:
- if size is None:
- raise RuntimeError("no footprint provided")
- separable = True
- else:
- footprint = numpy.asarray(footprint, dtype=bool)
- if not footprint.any():
- raise ValueError("All-zero footprint is not supported.")
- if footprint.all():
- size = footprint.shape
- footprint = None
- separable = True
- else:
- separable = False
- else:
- structure = numpy.asarray(structure, dtype=numpy.float64)
- separable = False
- if footprint is None:
- footprint = numpy.ones(structure.shape, bool)
- else:
- footprint = numpy.asarray(footprint, dtype=bool)
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- output = _ni_support._get_output(output, input)
- temp_needed = numpy.shares_memory(input, output)
- if temp_needed:
- # input and output arrays cannot share memory
- temp = output
- output = _ni_support._get_output(output.dtype, input)
- origins = _ni_support._normalize_sequence(origin, input.ndim)
- if separable:
- sizes = _ni_support._normalize_sequence(size, input.ndim)
- modes = _ni_support._normalize_sequence(mode, input.ndim)
- axes = list(range(input.ndim))
- axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
- for ii in range(len(axes)) if sizes[ii] > 1]
- if minimum:
- filter_ = minimum_filter1d
- else:
- filter_ = maximum_filter1d
- if len(axes) > 0:
- for axis, size, origin, mode in axes:
- filter_(input, int(size), axis, output, mode, cval, origin)
- input = output
- else:
- output[...] = input[...]
- else:
- fshape = [ii for ii in footprint.shape if ii > 0]
- if len(fshape) != input.ndim:
- raise RuntimeError('footprint array has incorrect shape.')
- for origin, lenf in zip(origins, fshape):
- if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
- raise ValueError('invalid origin')
- if not footprint.flags.contiguous:
- footprint = footprint.copy()
- if structure is not None:
- if len(structure.shape) != input.ndim:
- raise RuntimeError('structure array has incorrect shape')
- if not structure.flags.contiguous:
- structure = structure.copy()
- if not isinstance(mode, str) and isinstance(mode, Iterable):
- raise RuntimeError(
- "A sequence of modes is not supported for non-separable "
- "footprints")
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.min_or_max_filter(input, footprint, structure, output,
- mode, cval, origins, minimum)
- if temp_needed:
- temp[...] = output
- output = temp
- return output
- @_ni_docstrings.docfiller
- def minimum_filter(input, size=None, footprint=None, output=None,
- mode="reflect", cval=0.0, origin=0):
- """Calculate a multidimensional minimum filter.
- Parameters
- ----------
- %(input)s
- %(size_foot)s
- %(output)s
- %(mode_multiple)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- minimum_filter : ndarray
- Filtered array. Has the same shape as `input`.
- Notes
- -----
- A sequence of modes (one per axis) is only supported when the footprint is
- separable. Otherwise, a single mode string must be provided.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.minimum_filter(ascent, size=20)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- return _min_or_max_filter(input, size, footprint, None, output, mode,
- cval, origin, 1)
- @_ni_docstrings.docfiller
- def maximum_filter(input, size=None, footprint=None, output=None,
- mode="reflect", cval=0.0, origin=0):
- """Calculate a multidimensional maximum filter.
- Parameters
- ----------
- %(input)s
- %(size_foot)s
- %(output)s
- %(mode_multiple)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- maximum_filter : ndarray
- Filtered array. Has the same shape as `input`.
- Notes
- -----
- A sequence of modes (one per axis) is only supported when the footprint is
- separable. Otherwise, a single mode string must be provided.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.maximum_filter(ascent, size=20)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- return _min_or_max_filter(input, size, footprint, None, output, mode,
- cval, origin, 0)
- @_ni_docstrings.docfiller
- def _rank_filter(input, rank, size=None, footprint=None, output=None,
- mode="reflect", cval=0.0, origin=0, operation='rank'):
- if (size is not None) and (footprint is not None):
- warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- origins = _ni_support._normalize_sequence(origin, input.ndim)
- if footprint is None:
- if size is None:
- raise RuntimeError("no footprint or filter size provided")
- sizes = _ni_support._normalize_sequence(size, input.ndim)
- footprint = numpy.ones(sizes, dtype=bool)
- else:
- footprint = numpy.asarray(footprint, dtype=bool)
- fshape = [ii for ii in footprint.shape if ii > 0]
- if len(fshape) != input.ndim:
- raise RuntimeError('filter footprint array has incorrect shape.')
- for origin, lenf in zip(origins, fshape):
- if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
- raise ValueError('invalid origin')
- if not footprint.flags.contiguous:
- footprint = footprint.copy()
- filter_size = numpy.where(footprint, 1, 0).sum()
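- # Map the requested operation onto a rank within the sorted footprint
- # neighborhood: the median is the middle element and a percentile p maps to
- # int(filter_size * p / 100); ranks of 0 and filter_size - 1 are dispatched
- # to the dedicated minimum/maximum filters below.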
- if operation == 'median':
- rank = filter_size // 2
- elif operation == 'percentile':
- percentile = rank
- if percentile < 0.0:
- percentile += 100.0
- if percentile < 0 or percentile > 100:
- raise RuntimeError('invalid percentile')
- if percentile == 100.0:
- rank = filter_size - 1
- else:
- rank = int(float(filter_size) * percentile / 100.0)
- if rank < 0:
- rank += filter_size
- if rank < 0 or rank >= filter_size:
- raise RuntimeError('rank not within filter footprint size')
- if rank == 0:
- return minimum_filter(input, None, footprint, output, mode, cval,
- origins)
- elif rank == filter_size - 1:
- return maximum_filter(input, None, footprint, output, mode, cval,
- origins)
- else:
- output = _ni_support._get_output(output, input)
- temp_needed = numpy.shares_memory(input, output)
- if temp_needed:
- # input and output arrays cannot share memory
- temp = output
- output = _ni_support._get_output(output.dtype, input)
- if not isinstance(mode, str) and isinstance(mode, Iterable):
- raise RuntimeError(
- "A sequence of modes is not supported by non-separable rank "
- "filters")
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
- origins)
- if temp_needed:
- temp[...] = output
- output = temp
- return output
- @_ni_docstrings.docfiller
- def rank_filter(input, rank, size=None, footprint=None, output=None,
- mode="reflect", cval=0.0, origin=0):
- """Calculate a multidimensional rank filter.
- Parameters
- ----------
- %(input)s
- rank : int
- The rank parameter may be less than zero, i.e., rank = -1
- indicates the largest element.
- %(size_foot)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- rank_filter : ndarray
- Filtered array. Has the same shape as `input`.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
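- A negative rank counts back from the largest element; for instance,
- ``rank=-1`` selects the maximum within the footprint (small illustrative
- check):
- >>> a = np.array([[3, 1, 4], [1, 5, 9], [2, 6, 5]])
- >>> np.array_equal(ndimage.rank_filter(a, rank=-1, size=3),
- ...                ndimage.maximum_filter(a, size=3))
- True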
- """
- rank = operator.index(rank)
- return _rank_filter(input, rank, size, footprint, output, mode, cval,
- origin, 'rank')
- @_ni_docstrings.docfiller
- def median_filter(input, size=None, footprint=None, output=None,
- mode="reflect", cval=0.0, origin=0):
- """
- Calculate a multidimensional median filter.
- Parameters
- ----------
- %(input)s
- %(size_foot)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- median_filter : ndarray
- Filtered array. Has the same shape as `input`.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.median_filter(ascent, size=20)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- return _rank_filter(input, 0, size, footprint, output, mode, cval,
- origin, 'median')
- @_ni_docstrings.docfiller
- def percentile_filter(input, percentile, size=None, footprint=None,
- output=None, mode="reflect", cval=0.0, origin=0):
- """Calculate a multidimensional percentile filter.
- Parameters
- ----------
- %(input)s
- percentile : scalar
- The percentile parameter may be less than zero, i.e.,
- percentile = -20 equals percentile = 80.
- %(size_foot)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin_multiple)s
- Returns
- -------
- percentile_filter : ndarray
- Filtered array. Has the same shape as `input`.
- Examples
- --------
- >>> from scipy import ndimage, misc
- >>> import matplotlib.pyplot as plt
- >>> fig = plt.figure()
- >>> plt.gray() # show the filtered result in grayscale
- >>> ax1 = fig.add_subplot(121) # left side
- >>> ax2 = fig.add_subplot(122) # right side
- >>> ascent = misc.ascent()
- >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
- >>> ax1.imshow(ascent)
- >>> ax2.imshow(result)
- >>> plt.show()
- """
- return _rank_filter(input, percentile, size, footprint, output, mode,
- cval, origin, 'percentile')
- @_ni_docstrings.docfiller
- def generic_filter1d(input, function, filter_size, axis=-1,
- output=None, mode="reflect", cval=0.0, origin=0,
- extra_arguments=(), extra_keywords=None):
- """Calculate a 1-D filter along the given axis.
- `generic_filter1d` iterates over the lines of the array, calling the
- given function at each line. The function receives two arguments: the
- input line and the output line. Both are 1-D
- double arrays. The input line is extended appropriately according
- to the filter size and origin. The output line must be modified
- in-place with the result.
- Parameters
- ----------
- %(input)s
- function : {callable, scipy.LowLevelCallable}
- Function to apply along given axis.
- filter_size : scalar
- Length of the filter.
- %(axis)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin)s
- %(extra_arguments)s
- %(extra_keywords)s
- Notes
- -----
- This function also accepts low-level callback functions with one of
- the following signatures and wrapped in `scipy.LowLevelCallable`:
- .. code:: c
- int function(double *input_line, npy_intp input_length,
- double *output_line, npy_intp output_length,
- void *user_data)
- int function(double *input_line, intptr_t input_length,
- double *output_line, intptr_t output_length,
- void *user_data)
- The calling function iterates over the lines of the input and output
- arrays, calling the callback function at each line. The current line
- is extended according to the border conditions set by the calling
- function, and the result is copied into the array that is passed
- through ``input_line``. The length of the input line (after extension)
- is passed through ``input_length``. The callback function should apply
- the filter and store the result in the array passed through
- ``output_line``. The length of the output line is passed through
- ``output_length``. ``user_data`` is the data pointer provided
- to `scipy.LowLevelCallable` as-is.
- The callback function must return an integer error status: one on success
- and zero if something went wrong. If an error occurs, you should normally
- set the Python error status with an informative message before returning,
- otherwise a default error message is set by the calling function.
- In addition, some other low-level function pointer specifications
- are accepted, but these are for backward compatibility only and should
- not be used in new code.
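- For a pure Python callback the same contract applies: the callable receives
- the extended input line and the output line and must fill the output line
- in place. A minimal sketch (the ``running_mean`` helper is illustrative,
- not part of the API):
- >>> import numpy as np
- >>> from scipy.ndimage import generic_filter1d, uniform_filter1d
- >>> def running_mean(input_line, output_line):
- ...     # the window for output index i is input_line[i:i + filter_size]
- ...     for i in range(output_line.size):
- ...         output_line[i] = input_line[i:i + 3].mean()
- >>> x = np.arange(8.)
- >>> np.allclose(generic_filter1d(x, running_mean, filter_size=3),
- ...             uniform_filter1d(x, size=3))
- True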
- """
- if extra_keywords is None:
- extra_keywords = {}
- input = numpy.asarray(input)
- if numpy.iscomplexobj(input):
- raise TypeError('Complex type not supported')
- output = _ni_support._get_output(output, input)
- if filter_size < 1:
- raise RuntimeError('invalid filter size')
- axis = normalize_axis_index(axis, input.ndim)
- if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
- filter_size):
- raise ValueError('invalid origin')
- mode = _ni_support._extend_mode_to_code(mode)
- _nd_image.generic_filter1d(input, function, filter_size, axis, output,
- mode, cval, origin, extra_arguments,
- extra_keywords)
- return output
- @_ni_docstrings.docfiller
- def generic_filter(input, function, size=None, footprint=None,
- output=None, mode="reflect", cval=0.0, origin=0,
- extra_arguments=(), extra_keywords=None):
- """Calculate a multidimensional filter using the given function.
- At each element the provided function is called. The input values
- within the filter footprint at that element are passed to the function
- as a 1-D array of double values.
- Parameters
- ----------
- %(input)s
- function : {callable, scipy.LowLevelCallable}
- Function to apply at each element.
- %(size_foot)s
- %(output)s
- %(mode)s
- %(cval)s
- %(origin_multiple)s
- %(extra_arguments)s
- %(extra_keywords)s
- Notes
- -----
- This function also accepts low-level callback functions with one of
- the following signatures and wrapped in `scipy.LowLevelCallable`:
- .. code:: c
- int callback(double *buffer, npy_intp filter_size,
- double *return_value, void *user_data)
- int callback(double *buffer, intptr_t filter_size,
- double *return_value,…