"""
 | 
						|
The image module supports basic image loading, rescaling and display
 | 
						|
operations.
 | 
						|
"""
 | 
						|
 | 
						|
import math
 | 
						|
import os
 | 
						|
import logging
 | 
						|
from pathlib import Path
 | 
						|
import warnings
 | 
						|
 | 
						|
import numpy as np
 | 
						|
import PIL.Image
 | 
						|
import PIL.PngImagePlugin
 | 
						|
 | 
						|
import matplotlib as mpl
 | 
						|
from matplotlib import _api, cbook
 | 
						|
# For clarity, names from _image are given explicitly in this module
 | 
						|
from matplotlib import _image
 | 
						|
# For user convenience, the names from _image are also imported into
 | 
						|
# the image namespace
 | 
						|
from matplotlib._image import *  # noqa: F401, F403
 | 
						|
import matplotlib.artist as martist
 | 
						|
import matplotlib.colorizer as mcolorizer
 | 
						|
from matplotlib.backend_bases import FigureCanvasBase
 | 
						|
import matplotlib.colors as mcolors
 | 
						|
from matplotlib.transforms import (
 | 
						|
    Affine2D, BboxBase, Bbox, BboxTransform, BboxTransformTo,
 | 
						|
    IdentityTransform, TransformedBbox)
 | 
						|
 | 
						|
_log = logging.getLogger(__name__)
 | 
						|
 | 
						|
# map interpolation strings to module constants
 | 
						|
_interpd_ = {
 | 
						|
    'auto': _image.NEAREST,  # this will use nearest or Hanning...
 | 
						|
    'none': _image.NEAREST,  # fall back to nearest when not supported
 | 
						|
    'nearest': _image.NEAREST,
 | 
						|
    'bilinear': _image.BILINEAR,
 | 
						|
    'bicubic': _image.BICUBIC,
 | 
						|
    'spline16': _image.SPLINE16,
 | 
						|
    'spline36': _image.SPLINE36,
 | 
						|
    'hanning': _image.HANNING,
 | 
						|
    'hamming': _image.HAMMING,
 | 
						|
    'hermite': _image.HERMITE,
 | 
						|
    'kaiser': _image.KAISER,
 | 
						|
    'quadric': _image.QUADRIC,
 | 
						|
    'catrom': _image.CATROM,
 | 
						|
    'gaussian': _image.GAUSSIAN,
 | 
						|
    'bessel': _image.BESSEL,
 | 
						|
    'mitchell': _image.MITCHELL,
 | 
						|
    'sinc': _image.SINC,
 | 
						|
    'lanczos': _image.LANCZOS,
 | 
						|
    'blackman': _image.BLACKMAN,
 | 
						|
    'antialiased': _image.NEAREST,  # this will use nearest or Hanning...
 | 
						|
}
 | 
						|
 | 
						|
interpolations_names = set(_interpd_)
 | 
						|
 | 
						|
 | 
						|
def composite_images(images, renderer, magnification=1.0):
    """
    Composite a number of RGBA images into one.  The images are
    composited in the order in which they appear in the *images* list.

    Parameters
    ----------
    images : list of Images
        Each must have a `make_image` method.  For each image,
        `can_composite` should return `True`, though this is not
        enforced by this function.  Each image must have a purely
        affine transformation with no shear.

    renderer : `.RendererBase`

    magnification : float, default: 1
        The additional magnification to apply for the renderer in use.

    Returns
    -------
    image : (M, N, 4) `numpy.uint8` array
        The composited RGBA image.
    offset_x, offset_y : float
        The (left, bottom) offset where the composited image should be placed
        in the output figure.
    """
    if len(images) == 0:
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0

    parts = []
    bboxes = []
    for image in images:
        data, x, y, trans = image.make_image(renderer, magnification)
        if data is not None:
            x *= magnification
            y *= magnification
            parts.append((data, x, y, image._get_scalar_alpha()))
            bboxes.append(
                Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))

    if len(parts) == 0:
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0

    bbox = Bbox.union(bboxes)

    output = np.zeros(
        (int(bbox.height), int(bbox.width), 4), dtype=np.uint8)

    for data, x, y, alpha in parts:
        trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)
        _image.resample(data, output, trans, _image.NEAREST,
                        resample=False, alpha=alpha)

    return output, bbox.x0 / magnification, bbox.y0 / magnification

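# Illustrative sketch (not part of the original module): driving
# `composite_images` by hand.  It assumes an Agg-based canvas so that a
# renderer is available via ``fig.canvas.get_renderer()``; the names below
# are examples only, not an official API.
def _example_composite_images():  # documentation sketch only
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ims = [ax.imshow(np.arange(64).reshape(8, 8), extent=(0, 1, 0, 1)),
           ax.imshow(np.arange(64).reshape(8, 8), extent=(1, 2, 0, 1))]
    fig.canvas.draw()  # ensure a renderer and up-to-date limits exist
    rgba, ox, oy = composite_images(ims, fig.canvas.get_renderer())
    # ``rgba`` is an (M, N, 4) uint8 array covering both images; (ox, oy) is
    # the (left, bottom) offset at which it should be placed in the figure.
    return rgba, ox, oy
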
def _draw_list_compositing_images(
        renderer, parent, artists, suppress_composite=None):
    """
    Draw a sorted list of artists, compositing images into a single
    image where possible.

    For internal Matplotlib use only: It is here to reduce duplication
    between `Figure.draw` and `Axes.draw`, but otherwise should not be
    generally useful.
    """
    has_images = any(isinstance(x, _ImageBase) for x in artists)

    # override the renderer default if suppressComposite is not None
    not_composite = (suppress_composite if suppress_composite is not None
                     else renderer.option_image_nocomposite())

    if not_composite or not has_images:
        for a in artists:
            a.draw(renderer)
    else:
        # Composite any adjacent images together
        image_group = []
        mag = renderer.get_image_magnification()

        def flush_images():
            if len(image_group) == 1:
                image_group[0].draw(renderer)
            elif len(image_group) > 1:
                data, l, b = composite_images(image_group, renderer, mag)
                if data.size != 0:
                    gc = renderer.new_gc()
                    gc.set_clip_rectangle(parent.bbox)
                    gc.set_clip_path(parent.get_clip_path())
                    renderer.draw_image(gc, round(l), round(b), data)
                    gc.restore()
            del image_group[:]

        for a in artists:
            if (isinstance(a, _ImageBase) and a.can_composite() and
                    a.get_clip_on() and not a.get_clip_path()):
                image_group.append(a)
            else:
                flush_images()
                a.draw(renderer)
        flush_images()

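# Illustrative sketch (not part of the original module): compositing of
# adjacent images during a draw can be disabled per figure; this is how the
# *suppress_composite* argument above is normally driven (``Figure.draw``
# forwards ``Figure.suppressComposite``).  Names below are examples only.
def _example_suppress_composite():  # documentation sketch only
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.imshow([[0, 1], [1, 0]], extent=(0, 1, 0, 1))
    ax.imshow([[1, 0], [0, 1]], extent=(1, 2, 0, 1))
    fig.suppressComposite = True  # draw each image separately
    fig.canvas.draw()
    return fig
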
def _resample(
        image_obj, data, out_shape, transform, *, resample=None, alpha=1):
    """
    Convenience wrapper around `._image.resample` to resample *data* to
    *out_shape* (with a third dimension if *data* is RGBA) that takes care of
    allocating the output array and fetching the relevant properties from the
    Image object *image_obj*.
    """
    # AGG can only handle coordinates smaller than 24-bit signed integers,
    # so warn and downsample if the input data is larger than what
    # _image.resample can handle.
    msg = ('Data with more than {n} cannot be accurately displayed. '
           'Downsampling to less than {n} before displaying. '
           'To remove this warning, manually downsample your data.')
    if data.shape[1] > 2**23:
        warnings.warn(msg.format(n='2**23 columns'))
        step = int(np.ceil(data.shape[1] / 2**23))
        data = data[:, ::step]
        transform = Affine2D().scale(step, 1) + transform
    if data.shape[0] > 2**24:
        warnings.warn(msg.format(n='2**24 rows'))
        step = int(np.ceil(data.shape[0] / 2**24))
        data = data[::step, :]
        transform = Affine2D().scale(1, step) + transform
    # decide if we need to apply anti-aliasing if the data is upsampled:
    # compare the number of displayed pixels to the number of
    # the data pixels.
    interpolation = image_obj.get_interpolation()
    if interpolation in ['antialiased', 'auto']:
        # don't antialias if upsampling by an integer number or
        # if zooming in more than a factor of 3
        pos = np.array([[0, 0], [data.shape[1], data.shape[0]]])
        disp = transform.transform(pos)
        dispx = np.abs(np.diff(disp[:, 0]))
        dispy = np.abs(np.diff(disp[:, 1]))
        if ((dispx > 3 * data.shape[1] or
                dispx == data.shape[1] or
                dispx == 2 * data.shape[1]) and
            (dispy > 3 * data.shape[0] or
                dispy == data.shape[0] or
                dispy == 2 * data.shape[0])):
            interpolation = 'nearest'
        else:
            interpolation = 'hanning'
    out = np.zeros(out_shape + data.shape[2:], data.dtype)  # 2D->2D, 3D->3D.
    if resample is None:
        resample = image_obj.get_resample()
    _image.resample(data, out, transform,
                    _interpd_[interpolation],
                    resample,
                    alpha,
                    image_obj.get_filternorm(),
                    image_obj.get_filterrad())
    return out

def _rgb_to_rgba(A):
    """
    Convert an RGB image to RGBA, as required by the image resample C++
    extension.
    """
    rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)
    rgba[:, :, :3] = A
    if rgba.dtype == np.uint8:
        rgba[:, :, 3] = 255
    else:
        rgba[:, :, 3] = 1.0
    return rgba

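# Illustrative sketch (not part of the original module): `_rgb_to_rgba` adds
# a fully opaque alpha channel whose scale matches the input dtype.
def _example_rgb_to_rgba():  # documentation sketch only
    rgb_u8 = np.zeros((2, 2, 3), dtype=np.uint8)
    assert _rgb_to_rgba(rgb_u8)[..., 3].max() == 255   # uint8 -> alpha 255
    rgb_f64 = np.zeros((2, 2, 3), dtype=np.float64)
    assert _rgb_to_rgba(rgb_f64)[..., 3].max() == 1.0  # float -> alpha 1.0
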
class _ImageBase(mcolorizer.ColorizingArtist):
    """
    Base class for images.

    interpolation and cmap default to their rc settings

    cmap is a colors.Colormap instance
    norm is a colors.Normalize instance to map luminance to 0-1

    extent is data axes (left, right, bottom, top) for making image plots
    registered with data plots.  Default is to label the pixel
    centers with the zero-based row and column indices.

    Additional kwargs are matplotlib.artist properties
    """
    zorder = 0

    def __init__(self, ax,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 interpolation=None,
                 origin=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 *,
                 interpolation_stage=None,
                 **kwargs
                 ):
        super().__init__(self._get_colorizer(cmap, norm, colorizer))
        if origin is None:
            origin = mpl.rcParams['image.origin']
        _api.check_in_list(["upper", "lower"], origin=origin)
        self.origin = origin
        self.set_filternorm(filternorm)
        self.set_filterrad(filterrad)
        self.set_interpolation(interpolation)
        self.set_interpolation_stage(interpolation_stage)
        self.set_resample(resample)
        self.axes = ax

        self._imcache = None

        self._internal_update(kwargs)

    def __str__(self):
        try:
            shape = self.get_shape()
            return f"{type(self).__name__}(shape={shape!r})"
        except RuntimeError:
            return type(self).__name__

    def __getstate__(self):
        # Save some space on the pickle by not saving the cache.
        return {**super().__getstate__(), "_imcache": None}

    def get_size(self):
        """Return the size of the image as tuple (numrows, numcols)."""
        return self.get_shape()[:2]

    def get_shape(self):
        """
        Return the shape of the image as tuple (numrows, numcols, channels).
        """
        if self._A is None:
            raise RuntimeError('You must first set the image array')

        return self._A.shape

    def set_alpha(self, alpha):
        """
        Set the alpha value used for blending - not supported on all backends.

        Parameters
        ----------
        alpha : float or 2D array-like or None
        """
        martist.Artist._set_alpha_for_array(self, alpha)
        if np.ndim(alpha) not in (0, 2):
            raise TypeError('alpha must be a float, two-dimensional '
                            'array, or None')
        self._imcache = None

    def _get_scalar_alpha(self):
        """
        Get a scalar alpha value to be applied to the artist as a whole.

        If the alpha value is a matrix, the method returns 1.0 because pixels
        have individual alpha values (see `~._ImageBase._make_image` for
        details). If the alpha value is a scalar, the method returns said value
        to be applied to the artist as a whole because pixels do not have
        individual alpha values.
        """
        return 1.0 if self._alpha is None or np.ndim(self._alpha) > 0 \
            else self._alpha

    def changed(self):
        """
        Call this whenever the mappable is changed so observers can update.
        """
        self._imcache = None
        super().changed()

    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale, and colormap the image *A* from the given *in_bbox*
        (in data space), to the given *out_bbox* (in pixel space) clipped to
        the given *clip_bbox* (also in pixel space), and magnified by the
        *magnification* factor.

        Parameters
        ----------
        A : ndarray

            - a (M, N) array interpreted as scalar (greyscale) image,
              with one of the dtypes `~numpy.float32`, `~numpy.float64`,
              `~numpy.float128`, `~numpy.uint16` or `~numpy.uint8`.
            - (M, N, 4) RGBA image with a dtype of `~numpy.float32`,
              `~numpy.float64`, `~numpy.float128`, or `~numpy.uint8`.

        in_bbox : `~matplotlib.transforms.Bbox`

        out_bbox : `~matplotlib.transforms.Bbox`

        clip_bbox : `~matplotlib.transforms.Bbox`

        magnification : float, default: 1

        unsampled : bool, default: False
            If True, the image will not be scaled, but an appropriate
            affine transformation will be returned instead.

        round_to_pixel_border : bool, default: True
            If True, the output image size will be rounded to the nearest pixel
            boundary.  This makes the images align correctly with the Axes.
            It should not be used if exact scaling is needed, such as for
            `.FigureImage`.

        Returns
        -------
        image : (M, N, 4) `numpy.uint8` array
            The RGBA image, resampled unless *unsampled* is True.
        x, y : float
            The upper left corner where the image should be drawn, in pixel
            space.
        trans : `~matplotlib.transforms.Affine2D`
            The affine transformation from image to pixel space.
        """
        if A is None:
            raise RuntimeError('You must first set the image '
                               'array or the image attribute')
        if A.size == 0:
            raise RuntimeError("_make_image must get a non-empty image. "
                               "Your Artist's draw method must filter before "
                               "this method is called.")

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + (Affine2D()
                .translate(-clipped_bbox.x0, -clipped_bbox.y0)
                .scale(magnification)))

        # So that the image is aligned with the edge of the Axes, we want to
        # round up the output width to the next integer.  This also means
        # scaling the transform slightly to account for the extra subpixel.
        if ((not unsampled) and t.is_affine and round_to_pixel_border and
                (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = math.ceil(out_width_base)
            out_height = math.ceil(out_height_base)
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)
        out_shape = (out_height, out_width)

        if not unsampled:
            if not (A.ndim == 2 or A.ndim == 3 and A.shape[-1] in (3, 4)):
                raise ValueError(f"Invalid shape {A.shape} for image data")

            # if antialiased, this needs to change as window sizes
            # change:
            interpolation_stage = self._interpolation_stage
            if interpolation_stage in ['antialiased', 'auto']:
                pos = np.array([[0, 0], [A.shape[1], A.shape[0]]])
                disp = t.transform(pos)
                dispx = np.abs(np.diff(disp[:, 0])) / A.shape[1]
                dispy = np.abs(np.diff(disp[:, 1])) / A.shape[0]
                if (dispx < 3) or (dispy < 3):
                    interpolation_stage = 'rgba'
                else:
                    interpolation_stage = 'data'

            if A.ndim == 2 and interpolation_stage == 'data':
                # if we are a 2D array, then we are running through the
                # norm + colormap transformation.  However, in general the
                # input data is not going to match the size on the screen so we
                # have to resample to the correct number of pixels

                if A.dtype.kind == 'f':  # Float dtype: scale to same dtype.
                    scaled_dtype = np.dtype("f8" if A.dtype.itemsize > 4 else "f4")
                    if scaled_dtype.itemsize < A.dtype.itemsize:
                        _api.warn_external(f"Casting input data from {A.dtype}"
                                           f" to {scaled_dtype} for imshow.")
                else:  # Int dtype, likely.
                    # TODO slice input array first
                    # Scale to appropriately sized float: use float32 if the
                    # dynamic range is small, to limit the memory footprint.
                    da = A.max().astype("f8") - A.min().astype("f8")
                    scaled_dtype = "f8" if da > 1e8 else "f4"

                # resample the input data to the correct resolution and shape
                A_resampled = _resample(self, A.astype(scaled_dtype), out_shape, t)

                # if using NoNorm, cast back to the original datatype
                if isinstance(self.norm, mcolors.NoNorm):
                    A_resampled = A_resampled.astype(A.dtype)

                # Compute out_mask (what screen pixels include "bad" data
                # pixels) and out_alpha (to what extent screen pixels are
                # covered by data pixels: 0 outside the data extent, 1 inside
                # (even for bad data), and intermediate values at the edges).
                mask = (np.where(A.mask, np.float32(np.nan), np.float32(1))
                        if A.mask.shape == A.shape  # nontrivial mask
                        else np.ones_like(A, np.float32))
                # we always have to interpolate the mask to account for
                # non-affine transformations
                out_alpha = _resample(self, mask, out_shape, t, resample=True)
                del mask  # Make sure we don't use mask anymore!
                out_mask = np.isnan(out_alpha)
                out_alpha[out_mask] = 1
                # Apply the pixel-by-pixel alpha values if present
                alpha = self.get_alpha()
                if alpha is not None and np.ndim(alpha) > 0:
                    out_alpha *= _resample(self, alpha, out_shape, t, resample=True)
                # mask and run through the norm
                resampled_masked = np.ma.masked_array(A_resampled, out_mask)
                output = self.norm(resampled_masked)
            else:
                if A.ndim == 2:  # interpolation_stage = 'rgba'
                    self.norm.autoscale_None(A)
                    A = self.to_rgba(A)
                alpha = self.get_alpha()
                if alpha is None:  # alpha parameter not specified
                    if A.shape[2] == 3:  # image has no alpha channel
                        output_alpha = 255 if A.dtype == np.uint8 else 1.0
                    else:
                        output_alpha = _resample(  # resample alpha channel
                            self, A[..., 3], out_shape, t)
                    output = _resample(  # resample rgb channels
                        self, _rgb_to_rgba(A[..., :3]), out_shape, t)
                elif np.ndim(alpha) > 0:  # Array alpha
                    # user-specified array alpha overrides the existing alpha channel
                    output_alpha = _resample(self, alpha, out_shape, t)
                    output = _resample(
                        self, _rgb_to_rgba(A[..., :3]), out_shape, t)
                else:  # Scalar alpha
                    if A.shape[2] == 3:  # broadcast scalar alpha
                        output_alpha = (255 * alpha) if A.dtype == np.uint8 else alpha
                    else:  # or apply scalar alpha to existing alpha channel
                        output_alpha = _resample(self, A[..., 3], out_shape, t) * alpha
                    output = _resample(
                        self, _rgb_to_rgba(A[..., :3]), out_shape, t)
                output[..., 3] = output_alpha  # recombine rgb and alpha

            # output is now either a 2D array of normed (int or float) data
            # or an RGBA array of re-sampled input
            output = self.to_rgba(output, bytes=True, norm=False)
            # output is now a correctly sized RGBA array of uint8

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2:
                alpha = self._get_scalar_alpha()
                alpha_channel = output[:, :, 3]
                alpha_channel[:] = (  # Assignment will cast to uint8.
                    alpha_channel.astype(np.float32) * out_alpha * alpha)

        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be displayed.
            subset = TransformedBbox(clip_bbox, t0.inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        """
        Normalize, rescale, and colormap this image's data for rendering using
        *renderer*, with the given *magnification*.

        If *unsampled* is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        Returns
        -------
        image : (M, N, 4) `numpy.uint8` array
            The RGBA image, resampled unless *unsampled* is True.
        x, y : float
            The upper left corner where the image should be drawn, in pixel
            space.
        trans : `~matplotlib.transforms.Affine2D`
            The affine transformation from image to pixel space.
        """
        raise NotImplementedError('The make_image method must be overridden')

    def _check_unsampled_image(self):
        """
        Return whether the image is better to be drawn unsampled.

        The derived class needs to override it.
        """
        return False

    @martist.allow_rasterization
    def draw(self, renderer):
        # if not visible, declare victory and return
        if not self.get_visible():
            self.stale = False
            return
        # for empty images, there is nothing to draw!
        if self.get_array().size == 0:
            self.stale = False
            return
        # actually render the image.
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_alpha(self._get_scalar_alpha())
        gc.set_url(self.get_url())
        gc.set_gid(self.get_gid())
        if (renderer.option_scale_image()  # Renderer supports transform kwarg.
                and self._check_unsampled_image()
                and self.get_transform().is_affine):
            im, l, b, trans = self.make_image(renderer, unsampled=True)
            if im is not None:
                trans = Affine2D().scale(im.shape[1], im.shape[0]) + trans
                renderer.draw_image(gc, l, b, im, trans)
        else:
            im, l, b, trans = self.make_image(
                renderer, renderer.get_image_magnification())
            if im is not None:
                renderer.draw_image(gc, l, b, im)
        gc.restore()
        self.stale = False

    def contains(self, mouseevent):
        """Test whether the mouse event occurred within the image."""
        if (self._different_canvas(mouseevent)
                # This doesn't work for figimage.
                or not self.axes.contains(mouseevent)[0]):
            return False, {}
        # TODO: make sure this is consistent with patch and patch
        # collection on nonlinear transformed coordinates.
        # TODO: consider returning image coordinates (shouldn't
        # be too difficult given that the image is rectilinear).
        trans = self.get_transform().inverted()
        x, y = trans.transform([mouseevent.x, mouseevent.y])
        xmin, xmax, ymin, ymax = self.get_extent()
        # This checks xmin <= x <= xmax *or* xmax <= x <= xmin.
        inside = (x is not None and (x - xmin) * (x - xmax) <= 0
                  and y is not None and (y - ymin) * (y - ymax) <= 0)
        return inside, {}

    def write_png(self, fname):
        """Write the image to png file *fname*."""
        im = self.to_rgba(self._A[::-1] if self.origin == 'lower' else self._A,
                          bytes=True, norm=True)
        PIL.Image.fromarray(im).save(fname, format="png")

    @staticmethod
    def _normalize_image_array(A):
        """
        Check validity of image-like input *A* and normalize it to a format suitable for
        Image subclasses.
        """
        A = cbook.safe_masked_invalid(A, copy=True)
        if A.dtype != np.uint8 and not np.can_cast(A.dtype, float, "same_kind"):
            raise TypeError(f"Image data of dtype {A.dtype} cannot be "
                            f"converted to float")
        if A.ndim == 3 and A.shape[-1] == 1:
            A = A.squeeze(-1)  # If just (M, N, 1), assume scalar and apply colormap.
        if not (A.ndim == 2 or A.ndim == 3 and A.shape[-1] in [3, 4]):
            raise TypeError(f"Invalid shape {A.shape} for image data")
        if A.ndim == 3:
            # If the input data has values outside the valid range (after
            # normalisation), we issue a warning and then clip X to the bounds
            # - otherwise casting wraps extreme values, hiding outliers and
            # making reliable interpretation impossible.
            high = 255 if np.issubdtype(A.dtype, np.integer) else 1
            if A.min() < 0 or high < A.max():
                _log.warning(
                    'Clipping input data to the valid range for imshow with '
                    'RGB data ([0..1] for floats or [0..255] for integers). '
                    'Got range [%s..%s].',
                    A.min(), A.max()
                )
                A = np.clip(A, 0, high)
            # Cast unsupported integer types to uint8
            if A.dtype != np.uint8 and np.issubdtype(A.dtype, np.integer):
                A = A.astype(np.uint8)
        return A

    def set_data(self, A):
        """
        Set the image array.

        Note that this function does *not* update the normalization used.

        Parameters
        ----------
        A : array-like or `PIL.Image.Image`
        """
        if isinstance(A, PIL.Image.Image):
            A = pil_to_array(A)  # Needed e.g. to apply png palette.
        self._A = self._normalize_image_array(A)
        self._imcache = None
        self.stale = True

    def set_array(self, A):
        """
        Retained for backwards compatibility - use set_data instead.

        Parameters
        ----------
        A : array-like
        """
        # This also needs to be here to override the inherited
        # cm.ScalarMappable.set_array method so it is not invoked by mistake.
        self.set_data(A)

    def get_interpolation(self):
        """
        Return the interpolation method the image uses when resizing.

        One of 'auto', 'antialiased', 'nearest', 'bilinear', 'bicubic',
        'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
        'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos',
        or 'none'.
        """
        return self._interpolation

    def set_interpolation(self, s):
        """
        Set the interpolation method the image uses when resizing.

        If None, use :rc:`image.interpolation`. If 'none', the image is
        shown as is without interpolating. 'none' is only supported in
        agg, ps and pdf backends and will fall back to 'nearest' mode
        for other backends.

        Parameters
        ----------
        s : {'auto', 'nearest', 'bilinear', 'bicubic', 'spline16', \
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', \
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none'} or None
        """
        s = mpl._val_or_rc(s, 'image.interpolation').lower()
        _api.check_in_list(interpolations_names, interpolation=s)
        self._interpolation = s
        self.stale = True

    def get_interpolation_stage(self):
        """
        Return when interpolation happens during the transform to RGBA.

        One of 'data', 'rgba', 'auto'.
        """
        return self._interpolation_stage

    def set_interpolation_stage(self, s):
        """
        Set when interpolation happens during the transform to RGBA.

        Parameters
        ----------
        s : {'data', 'rgba', 'auto'} or None
            Whether to apply up/downsampling interpolation in data or RGBA
            space.  If None, use :rc:`image.interpolation_stage`.
            If 'auto' we will check upsampling rate and if less
            than 3 then use 'rgba', otherwise use 'data'.
        """
        s = mpl._val_or_rc(s, 'image.interpolation_stage')
        _api.check_in_list(['data', 'rgba', 'auto'], s=s)
        self._interpolation_stage = s
        self.stale = True

    def can_composite(self):
        """Return whether the image can be composited with its neighbors."""
        trans = self.get_transform()
        return (
            self._interpolation != 'none' and
            trans.is_affine and
            trans.is_separable)

    def set_resample(self, v):
        """
        Set whether image resampling is used.

        Parameters
        ----------
        v : bool or None
            If None, use :rc:`image.resample`.
        """
        v = mpl._val_or_rc(v, 'image.resample')
        self._resample = v
        self.stale = True

    def get_resample(self):
        """Return whether image resampling is used."""
        return self._resample

    def set_filternorm(self, filternorm):
        """
        Set whether the resize filter normalizes the weights.

        See help for `~.Axes.imshow`.

        Parameters
        ----------
        filternorm : bool
        """
        self._filternorm = bool(filternorm)
        self.stale = True

    def get_filternorm(self):
        """Return whether the resize filter normalizes the weights."""
        return self._filternorm

    def set_filterrad(self, filterrad):
        """
        Set the resize filter radius (only applicable to some
        interpolation schemes); see help for `~.Axes.imshow`.

        Parameters
        ----------
        filterrad : positive float
        """
        r = float(filterrad)
        if r <= 0:
            raise ValueError("The filter radius must be a positive number")
        self._filterrad = r
        self.stale = True

    def get_filterrad(self):
        """Return the filterrad setting."""
        return self._filterrad

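# Illustrative sketch (not part of the original module): the `_ImageBase` API
# as exposed on the `AxesImage` returned by `Axes.imshow`.  Names below are
# examples only.
def _example_imagebase_api():  # documentation sketch only
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    data = np.arange(100).reshape(10, 10)
    im = ax.imshow(data, interpolation='nearest')
    im.set_data(data[::-1])                                # new array, same norm
    im.set_alpha(np.linspace(0, 1, 100).reshape(10, 10))   # per-pixel alpha
    im.set_interpolation('bilinear')
    assert im.get_shape() == (10, 10)
    return im
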
class AxesImage(_ImageBase):
    """
    An image with pixels on a regular grid, attached to an Axes.

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The Axes the image will belong to.
    cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
        The Colormap instance or registered colormap name used to map scalar
        data to colors.
    norm : str or `~matplotlib.colors.Normalize`
        Maps luminance to 0-1.
    interpolation : str, default: :rc:`image.interpolation`
        Supported values are 'none', 'auto', 'nearest', 'bilinear',
        'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
        'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell',
        'sinc', 'lanczos', 'blackman'.
    interpolation_stage : {'data', 'rgba'}, default: 'data'
        If 'data', interpolation
        is carried out on the data provided by the user.  If 'rgba', the
        interpolation is carried out after the colormapping has been
        applied (visual interpolation).
    origin : {'upper', 'lower'}, default: :rc:`image.origin`
        Place the [0, 0] index of the array in the upper left or lower left
        corner of the Axes. The convention 'upper' is typically used for
        matrices and images.
    extent : tuple, optional
        The data axes (left, right, bottom, top) for making image plots
        registered with data plots.  Default is to label the pixel
        centers with the zero-based row and column indices.
    filternorm : bool, default: True
        A parameter for the antigrain image resize filter
        (see the antigrain documentation).
        If filternorm is set, the filter normalizes integer values and corrects
        the rounding errors. It doesn't do anything with the source floating
        point values; it corrects only integers according to the rule of 1.0,
        which means that any sum of pixel weights must be equal to 1.0. So,
        the filter function must produce a graph of the proper shape.
    filterrad : float > 0, default: 4
        The filter radius for filters that have a radius parameter, i.e. when
        interpolation is one of: 'sinc', 'lanczos' or 'blackman'.
    resample : bool, default: False
        When True, use a full resampling method. When False, only resample when
        the output image is larger than the input image.
    **kwargs : `~matplotlib.artist.Artist` properties
    """

    def __init__(self, ax,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 interpolation=None,
                 origin=None,
                 extent=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 interpolation_stage=None,
                 **kwargs
                 ):

        self._extent = extent

        super().__init__(
            ax,
            cmap=cmap,
            norm=norm,
            colorizer=colorizer,
            interpolation=interpolation,
            origin=origin,
            filternorm=filternorm,
            filterrad=filterrad,
            resample=resample,
            interpolation_stage=interpolation_stage,
            **kwargs
        )

    def get_window_extent(self, renderer=None):
        x0, x1, y0, y1 = self._extent
        bbox = Bbox.from_extents([x0, y0, x1, y1])
        return bbox.transformed(self.get_transform())

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        trans = self.get_transform()
        # image is created in the canvas coordinate.
        x1, x2, y1, y2 = self.get_extent()
        bbox = Bbox(np.array([[x1, y1], [x2, y2]]))
        transformed_bbox = TransformedBbox(bbox, trans)
        clip = ((self.get_clip_box() or self.axes.bbox) if self.get_clip_on()
                else self.get_figure(root=True).bbox)
        return self._make_image(self._A, bbox, transformed_bbox, clip,
                                magnification, unsampled=unsampled)

    def _check_unsampled_image(self):
        """Return whether the image would be better drawn unsampled."""
        return self.get_interpolation() == "none"

    def set_extent(self, extent, **kwargs):
        """
        Set the image extent.

        Parameters
        ----------
        extent : 4-tuple of float
            The position and size of the image as tuple
            ``(left, right, bottom, top)`` in data coordinates.
        **kwargs
            Other parameters from which unit info (i.e., the *xunits*,
            *yunits*, *zunits* (for 3D Axes), *runits* and *thetaunits* (for
            polar Axes) entries) are applied, if present.

        Notes
        -----
        This updates `.Axes.dataLim`, and, if autoscaling, sets `.Axes.viewLim`
        to tightly fit the image, regardless of `~.Axes.dataLim`.  Autoscaling
        state is not changed, so a subsequent call to `.Axes.autoscale_view`
        will redo the autoscaling in accord with `~.Axes.dataLim`.
        """
        (xmin, xmax), (ymin, ymax) = self.axes._process_unit_info(
            [("x", [extent[0], extent[1]]),
             ("y", [extent[2], extent[3]])],
            kwargs)
        if kwargs:
            raise _api.kwarg_error("set_extent", kwargs)
        xmin = self.axes._validate_converted_limits(
            xmin, self.convert_xunits)
        xmax = self.axes._validate_converted_limits(
            xmax, self.convert_xunits)
        ymin = self.axes._validate_converted_limits(
            ymin, self.convert_yunits)
        ymax = self.axes._validate_converted_limits(
            ymax, self.convert_yunits)
        extent = [xmin, xmax, ymin, ymax]

        self._extent = extent
        corners = (xmin, ymin), (xmax, ymax)
        self.axes.update_datalim(corners)
        self.sticky_edges.x[:] = [xmin, xmax]
        self.sticky_edges.y[:] = [ymin, ymax]
        if self.axes.get_autoscalex_on():
            self.axes.set_xlim((xmin, xmax), auto=None)
        if self.axes.get_autoscaley_on():
            self.axes.set_ylim((ymin, ymax), auto=None)
        self.stale = True

    def get_extent(self):
        """Return the image extent as tuple (left, right, bottom, top)."""
        if self._extent is not None:
            return self._extent
        else:
            sz = self.get_size()
            numrows, numcols = sz
            if self.origin == 'upper':
                return (-0.5, numcols-0.5, numrows-0.5, -0.5)
            else:
                return (-0.5, numcols-0.5, -0.5, numrows-0.5)

    def get_cursor_data(self, event):
        """
        Return the image value at the event position or *None* if the event is
        outside the image.

        See Also
        --------
        matplotlib.artist.Artist.get_cursor_data
        """
        xmin, xmax, ymin, ymax = self.get_extent()
        if self.origin == 'upper':
            ymin, ymax = ymax, ymin
        arr = self.get_array()
        data_extent = Bbox([[xmin, ymin], [xmax, ymax]])
        array_extent = Bbox([[0, 0], [arr.shape[1], arr.shape[0]]])
        trans = self.get_transform().inverted()
        trans += BboxTransform(boxin=data_extent, boxout=array_extent)
        point = trans.transform([event.x, event.y])
        if any(np.isnan(point)):
            return None
        j, i = point.astype(int)
        # Clip the coordinates at array bounds
        if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]):
            return None
        else:
            return arr[i, j]

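# Illustrative sketch (not part of the original module): controlling the data
# extent of an `AxesImage`.  Names below are examples only.
def _example_axesimage_extent():  # documentation sketch only
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.zeros((4, 8)), origin='lower')
    # With no extent given, pixel centers are labelled with 0-based indices:
    assert im.get_extent() == (-0.5, 7.5, -0.5, 3.5)
    # Register the image in data coordinates instead:
    im.set_extent((0, 16, 0, 4))   # (left, right, bottom, top)
    return im
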
class NonUniformImage(AxesImage):
 | 
						|
    """
 | 
						|
    An image with pixels on a rectilinear grid.
 | 
						|
 | 
						|
    In contrast to `.AxesImage`, where pixels are on a regular grid,
 | 
						|
    NonUniformImage allows rows and columns with individual heights / widths.
 | 
						|
 | 
						|
    See also :doc:`/gallery/images_contours_and_fields/image_nonuniform`.
 | 
						|
    """
 | 
						|
 | 
						|
    def __init__(self, ax, *, interpolation='nearest', **kwargs):
 | 
						|
        """
 | 
						|
        Parameters
 | 
						|
        ----------
 | 
						|
        ax : `~matplotlib.axes.Axes`
 | 
						|
            The Axes the image will belong to.
 | 
						|
        interpolation : {'nearest', 'bilinear'}, default: 'nearest'
 | 
						|
            The interpolation scheme used in the resampling.
 | 
						|
        **kwargs
 | 
						|
            All other keyword arguments are identical to those of `.AxesImage`.
 | 
						|
        """
 | 
						|
        super().__init__(ax, **kwargs)
 | 
						|
        self.set_interpolation(interpolation)
 | 
						|
 | 
						|
    def _check_unsampled_image(self):
 | 
						|
        """Return False. Do not use unsampled image."""
 | 
						|
        return False
 | 
						|
 | 
						|
    def make_image(self, renderer, magnification=1.0, unsampled=False):
 | 
						|
        # docstring inherited
 | 
						|
        if self._A is None:
 | 
						|
            raise RuntimeError('You must first set the image array')
 | 
						|
        if unsampled:
 | 
						|
            raise ValueError('unsampled not supported on NonUniformImage')
 | 
						|
        A = self._A
 | 
						|
        if A.ndim == 2:
 | 
						|
            if A.dtype != np.uint8:
 | 
						|
                A = self.to_rgba(A, bytes=True)
 | 
						|
            else:
 | 
						|
                A = np.repeat(A[:, :, np.newaxis], 4, 2)
 | 
						|
                A[:, :, 3] = 255
 | 
						|
        else:
 | 
						|
            if A.dtype != np.uint8:
 | 
						|
                A = (255*A).astype(np.uint8)
 | 
						|
            if A.shape[2] == 3:
 | 
						|
                B = np.zeros(tuple([*A.shape[0:2], 4]), np.uint8)
 | 
						|
                B[:, :, 0:3] = A
 | 
						|
                B[:, :, 3] = 255
 | 
						|
                A = B
 | 
						|
        l, b, r, t = self.axes.bbox.extents
 | 
						|
        width = int(((round(r) + 0.5) - (round(l) - 0.5)) * magnification)
 | 
						|
        height = int(((round(t) + 0.5) - (round(b) - 0.5)) * magnification)
 | 
						|
 | 
						|
        invertedTransform = self.axes.transData.inverted()
 | 
						|
        x_pix = invertedTransform.transform(
 | 
						|
            [(x, b) for x in np.linspace(l, r, width)])[:, 0]
 | 
						|
        y_pix = invertedTransform.transform(
 | 
						|
            [(l, y) for y in np.linspace(b, t, height)])[:, 1]
 | 
						|
 | 
						|
        if self._interpolation == "nearest":
 | 
						|
            x_mid = (self._Ax[:-1] + self._Ax[1:]) / 2
 | 
						|
            y_mid = (self._Ay[:-1] + self._Ay[1:]) / 2
 | 
						|
            x_int = x_mid.searchsorted(x_pix)
 | 
						|
            y_int = y_mid.searchsorted(y_pix)
 | 
						|
            # The following is equal to `A[y_int[:, None], x_int[None, :]]`,
 | 
						|
            # but many times faster.  Both casting to uint32 (to have an
 | 
						|
            # effectively 1D array) and manual index flattening matter.
 | 
						|
            im = (
 | 
						|
                np.ascontiguousarray(A).view(np.uint32).ravel()[
 | 
						|
                    np.add.outer(y_int * A.shape[1], x_int)]
 | 
						|
                .view(np.uint8).reshape((height, width, 4)))
 | 
						|
        else:  # self._interpolation == "bilinear"
 | 
						|
            # Use np.interp to compute x_int/x_float has similar speed.
 | 
						|
            x_int = np.clip(
                self._Ax.searchsorted(x_pix) - 1, 0, len(self._Ax) - 2)
            y_int = np.clip(
                self._Ay.searchsorted(y_pix) - 1, 0, len(self._Ay) - 2)
            idx_int = np.add.outer(y_int * A.shape[1], x_int)
            x_frac = np.clip(
                np.divide(x_pix - self._Ax[x_int], np.diff(self._Ax)[x_int],
                          dtype=np.float32),  # Downcasting helps with speed.
                0, 1)
            y_frac = np.clip(
                np.divide(y_pix - self._Ay[y_int], np.diff(self._Ay)[y_int],
                          dtype=np.float32),
                0, 1)
            f00 = np.outer(1 - y_frac, 1 - x_frac)
            f10 = np.outer(y_frac, 1 - x_frac)
            f01 = np.outer(1 - y_frac, x_frac)
            f11 = np.outer(y_frac, x_frac)
            im = np.empty((height, width, 4), np.uint8)
            for chan in range(4):
                ac = A[:, :, chan].reshape(-1)  # reshape(-1) avoids a copy.
                # Shifting the buffer start (`ac[offset:]`) avoids an array
                # addition (`ac[idx_int + offset]`).
                buf = f00 * ac[idx_int]
                buf += f10 * ac[A.shape[1]:][idx_int]
                buf += f01 * ac[1:][idx_int]
                buf += f11 * ac[A.shape[1] + 1:][idx_int]
                im[:, :, chan] = buf  # Implicitly casts to uint8.
        return im, l, b, IdentityTransform()

    def set_data(self, x, y, A):
        """
        Set the grid for the pixel centers, and the pixel values.

        Parameters
        ----------
        x, y : 1D array-like
            Monotonic arrays of shapes (N,) and (M,), respectively, specifying
            pixel centers.
        A : array-like
            (M, N) `~numpy.ndarray` or masked array of values to be
            colormapped, or (M, N, 3) RGB array, or (M, N, 4) RGBA array.
        """
        A = self._normalize_image_array(A)
        x = np.array(x, np.float32)
        y = np.array(y, np.float32)
        if not (x.ndim == y.ndim == 1 and A.shape[:2] == y.shape + x.shape):
            raise TypeError("Axes don't match array shape")
        self._A = A
        self._Ax = x
        self._Ay = y
        self._imcache = None
        self.stale = True

    def set_array(self, *args):
        raise NotImplementedError('Method not supported')

    def set_interpolation(self, s):
        """
        Parameters
        ----------
        s : {'nearest', 'bilinear'} or None
            If None, use :rc:`image.interpolation`.
        """
        if s is not None and s not in ('nearest', 'bilinear'):
            raise NotImplementedError('Only nearest neighbor and '
                                      'bilinear interpolations are supported')
        super().set_interpolation(s)

    def get_extent(self):
        if self._A is None:
            raise RuntimeError('Must set data first')
        return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]

    def set_filternorm(self, filternorm):
        pass

    def set_filterrad(self, filterrad):
        pass

    def set_norm(self, norm):
        if self._A is not None:
            raise RuntimeError('Cannot change colors after loading data')
        super().set_norm(norm)

    def set_cmap(self, cmap):
        if self._A is not None:
            raise RuntimeError('Cannot change colors after loading data')
        super().set_cmap(cmap)

    def get_cursor_data(self, event):
        # docstring inherited
        x, y = event.xdata, event.ydata
        if (x < self._Ax[0] or x > self._Ax[-1] or
                y < self._Ay[0] or y > self._Ay[-1]):
            return None
        j = np.searchsorted(self._Ax, x) - 1
        i = np.searchsorted(self._Ay, y) - 1
        return self._A[i, j]
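

# Illustrative sketch: typical use of NonUniformImage on irregularly spaced
# pixel centers, plus a check that the flattened uint32 lookup used in
# make_image above matches plain fancy indexing.  The helper name
# `_example_nonuniform_image` is hypothetical; only public matplotlib and
# numpy calls are assumed.
def _example_nonuniform_image():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.array([0.0, 0.5, 1.5, 3.5])          # irregular pixel centers
    y = np.array([0.0, 1.0, 3.0])
    A = np.arange(12, dtype=float).reshape(3, 4)
    im = NonUniformImage(ax, interpolation='bilinear', extent=(0, 3.5, 0, 3))
    im.set_data(x, y, A)
    ax.add_image(im)
    ax.set_xlim(0, 3.5)
    ax.set_ylim(0, 3)

    # The fast path in make_image: viewing RGBA pixels as uint32 and indexing
    # with flattened offsets is equivalent to ordinary fancy indexing.
    rgba = np.arange(48, dtype=np.uint8).reshape(3, 4, 4)
    y_int = np.array([0, 2, 2])
    x_int = np.array([1, 3, 0, 2])
    fast = (rgba.view(np.uint32).ravel()
            [np.add.outer(y_int * rgba.shape[1], x_int)]
            .view(np.uint8).reshape((3, 4, 4)))
    assert (fast == rgba[y_int[:, None], x_int[None, :]]).all()
    return fig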


class PcolorImage(AxesImage):
    """
    Make a pcolor-style plot with an irregular rectangular grid.

    This uses a variation of the original irregular image code,
    and it is used by pcolorfast for the corresponding grid type.
    """

    def __init__(self, ax,
                 x=None,
                 y=None,
                 A=None,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The Axes the image will belong to.
        x, y : 1D array-like, optional
            Monotonic arrays of length N+1 and M+1, respectively, specifying
            rectangle boundaries.  If not given, will default to
            ``range(N + 1)`` and ``range(M + 1)``, respectively.
        A : array-like
            The data to be color-coded. The interpretation depends on the
            shape:

            - (M, N) `~numpy.ndarray` or masked array: values to be colormapped
            - (M, N, 3): RGB array
            - (M, N, 4): RGBA array

        cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
            The Colormap instance or registered colormap name used to map
            scalar data to colors.
        norm : str or `~matplotlib.colors.Normalize`
            Maps luminance to 0-1.
        **kwargs : `~matplotlib.artist.Artist` properties
        """
        super().__init__(ax, norm=norm, cmap=cmap, colorizer=colorizer)
        self._internal_update(kwargs)
        if A is not None:
            self.set_data(x, y, A)

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        if unsampled:
            raise ValueError('unsampled not supported on PcolorImage')

        if self._imcache is None:
            A = self.to_rgba(self._A, bytes=True)
            self._imcache = np.pad(A, [(1, 1), (1, 1), (0, 0)], "constant")
        padded_A = self._imcache
        bg = mcolors.to_rgba(self.axes.patch.get_facecolor(), 0)
        bg = (np.array(bg) * 255).astype(np.uint8)
        if (padded_A[0, 0] != bg).all():
            padded_A[[0, -1], :] = padded_A[:, [0, -1]] = bg

        l, b, r, t = self.axes.bbox.extents
        width = (round(r) + 0.5) - (round(l) - 0.5)
        height = (round(t) + 0.5) - (round(b) - 0.5)
        width = round(width * magnification)
        height = round(height * magnification)
        vl = self.axes.viewLim

        x_pix = np.linspace(vl.x0, vl.x1, width)
        y_pix = np.linspace(vl.y0, vl.y1, height)
        x_int = self._Ax.searchsorted(x_pix)
        y_int = self._Ay.searchsorted(y_pix)
        im = (  # See comment in NonUniformImage.make_image re: performance.
            padded_A.view(np.uint32).ravel()[
                np.add.outer(y_int * padded_A.shape[1], x_int)]
            .view(np.uint8).reshape((height, width, 4)))
        return im, l, b, IdentityTransform()

    def _check_unsampled_image(self):
        return False

    def set_data(self, x, y, A):
        """
        Set the grid for the rectangle boundaries, and the data values.

        Parameters
        ----------
        x, y : 1D array-like, optional
            Monotonic arrays of length N+1 and M+1, respectively, specifying
            rectangle boundaries.  If not given, will default to
            ``range(N + 1)`` and ``range(M + 1)``, respectively.
        A : array-like
            The data to be color-coded. The interpretation depends on the
            shape:

            - (M, N) `~numpy.ndarray` or masked array: values to be colormapped
            - (M, N, 3): RGB array
            - (M, N, 4): RGBA array
        """
        A = self._normalize_image_array(A)
        x = np.arange(0., A.shape[1] + 1) if x is None else np.array(x, float).ravel()
        y = np.arange(0., A.shape[0] + 1) if y is None else np.array(y, float).ravel()
        if A.shape[:2] != (y.size - 1, x.size - 1):
            raise ValueError(
                "Axes don't match array shape. Got %s, expected %s." %
                (A.shape[:2], (y.size - 1, x.size - 1)))
        # For efficient cursor readout, ensure x and y are increasing.
        if x[-1] < x[0]:
            x = x[::-1]
            A = A[:, ::-1]
        if y[-1] < y[0]:
            y = y[::-1]
            A = A[::-1]
        self._A = A
        self._Ax = x
        self._Ay = y
        self._imcache = None
        self.stale = True

    def set_array(self, *args):
        raise NotImplementedError('Method not supported')

    def get_cursor_data(self, event):
        # docstring inherited
        x, y = event.xdata, event.ydata
        if (x < self._Ax[0] or x > self._Ax[-1] or
                y < self._Ay[0] or y > self._Ay[-1]):
            return None
        j = np.searchsorted(self._Ax, x) - 1
        i = np.searchsorted(self._Ay, y) - 1
        return self._A[i, j]
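

# Illustrative sketch: `Axes.pcolorfast` creates a PcolorImage when given 1D,
# non-equidistant boundary arrays; the direct construction is shown alongside.
# The helper name `_example_pcolor_image` is hypothetical.
def _example_pcolor_image():
    import matplotlib.pyplot as plt
    x = np.array([0.0, 1.0, 2.5, 5.0, 10.0])    # N + 1 = 5 boundaries
    y = np.array([0.0, 2.0, 3.0, 7.0])          # M + 1 = 4 boundaries
    A = np.arange(12, dtype=float).reshape(3, 4)

    fig, (ax1, ax2) = plt.subplots(ncols=2)
    ax1.pcolorfast(x, y, A, cmap='viridis')     # irregular grid -> PcolorImage

    im = PcolorImage(ax2, x, y, A, cmap='viridis')
    ax2.add_image(im)
    ax2.set_xlim(x[0], x[-1])
    ax2.set_ylim(y[0], y[-1])
    return fig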


class FigureImage(_ImageBase):
    """An image attached to a figure."""

    zorder = 0

    _interpolation = 'nearest'

    def __init__(self, fig,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 offsetx=0,
                 offsety=0,
                 origin=None,
                 **kwargs
                 ):
        """
        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1

        kwargs are an optional list of Artist keyword args
        """
        super().__init__(
            None,
            norm=norm,
            cmap=cmap,
            colorizer=colorizer,
            origin=origin
        )
        self.set_figure(fig)
        self.ox = offsetx
        self.oy = offsety
        self._internal_update(kwargs)
        self.magnification = 1.0

    def get_extent(self):
        """Return the image extent as tuple (left, right, bottom, top)."""
        numrows, numcols = self.get_size()
        return (-0.5 + self.ox, numcols-0.5 + self.ox,
                -0.5 + self.oy, numrows-0.5 + self.oy)

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        fig = self.get_figure(root=True)
        fac = renderer.dpi/fig.dpi
        # fac here is to account for pdf, eps, svg backends where
        # figure.dpi is set to 72.  This means we need to scale the
        # image (using magnification) and offset it appropriately.
        bbox = Bbox([[self.ox/fac, self.oy/fac],
                     [(self.ox/fac + self._A.shape[1]),
                     (self.oy/fac + self._A.shape[0])]])
        width, height = fig.get_size_inches()
        width *= renderer.dpi
        height *= renderer.dpi
        clip = Bbox([[0, 0], [width, height]])
        return self._make_image(
            self._A, bbox, bbox, clip, magnification=magnification / fac,
            unsampled=unsampled, round_to_pixel_border=False)

    def set_data(self, A):
        """Set the image array."""
        super().set_data(A)
        self.stale = True
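

# Illustrative sketch: `Figure.figimage` creates a FigureImage pinned to the
# figure at a pixel offset (xo, yo), without resampling to Axes coordinates.
# The helper name `_example_figure_image` is hypothetical.
def _example_figure_image():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(4, 3), dpi=100)
    Z = np.random.default_rng(0).random((80, 120))
    # Offsets are measured in pixels from the lower-left corner of the figure.
    fig.figimage(Z, xo=20, yo=30, cmap='gray', origin='lower')
    return fig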


class BboxImage(_ImageBase):
    """The Image class whose size is determined by the given bbox."""

    def __init__(self, bbox,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 interpolation=None,
                 origin=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 **kwargs
                 ):
        """
        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1

        kwargs are an optional list of Artist keyword args
        """
        super().__init__(
            None,
            cmap=cmap,
            norm=norm,
            colorizer=colorizer,
            interpolation=interpolation,
            origin=origin,
            filternorm=filternorm,
            filterrad=filterrad,
            resample=resample,
            **kwargs
        )
        self.bbox = bbox

    def get_window_extent(self, renderer=None):
        if renderer is None:
            renderer = self.get_figure()._get_renderer()

        if isinstance(self.bbox, BboxBase):
            return self.bbox
        elif callable(self.bbox):
            return self.bbox(renderer)
        else:
            raise ValueError("Unknown type of bbox")

    def contains(self, mouseevent):
        """Test whether the mouse event occurred within the image."""
        if self._different_canvas(mouseevent) or not self.get_visible():
            return False, {}
        x, y = mouseevent.x, mouseevent.y
        inside = self.get_window_extent().contains(x, y)
        return inside, {}

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        width, height = renderer.get_canvas_width_height()
        bbox_in = self.get_window_extent(renderer).frozen()
        bbox_in._points /= [width, height]
        bbox_out = self.get_window_extent(renderer)
        clip = Bbox([[0, 0], [width, height]])
        self._transform = BboxTransformTo(clip)
        return self._make_image(
            self._A,
            bbox_in, bbox_out, clip, magnification, unsampled=unsampled)
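

# Illustrative sketch: a BboxImage draws its array into an arbitrary bounding
# box, here one given in Axes coordinates via a TransformedBbox.  The helper
# name `_example_bbox_image` is hypothetical.
def _example_bbox_image():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    bbox = TransformedBbox(Bbox.from_bounds(0.2, 0.2, 0.6, 0.3), ax.transAxes)
    im = BboxImage(bbox, cmap='plasma', interpolation='bicubic')
    im.set_data(np.arange(256).reshape(16, 16))
    ax.add_artist(im)
    return fig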


def imread(fname, format=None):
    """
    Read an image from a file into an array.

    .. note::

        This function exists for historical reasons.  It is recommended to
        use `PIL.Image.open` instead for loading images.

    Parameters
    ----------
    fname : str or file-like
        The image file to read: a filename, a URL or a file-like object opened
        in read-binary mode.

        Passing a URL is deprecated.  Please open the URL
        for reading and pass the result to Pillow, e.g. with
        ``np.array(PIL.Image.open(urllib.request.urlopen(url)))``.
    format : str, optional
        The image file format assumed for reading the data.  The image is
        loaded as a PNG file if *format* is set to "png", if *fname* is a path
        or opened file with a ".png" extension, or if it is a URL.  In all
        other cases, *format* is ignored and the format is auto-detected by
        `PIL.Image.open`.

    Returns
    -------
    `numpy.array`
        The image data. The returned array has shape

        - (M, N) for grayscale images.
        - (M, N, 3) for RGB images.
        - (M, N, 4) for RGBA images.

        PNG images are returned as float arrays (0-1).  All other formats are
        returned as int arrays, with a bit depth determined by the file's
        contents.
    """
    # hide imports to speed initial import on systems with slow linkers
    from urllib import parse

    if format is None:
        if isinstance(fname, str):
            parsed = parse.urlparse(fname)
            # If the string is a URL (Windows paths appear as if they have a
            # length-1 scheme), assume png.
            if len(parsed.scheme) > 1:
                ext = 'png'
            else:
                ext = Path(fname).suffix.lower()[1:]
        elif hasattr(fname, 'geturl'):  # Returned by urlopen().
            # We could try to parse the url's path and use the extension, but
            # returning png is consistent with the block above.  Note that this
            # if clause has to come before checking for fname.name as
            # urlopen("file:///...") also has a name attribute (with the fixed
            # value "<urllib response>").
            ext = 'png'
        elif hasattr(fname, 'name'):
            ext = Path(fname.name).suffix.lower()[1:]
        else:
            ext = 'png'
    else:
        ext = format
    img_open = (
        PIL.PngImagePlugin.PngImageFile if ext == 'png' else PIL.Image.open)
    if isinstance(fname, str) and len(parse.urlparse(fname).scheme) > 1:
        # Pillow doesn't handle URLs directly.
        raise ValueError(
            "Please open the URL for reading and pass the "
            "result to Pillow, e.g. with "
            "``np.array(PIL.Image.open(urllib.request.urlopen(url)))``."
            )
    with img_open(fname) as image:
        return (_pil_png_to_float_array(image)
                if isinstance(image, PIL.PngImagePlugin.PngImageFile) else
                pil_to_array(image))
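

# Illustrative sketch: PNG data comes back from imread as a float array in
# [0, 1]; other formats are returned as integer arrays.  The round trip below
# stays in memory, and the helper name `_example_imread` is hypothetical.
def _example_imread():
    from io import BytesIO
    buf = BytesIO()
    imsave(buf, np.random.default_rng(0).random((8, 8)), format='png')
    buf.seek(0)
    img = imread(buf, format='png')
    return img.shape, img.dtype   # (8, 8, 4), float32 values in [0, 1]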


def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
           origin=None, dpi=100, *, metadata=None, pil_kwargs=None):
    """
    Colormap and save an array as an image file.

    RGB(A) images are passed through.  Single channel images will be
    colormapped according to *cmap* and *norm*.

    .. note::

       If you want to save a single channel image as gray scale please use an
       image I/O library (such as pillow, tifffile, or imageio) directly.

    Parameters
    ----------
    fname : str or path-like or file-like
        A path or a file-like object to store the image in.
        If *format* is not set, then the output format is inferred from the
        extension of *fname*, if any, and from :rc:`savefig.format` otherwise.
        If *format* is set, it determines the output format.
    arr : array-like
        The image data. Accepts NumPy arrays or sequences
        (e.g., lists or tuples). The shape can be one of
        MxN (luminance), MxNx3 (RGB) or MxNx4 (RGBA).
    vmin, vmax : float, optional
        *vmin* and *vmax* set the color scaling for the image by fixing the
        values that map to the colormap color limits. If either *vmin*
        or *vmax* is None, that limit is determined from the *arr*
        min/max value.
    cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
        A Colormap instance or registered colormap name. The colormap
        maps scalar data to colors. It is ignored for RGB(A) data.
    format : str, optional
        The file format, e.g. 'png', 'pdf', 'svg', ...  The behavior when this
        is unset is documented under *fname*.
    origin : {'upper', 'lower'}, default: :rc:`image.origin`
        Indicates whether the ``(0, 0)`` index of the array is in the upper
        left or lower left corner of the Axes.
    dpi : float
        The DPI to store in the metadata of the file.  This does not affect the
        resolution of the output image.  Depending on file format, this may be
        rounded to the nearest integer.
    metadata : dict, optional
        Metadata in the image file.  The supported keys depend on the output
        format, see the documentation of the respective backends for more
        information.
        Currently only supported for "png", "pdf", "ps", "eps", and "svg".
    pil_kwargs : dict, optional
        Keyword arguments passed to `PIL.Image.Image.save`.  If the 'pnginfo'
        key is present, it completely overrides *metadata*, including the
        default 'Software' key.
    """
    from matplotlib.figure import Figure

    # Normalizing input (e.g., list or tuples) to NumPy array if needed
    arr = np.asanyarray(arr)

    if isinstance(fname, os.PathLike):
        fname = os.fspath(fname)
    if format is None:
        format = (Path(fname).suffix[1:] if isinstance(fname, str)
                  else mpl.rcParams["savefig.format"]).lower()
    if format in ["pdf", "ps", "eps", "svg"]:
        # Vector formats that are not handled by PIL.
        if pil_kwargs is not None:
            raise ValueError(
                f"Cannot use 'pil_kwargs' when saving to {format}")
        fig = Figure(dpi=dpi, frameon=False)
        fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin,
                     resize=True)
        fig.savefig(fname, dpi=dpi, format=format, transparent=True,
                    metadata=metadata)
    else:
        # Don't bother creating an image; this avoids rounding errors on the
        # size when dividing and then multiplying by dpi.
        if origin is None:
            origin = mpl.rcParams["image.origin"]
        else:
            _api.check_in_list(('upper', 'lower'), origin=origin)
        if origin == "lower":
            arr = arr[::-1]
        if (isinstance(arr, memoryview) and arr.format == "B"
                and arr.ndim == 3 and arr.shape[-1] == 4):
            # Such an ``arr`` would also be handled fine by sm.to_rgba below
            # (after casting with asarray), but it is useful to special-case it
            # because that's what backend_agg passes, and it can in fact be
            # used as is, saving a few operations.
            rgba = arr
        else:
            sm = mcolorizer.Colorizer(cmap=cmap)
            sm.set_clim(vmin, vmax)
            rgba = sm.to_rgba(arr, bytes=True)
        if pil_kwargs is None:
            pil_kwargs = {}
        else:
            # we modify this below, so make a copy (don't modify caller's dict)
            pil_kwargs = pil_kwargs.copy()
        pil_shape = (rgba.shape[1], rgba.shape[0])
        rgba = np.require(rgba, requirements='C')
        image = PIL.Image.frombuffer(
            "RGBA", pil_shape, rgba, "raw", "RGBA", 0, 1)
        if format == "png":
            # Only use the metadata kwarg if pnginfo is not set, because the
            # semantics of duplicate keys in pnginfo is unclear.
            if "pnginfo" in pil_kwargs:
                if metadata:
                    _api.warn_external("'metadata' is overridden by the "
                                       "'pnginfo' entry in 'pil_kwargs'.")
            else:
                metadata = {
                    "Software": (f"Matplotlib version{mpl.__version__}, "
 | 
						|
                                 f"https://matplotlib.org/"),
 | 
						|
                    **(metadata if metadata is not None else {}),
 | 
						|
                }
 | 
						|
                pil_kwargs["pnginfo"] = pnginfo = PIL.PngImagePlugin.PngInfo()
 | 
						|
                for k, v in metadata.items():
 | 
						|
                    if v is not None:
 | 
						|
                        pnginfo.add_text(k, v)
 | 
						|
        elif metadata is not None:
 | 
						|
            raise ValueError(f"metadata not supported for format {format!r}")
 | 
						|
        if format in ["jpg", "jpeg"]:
 | 
						|
            format = "jpeg"  # Pillow doesn't recognize "jpg".
 | 
						|
            facecolor = mpl.rcParams["savefig.facecolor"]
 | 
						|
            if cbook._str_equal(facecolor, "auto"):
 | 
						|
                facecolor = mpl.rcParams["figure.facecolor"]
 | 
						|
            color = tuple(int(x * 255) for x in mcolors.to_rgb(facecolor))
 | 
						|
            background = PIL.Image.new("RGB", pil_shape, color)
 | 
						|
            background.paste(image, image)
 | 
						|
            image = background
 | 
						|
        pil_kwargs.setdefault("format", format)
 | 
						|
        pil_kwargs.setdefault("dpi", (dpi, dpi))
 | 
						|
        image.save(fname, **pil_kwargs)
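

# Illustrative sketch: saving a colormapped array with explicit color limits,
# a lower-left origin, and PNG text metadata.  The in-memory buffer and the
# helper name `_example_imsave` are hypothetical.
def _example_imsave():
    from io import BytesIO
    data = np.random.default_rng(0).normal(size=(64, 64))
    buf = BytesIO()
    imsave(buf, data, vmin=-2, vmax=2, cmap='RdBu', format='png',
           origin='lower', dpi=150, metadata={'Title': 'demo'})
    return buf.getvalue()[:8]   # starts with the PNG signature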


def pil_to_array(pilImage):
    """
    Load a `PIL image`_ and return it as a numpy int array.

    .. _PIL image: https://pillow.readthedocs.io/en/latest/reference/Image.html

    Returns
    -------
    numpy.array

        The array shape depends on the image type:

        - (M, N) for grayscale images.
        - (M, N, 3) for RGB images.
        - (M, N, 4) for RGBA images.
    """
    if pilImage.mode in ['RGBA', 'RGBX', 'RGB', 'L']:
        # return MxNx4 RGBA, MxNx3 RGB, or MxN luminance array
        return np.asarray(pilImage)
    elif pilImage.mode.startswith('I;16'):
        # return MxN luminance array of uint16
        raw = pilImage.tobytes('raw', pilImage.mode)
        if pilImage.mode.endswith('B'):
            x = np.frombuffer(raw, '>u2')
        else:
            x = np.frombuffer(raw, '<u2')
        return x.reshape(pilImage.size[::-1]).astype('=u2')
    else:  # try to convert to an rgba image
        try:
            pilImage = pilImage.convert('RGBA')
        except ValueError as err:
            raise RuntimeError('Unknown image mode') from err
        return np.asarray(pilImage)  # return MxNx4 RGBA array
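

# Illustrative sketch: pil_to_array maps PIL modes onto array shapes, e.g.
# 'L' -> (M, N), 'RGB' -> (M, N, 3), 'RGBA' -> (M, N, 4).  The helper name
# `_example_pil_to_array` is hypothetical.
def _example_pil_to_array():
    pil_img = PIL.Image.new('RGB', (4, 3), color=(255, 0, 0))
    arr = pil_to_array(pil_img)
    return arr.shape, arr.dtype   # (3, 4, 3), uint8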


def _pil_png_to_float_array(pil_png):
    """Convert a PIL `PNGImageFile` to a 0-1 float array."""
 | 
						|
    # Unlike pil_to_array this converts to 0-1 float32s for backcompat with the
    # old libpng-based loader.
    # The supported rawmodes are from PIL.PngImagePlugin._MODES.  When
    # mode == "RGB(A)", the 16-bit raw data has already been coarsened to 8-bit
    # by Pillow.
    mode = pil_png.mode
    rawmode = pil_png.png.im_rawmode
    if rawmode == "1":  # Grayscale.
        return np.asarray(pil_png, np.float32)
    if rawmode == "L;2":  # Grayscale.
        return np.divide(pil_png, 2**2 - 1, dtype=np.float32)
    if rawmode == "L;4":  # Grayscale.
        return np.divide(pil_png, 2**4 - 1, dtype=np.float32)
    if rawmode == "L":  # Grayscale.
        return np.divide(pil_png, 2**8 - 1, dtype=np.float32)
    if rawmode == "I;16B":  # Grayscale.
        return np.divide(pil_png, 2**16 - 1, dtype=np.float32)
    if mode == "RGB":  # RGB.
        return np.divide(pil_png, 2**8 - 1, dtype=np.float32)
    if mode == "P":  # Palette.
        return np.divide(pil_png.convert("RGBA"), 2**8 - 1, dtype=np.float32)
    if mode == "LA":  # Grayscale + alpha.
        return np.divide(pil_png.convert("RGBA"), 2**8 - 1, dtype=np.float32)
    if mode == "RGBA":  # RGBA.
        return np.divide(pil_png, 2**8 - 1, dtype=np.float32)
    raise ValueError(f"Unknown PIL rawmode: {rawmode}")


def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
              preview=False):
    """
    Make a thumbnail of image in *infile* with output filename *thumbfile*.

    See :doc:`/gallery/misc/image_thumbnail_sgskip`.

    Parameters
    ----------
    infile : str or file-like
        The image file. Matplotlib relies on Pillow_ for image reading, and
        thus supports a wide range of file formats, including PNG, JPG, TIFF
        and others.

        .. _Pillow: https://python-pillow.github.io

    thumbfile : str or file-like
        The thumbnail filename.

    scale : float, default: 0.1
        The scale factor for the thumbnail.

    interpolation : str, default: 'bilinear'
        The interpolation scheme used in the resampling. See the
        *interpolation* parameter of `~.Axes.imshow` for possible values.

    preview : bool, default: False
        If True, the default backend (presumably a user interface
        backend) will be used which will cause a figure to be raised if
        `~matplotlib.pyplot.show` is called.  If it is False, the figure is
        created using `.FigureCanvasBase` and the drawing backend is selected
        as `.Figure.savefig` would normally do.

    Returns
    -------
    `.Figure`
        The figure instance containing the thumbnail.
    """

    im = imread(infile)
    rows, cols, depth = im.shape

    # This doesn't really matter (it cancels in the end) but the API needs it.
    dpi = 100

    height = rows / dpi * scale
    width = cols / dpi * scale

    if preview:
        # Let the UI backend do everything.
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(width, height), dpi=dpi)
    else:
        from matplotlib.figure import Figure
        fig = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvasBase(fig)

    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])
    ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)
    fig.savefig(thumbfile, dpi=dpi)
    return fig
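

# Illustrative sketch: generating a 10% scale thumbnail with bilinear
# resampling.  The input and output file names are hypothetical placeholders,
# as is the helper name `_example_thumbnail`.
def _example_thumbnail():
    return thumbnail('large_photo.png', 'large_photo_thumb.png',
                     scale=0.1, interpolation='bilinear')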