/gci.py
Python | 5993 lines | 5821 code | 47 blank | 125 comment | 58 complexity | 235216d8a21c72726dfe7d1fde242c86 MD5 | raw file
Large files are truncated, but you can click here to view the full file
- #! /usr/bin/env python
- # -*- coding: UTF-8 -*-
- # pylint: disable-msg=C0301,C0321,R0912,R0913,R0914,R0915
- '''
- This module provides a library of functions for use with the GCI system.
- GLOBAL CONSTANTS
- ================
- GCIDIR
- GCI_SERIAL_NUMBER
- TODO
- ====
- 1. Make "serial_number" a required argument for a bunch of these functions in order to ensure
- consistency.
- FUNCTION DESCRIPTIONS
- =====================
- '''
- from __future__ import print_function, division
- from numpy import *
- import zmq
- import platform
- import os
- import sys
- import glob
- import struct
- import simplejson as json
- import pdb #zzz
- from scipy.stats import nanmean, nanmedian, nanstd
- #global GCI_SERIAL_NUMBER
- #global GCIDIR
## Default GCI unit serial number (see set_gci_serial_number / get_gci_serial_number).
GCI_SERIAL_NUMBER = 2
## Locate the directory containing this module. os.path.realpath() converts a relative
## path into an absolute one (and resolves symlinks). Using __file__ directly replaces
## the old sys._current_frames().values()[0] lookup, which picked an *arbitrary* thread's
## frame (wrong module under threading) and fails on Python 3, where dict views are not
## indexable.
thisfile = os.path.realpath(__file__)
GCIDIR = os.path.dirname(thisfile) + '/'
## Explicit public API for "from gci import *".
## NOTE(review): this lists 'decode_gci_msg_meta', but the module defines
## 'decode_gci_msg' -- confirm which name is intended before relying on star-imports.
__all__ = ['decode_gci_msg_meta','acquire_raw','acquire_data','acquire_dcb','acquire_vis','crop_dcb','crop_hypercube',
           'show_dcb','get_offsets_and_gains','receive_command_replies','get_control_data','move_shutter',
           'set_heater_setpoint','set_heater_state','replace_bad_pixels','degc_to_atsensor_radiance',
           'gci_sequence_to_hcb','get_filter_spectra','get_filter_edge_wavelength','send_command',
           'show_image_with_projections','dynamically_correct_dcb','read_gcifile_meta','read_gcifile',
           'dcb_objtemp_to_atsensor_radiance','get_filterlist','acquire_manually_calibrated_hypercube',
           'get_gas_spectra','calculate_hmatrix','get_filter_type',
           'planck_blackbody','get_degc_to_atsensor_radiance_lut','dcb_atsensor_radiance_to_objtemp',
           'read_rectanglesfile','update_rectangles_dict','write_rectanglesfile','draw_block_spectrum',
           'get_mean_spect','dcb_region_stats','daisi_rect_to_xycoords','xycoords_to_daisi_rect','print_rects',
           'xcorr','xcorr3d','convert_radiancecube_to_spectralcube','send_asynch_command','set_gci_serial_number',
           'get_gci_serial_number','get_calib_dictionary','jcamp_reader','jcamp_spectrum','format_coord',
           'find_nearest_brackets','is_float','getcolors','get_poly_coeffs','polyeval_Horner','get_parallax_mask',
           'get_vismotion_mask','get_irmotion_mask','mask_overlay','draw_box','get_vis_steam_mask',
           'get_ir_steam_mask','get_spectral_mask','get_detection_mask','get_spectral_detection_mask','set_saturated_to_nan','pullcube',
           'gci_file_ismotioncalib','gci_msg_ismotioncalib','to_json','serial_number_from_filterlist',
           'send_keepalive_command','send_trigger_cal_command']
## Docstrings in this module use reStructuredText markup.
__docformat__ = 'restructuredtext en'
- ## =================================================================================================
def decode_gci_msg(msg, use_ir=True, use_vis=True, use_masks=True, save_header=False, debug=False):
    '''
    Convert a GCI message (a zmq multipart message, or the contents of a `*.gci` file) to a
    dictionary of numpy objects, decoding the metadata plus, optionally, any datacubes, visible
    images, and masks present in the message.
    Parameters
    ----------
    msg : list of str
        The message delivered by the GCI's zmq network interface, or through the contents of \
        a `*.gci` file
    use_ir : bool
        Whether to decode any datacubes. (Setting to False improves decoding speed.)
    use_vis : bool
        Whether to decode any visible images. (Setting to False improves decoding speed.)
    use_masks : bool
        Whether to decode any masks. (Setting to False improves decoding speed.)
    save_header : bool
        Whether to save the text-version of the metadata as "header".
    debug : bool
        Whether to print diagnostic information while decoding.
    Returns
    -------
    data : dict
        A dictionary containing all data elements from the decoded gci msg. If present in the gci \
        file, the list of dictionary keys can include
        * "cool_shutter_temperatures"
        * "heated_shutter_temperatures"
        * "timestamp"
        * "accumulated_frames_list"
        * "gain_cube_filename"
        * "offset_cube_filename"
        * "cropping_xy"
        * "filter_to_camera_trans"
        * "serial_number"
        * ...
    '''
    ## The "algorithm output" image uses different subscription topics, shown in msg[0],
    ## to tell which one is being sent at a given time.
    assert isinstance(msg, (list, tuple, ndarray))
    topic = msg[0]
    if (topic != '') and debug: print('topic = ' + topic)
    d = json.loads(msg[1]) ## the "payload" dictionary
    ## Set up to grab the system data.
    meta = {}
    meta['topic'] = topic
    if save_header:
        import yaml
        meta['header'] = yaml.dump(d, indent=3, width=130)
    if debug:
        import yaml
        print(yaml.dump(d, indent=3, width=130))
    if ('gci-meta' in d) and ('metadata' in d['gci-meta']):
        ## The first three items are always present.
        meta['cool_shutter_temperatures'] = float32(d['gci-meta']['metadata']['unheated-temps-list'])
        meta['warm_shutter_temperatures'] = float32(d['gci-meta']['metadata']['heated-temps-list'])
        meta['accumulated_frames_list'] = d['gci-meta']['metadata']['frames-accum-list']
        meta['timestamp'] = d['gci-meta']['metadata']['ts-ms-since-epoch']
        ## Nw = number of wavelength channels (one accumulated-frames entry per channel).
        Nw = alen(meta['accumulated_frames_list'])
        meta['Nw'] = Nw
        if ('cube' in d) and ('metadata' in d['cube']):
            meta['Nx'] = d['cube']['metadata']['height']
        else:
            meta['Nx'] = 240       ## fallback when no cube metadata is present
        if ('cube-width' in d['gci-meta']['metadata']):
            meta['Ny'] = d['gci-meta']['metadata']['cube-width']
        ## All of the remaining metadata items are optional: decode each only if present.
        if ('unheated-radiances-list' in d['gci-meta']['metadata']):
            meta['cool_shutter_radiances'] = float32(d['gci-meta']['metadata']['unheated-radiances-list'])
        if ('heated-radiances-list' in d['gci-meta']['metadata']):
            meta['warm_shutter_radiances'] = float32(d['gci-meta']['metadata']['heated-radiances-list'])
        if ('gain-cube-filename' in d['gci-meta']['metadata']):
            meta['gain_cube_filename'] = d['gci-meta']['metadata']['gain-cube-filename']
        if ('offset-cube-filename' in d['gci-meta']['metadata']):
            meta['offset_cube_filename'] = d['gci-meta']['metadata']['offset-cube-filename']
        if ('static-offset-type' in d['gci-meta']['metadata']):
            ## This can be either "cool" or "warm".
            meta['static_offset_type'] = d['gci-meta']['metadata']['static-offset-type'].lower()
        if ('reg-data-str' in d['gci-meta']['metadata']):
            ## Had to hard-code the Nx value here. Need a better structure for data to make it
            ## flexible for *any* detector array dimensions.
            rect_dict = d['gci-meta']['metadata']['reg-data-str']['IR']
            meta['rd'] = {}
            meta['rd']['cropping_xy'] = daisi_rect_to_xycoords(rect_dict, 240, meta['Nw'])
            #if ('scene-reference-rect' in d['gci-meta']['metadata']):
            scene_xy = daisi_rect_to_xycoords(d['gci-meta']['metadata']['scene-reference-rect'], meta['Nx'], 0)
            meta['rd']['scene_xy'] = scene_xy
        if ('aper-mean-temps' in d['gci-meta']['metadata']):
            ## Since not all w's are represented in the aperture values, you need to initialize the
            ## array and fill in only those values which are defined.
            meta['aper_mean_temps'] = float32([None]*Nw)
            for key in d['gci-meta']['metadata']['aper-mean-temps']:
                w = int(key)
                meta['aper_mean_temps'][w] = float32(d['gci-meta']['metadata']['aper-mean-temps'][key])
        if ('scene-mean-temps' in d['gci-meta']['metadata']):
            meta['sceneref_mean_temps'] = zeros(Nw, 'float32')
            for key in d['gci-meta']['metadata']['scene-mean-temps']:
                w = int(key)
                meta['sceneref_mean_temps'][w] = float32(d['gci-meta']['metadata']['scene-mean-temps'][key])
        if ('aper-offset-deltas' in d['gci-meta']['metadata']):
            meta['aper_offset_deltas'] = float32([None]*Nw)
            for key in d['gci-meta']['metadata']['aper-offset-deltas']:
                w = int(key)
                meta['aper_offset_deltas'][w] = float32(d['gci-meta']['metadata']['aper-offset-deltas'][key])
        if ('cropped-page-mean-temps' in d['gci-meta']['metadata']):
            meta['cropped_page_mean_temps'] = float32(d['gci-meta']['metadata']['cropped-page-mean-temps'])
        if ('spec-radiance-means' in d['gci-meta']['metadata']):
            meta['spec_radiance_means'] = float32(d['gci-meta']['metadata']['spec-radiance-means'])
            ## Nc = number of spectral channels.
            meta['Nc'] = alen(meta['spec_radiance_means'])
        if ('version-info' in d['gci-meta']['metadata']):
            meta['version_info'] = d['gci-meta']['metadata']['version-info']
        if ('calibration-state' in d['gci-meta']['metadata']):
            ## This string can be {'calibration-2-pt-heating','calibration-2-pt-running',
            ## 'calibration-1-pt-running','calibration-idle'}.
            meta['calibration_state'] = d['gci-meta']['metadata']['calibration-state'].lower()
        if ('pan-tilt-in-motion' in d['gci-meta']['metadata']):
            meta['pan_tilt_in_motion'] = d['gci-meta']['metadata']['pan-tilt-in-motion']
        if ('pan-tilt-position' in d['gci-meta']['metadata']):
            meta['pan_tilt_position'] = float32(d['gci-meta']['metadata']['pan-tilt-position'])
        if ('pan-tilt-setpoint' in d['gci-meta']['metadata']):
            meta['pan_tilt_setpoint'] = float32(d['gci-meta']['metadata']['pan-tilt-setpoint'])
        if ('gain-cube-timestamp' in d['gci-meta']['metadata']):
            meta['gain_cube_timestamp'] = d['gci-meta']['metadata']['gain-cube-timestamp']
        if ('offset-cube-timestamp' in d['gci-meta']['metadata']):
            meta['offset_cube_timestamp'] = d['gci-meta']['metadata']['offset-cube-timestamp']
        if ('heated-temp-ts-ms' in d['gci-meta']['metadata']):
            meta['heated_temp_timestamp'] = d['gci-meta']['metadata']['heated-temp-ts-ms']
        if ('scene-ref-timestamp' in d['gci-meta']['metadata']):
            meta['scene_ref_timestamp'] = d['gci-meta']['metadata']['scene-ref-timestamp']
        if ('is-scene-ref' in d['gci-meta']['metadata']):
            meta['is_scene_ref'] = d['gci-meta']['metadata']['is-scene-ref']
        ## Per-mask counts of how many pixels each masking stage removed.
        if ('num-pixels-masked-by-ir-mc' in d['gci-meta']['metadata']):
            meta['npix_masked_ir_mc'] = d['gci-meta']['metadata']['num-pixels-masked-by-ir-mc']
        if ('num-pixels-masked-by-ir-steam' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_ir_steam'] = d['gci-meta']['metadata']['num-pixels-masked-by-ir-steam']
        if ('num-pixels-masked-by-parallax' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_parallax'] = d['gci-meta']['metadata']['num-pixels-masked-by-parallax']
        if ('num-pixels-masked-by-vis-mc' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_vis_mc'] = d['gci-meta']['metadata']['num-pixels-masked-by-vis-mc']
        if ('num-pixels-masked-by-vis-steam' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_vis_steam'] = d['gci-meta']['metadata']['num-pixels-masked-by-vis-steam']
        ## Look inside the filter map to determine which GCI you're talking to.
        if ('filt-chan-map' in d['gci-meta']['metadata']):
            if ('propylene0' in d['gci-meta']['metadata']['filt-chan-map']):
                filterlist = get_filterlist(d['gci-meta']['metadata']['filt-chan-map'])
                meta['filterlist'] = filterlist
            else:
                meta['filterlist'] = []
                for key in d['gci-meta']['metadata']['filt-chan-map']:
                    w = int(key)
                    meta['filterlist'].append(d['gci-meta']['metadata']['filt-chan-map'][key]['uniqueID'])
            meta['serial_number'] = serial_number_from_filterlist(meta['filterlist'], debug=debug)
        if ('shutter-state-info' in d['gci-meta']['metadata']):
            sh = d['gci-meta']['metadata']['shutter-state-info']
            if 'heater_enabled' in sh: meta['heater_enabled'] = sh['heater_enabled'] ## bool
            if 'heater_on_cond' in sh: meta['heater_on_cond'] = sh['heater_on_cond'] ## bool
            if 'heater_set_point' in sh: meta['heater_set_point'] = float32(sh['heater_set_point'])
            if 'cool_set_open' in sh: meta['cool_shutter_set_open'] = sh['cool_set_open'] ## bool
            if 'cool_pos' in sh: meta['cool_shutter_position'] = sh['cool_pos'].lower() ## can be 'open', 'closed', 'transit', 'invalid'
            if 'warm_set_open' in sh: meta['warm_shutter_set_open'] = sh['warm_set_open'] ## bool
            if 'warm_pos' in sh: meta['warm_shutter_position'] = sh['warm_pos'].lower() ## can be 'open', 'closed', 'transit', 'invalid'
        if ('force-temporal-ref-reset' in d['gci-meta']['metadata']):
            meta['force_temporal_ref_reset'] = d['gci-meta']['metadata']['force-temporal-ref-reset']
        ## Some heuristics to guess which port was the source for the frame.
        if ('version-info' in d['gci-meta']['metadata']):
            #if ('cube_chan_viewer' in d['gci-meta']['metadata']['version-info']):
            if ('detection-mask' in d):
                meta['source'] = '9501'
            elif ('spectral_cube_gen' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9005'
            elif ('mask_gen' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9004'
            elif ('degC_to_rad' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9003'
            elif ('dynamic_cal' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9002'
            elif ('rotate_vis_cam' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9001'
            else:
                meta['source'] = '9000'
        elif('gain-cube-filename' in d['gci-meta']['metadata']):
            meta['source'] = '9000'
        else:
            meta['source'] = '6009'
        if debug: print('Input GCI message contains a "' + meta['source'] + '" type frame.')
    ## Grab the datacube metadata.
    if use_ir and ('cube' in d) and ('buffer' in d['cube']) and ('metadata' in d['cube']):
        #meta['Nx'] = d['cube']['metadata']['height']
        meta['Ny'] = d['cube']['metadata']['width']
        meta['Nz'] = d['cube']['metadata']['depth']
        meta['dcb'] = decode_datacube(d['cube'], msg)
        ## Also store the cube under a source-specific key, e.g. "dcb9002".
        meta['dcb'+meta['source']] = meta['dcb']
        if (topic in ('spectral-cube','absorption-cube','normalized-absorption-cube','snr-cube','stdev-cube')):
            meta['Nc'] = meta['Nz']
    ## Grab the absorption cube metadata.
    if use_ir and ('absorption-cube' in d) and ('buffer' in d['absorption-cube']) and ('metadata' in d['absorption-cube']):
        meta['Ny'] = d['absorption-cube']['metadata']['width']
        meta['Nz'] = d['absorption-cube']['metadata']['depth']
        meta['dcb_absorption'] = decode_datacube(d['absorption-cube'], msg)
        #zzz
        #pdb.set_trace()
    ## Grab the standard-deviation cube metadata.
    if use_ir and ('std-dev-cube' in d) and ('buffer' in d['std-dev-cube']) and ('metadata' in d['std-dev-cube']):
        meta['Ny'] = d['std-dev-cube']['metadata']['width']
        meta['Nz'] = d['std-dev-cube']['metadata']['depth']
        meta['dcb_stdev'] = decode_datacube(d['std-dev-cube'], msg)
    ## Grab the SNR cube metadata.
    if use_ir and ('snr-cube' in d) and ('buffer' in d['snr-cube']) and ('metadata' in d['snr-cube']):
        meta['Ny'] = d['snr-cube']['metadata']['width']
        meta['Nz'] = d['snr-cube']['metadata']['depth']
        meta['dcb_snr'] = decode_datacube(d['snr-cube'], msg)
    ## Grab the visible image metadata.
    if use_vis and ('image' in d) and ('buffer' in d['image']) and ('metadata' in d['image']):
        meta['vis_Nx'] = d['image']['metadata']['height']
        meta['vis_Ny'] = d['image']['metadata']['width']
        meta['vis'] = decode_image(d['image'], msg)
    ## Masks arrive as 0/255 images; dividing by 255 converts them to 0/1 arrays.
    ## Grab the saturation mask.
    if use_masks and ('saturation-mask' in d) and ('buffer' in d['saturation-mask']) and ('metadata' in d['saturation-mask']):
        meta['saturation_mask'] = decode_image(d['saturation-mask'], msg) // 255
    ## Grab the IR motion mask.
    if use_masks and ('ir-motion-canc-mask' in d) and ('buffer' in d['ir-motion-canc-mask']) and ('metadata' in d['ir-motion-canc-mask']):
        meta['irmotion_mask'] = decode_image(d['ir-motion-canc-mask'], msg) // 255
    ## Grab the parallax mask.
    if use_masks and ('parallax-mask' in d) and ('buffer' in d['parallax-mask']) and ('metadata' in d['parallax-mask']):
        meta['parallax_mask'] = decode_image(d['parallax-mask'], msg) // 255
    ## Grab the VIS motion mask.
    if use_masks and ('vis-motion-canc-mask' in d) and ('buffer' in d['vis-motion-canc-mask']) and ('metadata' in d['vis-motion-canc-mask']):
        meta['vismotion_mask'] = decode_image(d['vis-motion-canc-mask'], msg) // 255
    ## Grab the detection mask.
    if use_masks and ('detection-mask' in d) and ('buffer' in d['detection-mask']) and ('metadata' in d['detection-mask']):
        meta['detmask'] = decode_image(d['detection-mask'], msg) // 255
    ## Grab the difference image. Note! When the detection algorithm is a Xcorr type, then the "difference image"
    ## is actually a xcorr image.
    if ('difference-image' in d) and ('buffer' in d['difference-image']) and ('metadata' in d['difference-image']):
        meta['diffimg'] = decode_image(d['difference-image'], msg)
    ## Grab the visible *overlay* image.
    if use_vis and ('vis-image' in d) and ('buffer' in d['vis-image']) and ('metadata' in d['vis-image']):
        meta['overlay'] = decode_image(d['vis-image'], msg)
    if debug:
        ## The line below cannot be implemented as-is, because the JSON module does not know how
        ## to serialize numpy objects.
        #print(json.dumps(meta, indent=4, sort_keys=True))
        #print(meta)
        pass
    #dynamic_gains = (meta['sceneref_mean_temps'][9] - meta['aper_mean_temps'][9]) / (meta['sceneref_mean_temps'] - meta['aper_mean_temps'])
    #dynamic_offsets = meta['aper_mean_temps'][9] + meta['aper_offset_deltas']
    ## Can't use cropped_mean_temps for calculating dynamic offsets of the referenced cameras
    ## because these values are *after* the dynamic cal was applied. And if we can't get the
    ## offsets, then we might as well skip the gains, because they're just 1.0.
    #rd = read_rectanglesfile(meta['serial_number'])
    #for key in rd['refcam']:
    #    dynamic_gains[int(key)] = 1.0
    #    #dynamic_offsets[int(key)] = meta['cropped_page_mean_temps'][9] - meta['cropped_page_mean_temps'][int(key)]
    #print('dynamic offsets = ', dynamic_offsets)
    #print('dynamic gains = ', dynamic_gains)
    return(meta)
- ## =================================================================================================
def decode_datacube(cubedata, msg, debug=False):
    '''
    Decode one serialized datacube from a GCI multipart message into a numpy array.

    Parameters
    ----------
    cubedata : dict
        The JSON sub-dictionary describing the cube. Must contain 'type' (either
        'ipp16u-cube' or 'ipp32f-cube', case-insensitive), 'buffer' (whose 'index'
        gives the position of the raw buffer within `msg`), and 'metadata' (with
        'height', 'width', 'depth', and optionally 'gain'/'offset' scaling values).
    msg : list
        The full multipart message; msg[cubedata['buffer']['index']] holds the raw bytes.
    debug : bool, optional
        If True, print the cube dimensions.

    Returns
    -------
    dcb : ndarray
        A (Nx,Ny,Nz) array: 'uint16' for an 'ipp16u-cube', 'float32' for an
        'ipp32f-cube', and converted to 'float32' whenever a positive 'gain' is
        present in the metadata (dcb = raw/gain + offset).

    Raises
    ------
    NotImplementedError
        If the cube's data type string is not one of the two supported types.
    '''
    datatype = cubedata['type'].lower()
    raw_str = msg[cubedata['buffer']['index']]
    Nx = cubedata['metadata']['height']
    Ny = cubedata['metadata']['width']
    Nz = cubedata['metadata']['depth']
    if debug: print('%s: Nx, Ny, Nz = %3i %3i %3i' % (cubedata['topic'], Nx, Ny, Nz))
    dtypes = {'ipp16u-cube': ('<u2', 'uint16'), 'ipp32f-cube': ('<f4', 'float32')}
    if (datatype not in dtypes):
        raise NotImplementedError('The data type "' + datatype + '" is not implemented.')
    (wiredtype, outdtype) = dtypes[datatype]
    ## The buffer is little-endian, laid out as Nz planes of Nx rows of Ny values, with
    ## the rows of each plane stored bottom-to-top. A single frombuffer+reshape replaces
    ## the old per-row struct.unpack loop (one C-level pass instead of Nx*Nz Python
    ## calls); the [::-1] row flip reproduces the bottom-to-top fill order, and the
    ## transpose maps (z,x,y) plane order to the (Nx,Ny,Nz) output layout.
    planes = frombuffer(raw_str, dtype=wiredtype, count=Nx*Ny*Nz).reshape(Nz, Nx, Ny)
    dcb = planes[:, ::-1, :].transpose(1, 2, 0).astype(outdtype)
    ## If scaling is needed, then apply the gain and offset.
    if ('gain' in cubedata['metadata']):
        ## Matches the original behavior: 'dcb_dtype' is recorded even when the gain is
        ## non-positive and no conversion takes place.
        cubedata['dcb_dtype'] = 'float32'
        scalegain = float(cubedata['metadata']['gain'])
        scaleoffset = float(cubedata['metadata']['offset'])
        if (scalegain > 0.0):
            dcb = (float32(dcb) / scalegain) + scaleoffset
    return(dcb)
- ## =================================================================================================
def decode_image(cubedata, msg, debug=False):
    '''
    Decode one serialized image from a GCI multipart message into a numpy array.

    The raw buffer stores rows bottom-to-top; the decoded array is returned
    top-to-bottom. Supported pixel formats are 'RGB_UINT8' (returned as an
    (Nx,Ny,3) 'uint8' array), 'MONO_UINT8' ((Nx,Ny) 'uint8'), and
    'MONO_FLOAT32' ((Nx,Ny) 'float32'). Raises NotImplementedError for any
    other 'imageTypeString'.
    '''
    buf = msg[cubedata['buffer']['index']]
    nrows = cubedata['metadata']['height']
    ncols = cubedata['metadata']['width']
    pixfmt = cubedata['metadata']['imageTypeString']
    if (pixfmt == 'RGB_UINT8'):
        vis = zeros((nrows, ncols, 3), 'uint8')
        offset = 0
        ## Rows arrive bottom-to-top, so fill the output from the last row upward.
        for r in range(nrows - 1, -1, -1):
            interleaved = struct.unpack_from('<' + str(ncols * 3) + 'B', buf, offset)
            vis[r, :, 0] = interleaved[0::3]   ## de-interleave R, G, B channels
            vis[r, :, 1] = interleaved[1::3]
            vis[r, :, 2] = interleaved[2::3]
            offset += ncols * 3
    elif (pixfmt == 'MONO_UINT8'):
        vis = zeros((nrows, ncols), 'uint8')
        offset = 0
        for r in range(nrows - 1, -1, -1):
            vis[r, :] = struct.unpack_from('<' + str(ncols) + 'B', buf, offset)
            offset += ncols
    elif (pixfmt == 'MONO_FLOAT32'):
        vis = zeros((nrows, ncols), 'float32')
        offset = 0
        for r in range(nrows - 1, -1, -1):
            vis[r, :] = struct.unpack_from('<' + str(ncols) + 'f', buf, offset)
            offset += ncols * 4
    else:
        raise NotImplementedError('That visible image data type ("' + cubedata['metadata']['imageTypeString'] + \
                                  '") is not implemented.')
    return vis
- ## =================================================================================================
def acquire_raw(nframes, port=6009, gcinum=3, username=None, keyfile=None, topic='', verbose=False):
    '''
    Acquire a raw data sequence from the GCI. This is the lowest-level acquisition function, called
    by everything that wants live data from hardware.
    Parameters
    ----------
    nframes : int
        The number of frames of data to acquire
    port : int or str
        The port you want to pull from (i.e. 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9006, or 9501),
        or one of the unit-name aliases ('counts', 'degc', 'radiance', ...) translated below.
    gcinum : int, optional
        The GCI serial number.
    username : str, optional
        The username to use for connecting to the remote host
    keyfile : str, optional
        The RSA keyfile file to use for connecting to the remote host.
    topic : str
        The GCI message topic to wait for. This is used primarily when listening to ports \
        9005 (which can have topics "spectral-cube","absorption-cube", or "normalized-absorption-cube") \
        and 9501 (which can have a variety of topics, such as "methane_binary", etc --- see the \
        detection algorithm config file).
    verbose : bool
        Whether to print progress information while acquiring.
    Returns
    -------
    raw : dict
        A dictionary whose keys are "0", "1", etc. labelling the time index of the frames.\
        raw['0'] contains the GCI message string that can be used as input to `decode_gci_msg()`.
    Example
    -------
    import gci
    ## To acquire data from hardware connected to the local network.
    raw = gci.acquire_raw(9002)
    ## To acquire data from a remote system.
    raw = gci.acquire_raw(9002, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa')
    ## Or even using an SSH tunnel to a locally-connected system.
    raw = gci.acquire_raw(9002, 3, 'nhagen', '/home/nhagen/.ssh/nhagen_lab_rsa')
    ## Or, for acquiring a detection mask from a remote system.
    raw = gci.acquire_raw(9501, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa', topic='methane_binary')
    '''
    assert (nframes == int(nframes)), '"nframes" must be an integer type.'
    assert isinstance(port, basestring) or isinstance(port, int), '"units" must be either a string or integer type.'
    gcinum = int(gcinum)
    assert gcinum in (2,3), 'Only GCI serial numbers 2 and 3 are mapped to known systems.'
    ## Subscribe to all topics; filtering by topic is done after receipt, below.
    ctx = zmq.Context()
    subsock = ctx.socket(zmq.SUB)
    subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
    subsock.setsockopt(zmq.HWM, nframes)
    ## If the input uses a units string rather than a port number, then translate to a number.
    trans = {'counts':6009, '6009':6009, 'degc':9000, '9000':9000, 'corrected_degc':9002, '9002':9002,
             'corrected_radiance':9003, '9003':9003, 'radiance':9003, 'mask':9004, '9004':9004,
             'spectrum':9005, '9005':9005, '9006':9006, 'detection':9501, '9501':9501}
    if isinstance(port, str):
        port = trans[port.lower()]
    if (port not in (6009,9000,9001,9002,9003,9004,9005,9006,9007,9501)):
        raise ValueError('That port number ("' + str(port) + '") is not recognized.')
    if (port == 9005):
        if (topic == ''): topic = 'spectral-cube'
    if (port == 9501):
        ## The detection port only publishes while a listener keeps poking it.
        send_keepalive_command(topic, debug=verbose)
    if (username == None) or (keyfile == None):
        ## Use the local network to acquire data.
        if (port == 6009):
            ## Talk to the embedded system.
            address = 'tcp://10.1.10.10:'
        else:
            ## Talk to the server system.
            address = 'tcp://10.1.10.11:'
            #address = 'tcp://localhost:'
        subsock.connect(address + str(port))
        raw = {}
        for t in arange(nframes):
            #raw[str(t)] = subsock.recv_multipart()
            poller = zmq.Poller()
            poller.register(subsock, zmq.POLLIN)
            REQUEST_TIMEOUT = 2500     ## poll timeout, in milliseconds
            q = 1 ## counter to see how many requests for messages are made
            while True:
                sockets = dict(poller.poll(REQUEST_TIMEOUT))
                q += 1
                if (subsock in sockets) and (sockets[subsock] == zmq.POLLIN):
                    msg = subsock.recv_multipart()
                    ## Skip messages whose topic doesn't match the requested one.
                    if (topic != '') and (msg[0] != topic): continue
                    raw[str(t)] = msg
                    break
                if (q > 5):
                    ## Give up after five empty polls.
                    ## NOTE(review): raising ImportError for a receive timeout is unusual
                    ## (the SSH branch below catches it) -- confirm before changing.
                    subsock.close()
                    raise ImportError('Failed to receive a datacube message.')
            if verbose: print('Frame index = ' + str(t))
        subsock.close()
        return(raw)
    else:
        ## Here we try to build an SSH tunnel to a remotely-connected GCI system.
        import subprocess
        import signal
        import time
        username = str(username)
        keyfile = str(keyfile)
        if not os.path.exists(keyfile): raise LookupError, 'Cannot find file "' + keyfile + '"'
        try:
            ## Each (gcinum, port) combination has its own tunnel endpoint and forwarding rule.
            if (gcinum == 2): ## Toledo system
                if (port == 6009):
                    connect_str = 'ssh -i %s -p 22001 %s@24.52.111.98 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:6009'
                else:
                    connect_str = 'ssh -i %s -p 22000 %s@24.52.111.98 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:' + str(port)
            elif (gcinum == 3): ## lab system
                if (port == 6009):
                    connect_str = 'ssh -i %s -p 22 %s@10.1.10.11 -L %i:10.1.20.10:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:6009'
                else:
                    connect_str = 'ssh -i %s -p 22 %s@10.1.10.11 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:' + str(port)
            ## Note: SSH creates a shell, and so any commands running in the shell are its children. When you try to
            ## kill the shell, its children are not terminated with it, leaving them orphan. One way around this is
            ## to create the shell and its children as a process group (via "os.setsid") and send a command to kill
            ## all processes in that group. This works on Linux, but not on MS Windows, which doesn't have os.setsid
            ## available.
            raw = {}
            ssh_process = subprocess.Popen(connect_str, shell=True, preexec_fn=os.setsid)
            time.sleep(2.0) ## give it some time to connect
            ctx = zmq.Context()
            subsock = ctx.socket(zmq.SUB)
            subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
            subsock.setsockopt(zmq.HWM, 1)
            subsock.connect(address)
            raw = {}
            for t in arange(nframes):
                poller = zmq.Poller()
                poller.register(subsock, zmq.POLLIN)
                REQUEST_TIMEOUT = 2500     ## poll timeout, in milliseconds
                q = 1 ## counter to see how many requests for messages are made
                while True:
                    sockets = dict(poller.poll(REQUEST_TIMEOUT))
                    q += 1
                    if (subsock in sockets) and (sockets[subsock] == zmq.POLLIN):
                        msg = subsock.recv_multipart()
                        if (msg[0] != topic): continue
                        raw[str(t)] = msg
                        break
                    if (q > 5):
                        subsock.close()
                        raise ImportError('Failed to receive a datacube message with topic="' + topic + '".')
                if verbose: print('Frame index = ' + str(t))
            subsock.close()
            time.sleep(0.1) ## give it some time to close the connection before you terminate the SSH process
            ## Send the terminate signal to all the processes in the group. This apparently doesn't work on MS Windows.
            ## Note that ssh_process.kill() does *not* work, since that is a child process and not a parent.
            os.killpg(ssh_process.pid, signal.SIGTERM)
        except (OSError,ImportError), errmsg:
            print('Tunnel failed:', errmsg)
            time.sleep(0.1) ## give it some time to close the connection before you terminate the SSH process
            ## Send the terminate signal to all the processes in the group. This apparently doesn't work on MS Windows.
            ## Note that ssh_process.kill() does *not* work, since that is a child process and not a parent.
            os.killpg(ssh_process.pid, signal.SIGTERM)
            raise Exception(errmsg)
        return(raw)
- ## =================================================================================================
def acquire_data(port=6009, gcinum=3, username=None, keyfile=None, use_ir=True, use_vis=True, use_masks=True,
                 topic='', save_header=False, debug=False):
    '''
    Acquire a *single frame* of data containing the datacube, vis image, and all associated metadata.
    Parameters
    ----------
    port : int
        The port to acquire data from. (This can be 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9051).
    gcinum : int, optional
        The serial number of the GCI to talk to.
    username : str, optional
        The username to use for connecting to the remote host
    keyfile : str, optional
        The RSA keyfile file to use for connecting to the remote host
    use_ir : bool, optional
        Whether to load the IR datacube. (Set to false to improve loading speed.)
    use_vis : bool, optional
        Whether to load the visible image. (Set to false to improve loading speed.)
    use_masks : bool, optional
        Whether to load the full set of masks. (Set to false to improve loading speed.)
    topic : str
        The GCI message topic to wait for. This is used primarily when listening to ports \
        9005 (which can have topics "spectral-cube","absorption-cube", or "normalized-absorption-cube") \
        and 9501 (which can have a variety of topics, such as "methane_binary", etc --- see the \
        detection algorithm config file).
    save_header : bool
        Whether to save the text version of the metadata as "header".
    Returns
    -------
    data : dict
        A dictionary containing keys 'dcb', 'vis', and all the other elements returned by\
        `decode_gci_msg()`.
    Example
    -------
    import gci
    ## To acquire data from hardware connected to the local network.
    data = gci.acquire_data(9002)
    ## To acquire data from a remote system.
    data = gci.acquire_data(9002, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa')
    ## Or even using an SSH tunnel to a locally-connected system.
    data = gci.acquire_data(9002, 3, 'nhagen', '/home/nhagen/.ssh/nhagen_lab_rsa')
    '''
    ## Grab exactly one raw frame, then hand it straight to the decoder.
    single_frame = acquire_raw(1, port=port, gcinum=gcinum, username=username, keyfile=keyfile,
                               topic=topic, verbose=debug)
    return decode_gci_msg(single_frame['0'], use_ir=use_ir, use_vis=use_vis, use_masks=use_masks,
                          save_header=save_header, debug=debug)
- ## =================================================================================================
def acquire_dcb(ncubes, port=6009, debug=False):
    '''
    Acquire a sequence of infrared datacubes (i.e. hypercube if ncubes>1) from the GCI,
    discarding all other message content (visible images, masks, and metadata).
    Parameters
    ----------
    ncubes : int
        the number of datacubes to acquire
    port : int
        The port to acquire data from. (This can be 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9051).
    debug : bool, optional
        Passed through to the message decoder.
    Returns
    -------
    hcb : ndarray or (list of ndarrays)
        If ncubes==1, a single datacube is returned; if ncubes>1, a list of ncubes
        datacubes is returned.
    '''
    assert (ncubes == int(ncubes)), '"ncubes" must be an integer type.'
    raw = acquire_raw(ncubes, port)
    ## Decode every frame, keeping only the infrared cube from each message.
    cubes = [decode_gci_msg(raw[str(t)], use_ir=True, use_vis=False, use_masks=False, debug=debug)['dcb']
             for t in range(int(ncubes))]
    return cubes[0] if (ncubes == 1) else cubes
- ## =================================================================================================
def acquire_vis(nframes, debug=False):
    '''
    Acquire a sequence of visible images (i.e. hypercube if nframes>1) from the GCI. This throws away
    all of the associated metadata and keeps only the final images.
    Parameters
    ----------
    nframes : int
        The number of images to acquire
    debug : bool, optional
        Passed through to the message decoder.
    Returns
    -------
    hcb : ndarray
        * If `nframes==1`, and using a color camera: a (Nx,Ny,3) image is returned
        * If `nframes>1`, and using a color camera: a (Nx,Ny,3,Nt) image is returned
        * If `nframes==1`, and using a monochrome camera: a (Nx,Ny) image is returned
        * If `nframes>1`, and using a monochrome camera: a (Nx,Ny,Nt) image is returned
    '''
    assert (nframes == int(nframes)), '"nframes" must be an integer type.'
    raw = acquire_raw(nframes)
    gcidata = decode_gci_msg(raw['0'], use_ir=False, use_vis=True, use_masks=False, debug=debug)
    if (nframes == 1):
        return(gcidata['vis'])
    first = gcidata['vis']
    if (first.ndim == 2):
        ## Monochrome camera: stack the (Nx,Ny) frames along a new time axis.
        (vis_Nx,vis_Ny) = first.shape
        vcb = zeros((vis_Nx,vis_Ny,nframes), first.dtype)
        ## Bug fix: frame 0 was previously never copied into the output, leaving it all zeros.
        vcb[:,:,0] = first
        for t in arange(1,nframes):
            gcidata = decode_gci_msg(raw[str(t)], use_ir=False, use_vis=True, use_masks=False, debug=debug)
            vcb[:,:,t] = gcidata['vis']
    elif (first.ndim == 3):
        ## Color camera: stack the (Nx,Ny,3) frames along a new time axis.
        (vis_Nx,vis_Ny,_) = first.shape
        vcb = zeros((vis_Nx,vis_Ny,3,nframes), first.dtype)
        ## Bug fix: frame 0 was previously never copied into the output, leaving it all zeros.
        vcb[:,:,:,0] = first
        for t in arange(1,nframes):
            gcidata = decode_gci_msg(raw[str(t)], use_ir=False, use_vis=True, use_masks=False, debug=debug)
            vcb[:,:,:,t] = gcidata['vis']
    return(vcb)
- ## =================================================================================================
- ## Convert all of the GCI files in a folder to a Numpy hypercube, and save as .npz.
def gci_sequence_to_hcb(path, debug=True):
    '''
    Glob a directory for `*.gci` files, and decode each file into an infrared datacube.
    If there is more than one file in the directory, assemble the datacubes into a list.
    All visible images and associated metadata are thrown away.
    Parameters
    ----------
    path : str
        Either a single *.gci filename, or the directory in which to find all of the *.gci files.
        (NOTE(review): a directory path appears to need a trailing path separator, since the glob
        pattern appended is simply '*.gci' — confirm with callers.)
    debug : bool, optional
        Whether to print extra progress information.
    Returns
    -------
    hcb : ndarray or list of ndarrays
        A single (Nx,Ny,Nw) datacube if only one file is found; otherwise a list of datacubes.
    '''
    assert isinstance(path, str), 'The input "path" must be a string type.'
    if os.path.isfile(path):
        f = path
        nfiles = 1
    elif os.path.isdir(path):
        gci_files = sorted(glob.glob(path+'*.gci'))
        nfiles = len(gci_files)
        if (nfiles == 0):
            ## BUGFIX: an existing-but-empty directory previously raised IndexError on gci_files[0]
            ## instead of reaching this message.
            print('No files found in folder "' + path + '". Exiting ...')
            return
        f = gci_files[0]
        if debug: print('Converting "' + os.path.basename(f) + '" (' + str(1) + ' of ' + str(nfiles) + ') ...')
    else:
        print('No files found in folder "' + path + '". Exiting ...')
        return
    ## Open the first file and grab the dcb to figure out the data dimensions. This will allow you
    ## to create the hcb object, which we can later fill in by looping over the remaining files.
    print('Decoding image #1 from "' + os.path.basename(f) + '"')
    gcidata = decode_gci_msg(f, use_vis=False, use_masks=False, debug=False)
    if (nfiles == 1):
        return(gcidata['dcb'])
    (Nx,Ny,Nw) = gcidata['dcb'].shape
    if debug: print('(Nx,Ny,Nw)=', (Nx,Ny,Nw))
    hcb = []
    hcb.append(gcidata['dcb'])
    for t in arange(1,nfiles):
        print('Decoding image #' + str(t+1) + ' from "' + os.path.basename(gci_files[t]) + '"')
        gcidata = decode_gci_msg(gci_files[t], use_vis=False, use_masks=False, debug=False)
        hcb.append(gcidata['dcb'])
    return(hcb)
- ## =================================================================================================
def crop_dcb(dcb, xycoords):
    '''
    Crop a datacube using a matrix of cropping coordinates.
    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube to be cropped
    xycoords : ndarray
        The (Nw,4) array of (xlo,xhi,ylo,yhi) cropping coordinates for all Nw cameras. Every row
        must describe a rectangle of the same size, since the output is a rectangular array.
    Returns
    -------
    dcb_cropped : ndarray
        The cropped datacube, with x-dimension equal to xycoords[w,1]-xycoords[w,0],\
        y-dimension equal to xycoords[w,3]-xycoords[w,2], and w-dimension equal to Nw.
    '''
    dcb = asarray(dcb)
    assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
    ## Force integer coordinates so that they are valid slice indices even if given as floats.
    xycoords = asarray(xycoords, dtype=int)
    (_,_,Nw) = dcb.shape
    ## All crop rectangles share the dimensions of the first one.
    Nx = xycoords[0,1] - xycoords[0,0]
    Ny = xycoords[0,3] - xycoords[0,2]
    ## Use dcb.dtype rather than indexing an element (dtype(dcb[0,0,0]) fails on an empty cube).
    dcb_cropped = zeros((Nx,Ny,Nw), dcb.dtype)
    for w in arange(Nw):
        (xlo,xhi,ylo,yhi) = xycoords[w,:]
        dcb_cropped[:,:,w] = dcb[xlo:xhi,ylo:yhi,w]
    return(dcb_cropped)
- ## =================================================================================================
def crop_hypercube(hcb, xycoords):
    '''
    Crop a hypercube (datacube sequence) using a matrix of cropping coordinates.
    Parameters
    ----------
    hcb : ndarray
        The (Nx,Ny,Nw,Nt) hypercube to be cropped
    xycoords : ndarray
        The (Nw,4) array of (xlo,xhi,ylo,yhi) cropping coordinates for all Nw cameras. Every row
        must describe a rectangle of the same size, since the output is a rectangular array.
    Returns
    -------
    hcb_cropped : ndarray
        The cropped hypercube, with x-dimension equal to `xycoords[w,1]-xycoords[w,0]`, y-dimension \
        equal to `xycoords[w,3]-xycoords[w,2]`, w-dimension equal to Nw, and t-dimension equal to \
        Nt. Here `w` can be any index value 0 through Nw-1.
    '''
    hcb = asarray(hcb)
    assert (hcb.ndim == 4), 'Input "hcb" must have four dimensions.'
    ## Force integer coordinates so that they are valid slice indices even if given as floats.
    xycoords = asarray(xycoords, dtype=int)
    (_,_,Nw,Nt) = hcb.shape
    ## All crop rectangles share the dimensions of the first one.
    Nx = xycoords[0,1] - xycoords[0,0]
    Ny = xycoords[0,3] - xycoords[0,2]
    hcb_cropped = zeros((Nx,Ny,Nw,Nt), hcb.dtype)
    for t in arange(Nt):
        for w in arange(Nw):
            (xlo,xhi,ylo,yhi) = xycoords[w,:]
            ## BUGFIX: the assignment previously omitted the t index (hcb_cropped[:,:,w] = ...),
            ## which broadcast one time-slice across the entire time axis, leaving every output
            ## time plane equal to the data from t = Nt-1.
            hcb_cropped[:,:,w,t] = hcb[xlo:xhi,ylo:yhi,w,t]
    return(hcb_cropped)
- ## =================================================================================================
- ## keyword "plottype" allows options ['image', 'histogram']
- ## The "box1", "box2", "box3", and "box4" optional arguments can be used to draw boxes in the datacube slices images.
- ## Each argument can be either a 4-element vector (i.e. same box coordinates in each wavelength image) or a Nw*4
- ## matrix (giving different box coordinates for each wavelength image).
- def show_dcb(dcb, waves=None, figsize=None, title='', plottype='image', showstats=False, \
- box1=None, box2=None, box3=None, box4=None, invert_display=False, show=False,
- use_colorbars=True, **kwargs):
- '''
- Display a datacube in a single window, showing each w-plane of the cube as an image.
- Parameters
- ----------
- dcb : ndarray
- A (Nx,Ny,Nw) datacube.
- waves : ndarray, optional
- A vector of Nw values with which to label the datacube images.
- figsize : tuple, optional
- The size of the plotting window.
- title : str, optional
- The figure title.
- plottype : str, optional ('image','histogram','difference1','difference2')
- What type of figure to draw --- the images themselves, their histograms, or their \
- difference cubes.
- showstats : bool
Whether or not to show the image stats as text overlaid on the image.
- box1 : ndarray or list, optional
- A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box1 is drawn in blue.
- box2 : ndarray or list, optional
- A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box2 is drawn in red.
- box3 : ndarray or list, optional
- A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box3 is drawn in green.
- box4 : ndarray or list, optional
- A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box4 is drawn in yellow.
- invert_display : bool, optional
- Whether or not to draw the datacube planes with 0 at the bottom (invert_display==True), \
- or 0 at the top (invert_display==False).
- show : bool, optional
- Whether or not to call matplotlib's `show()` function.
- use_colorbars : bool, optional
- Whether to display colorbars with the datacube images.
- **kwargs: any, optional
Any keyword arguments that can be used by matplotlib's `imshow()` function.
- Notes
- -----
- Also, the inputs to the `box1`, `box2`, `box3`, and `box4` keywords can be 4-element vectors \
- rather than (Nw,4) size matrices. This forces the box coordinates to be the same for all \
- `w`.
- The dark gray shaded regions of the histograms extend out to the 1*sigma value, while the \
- light gray shaded regions of the histograms extend out to the 2*sigma value. The central \
- white line in the background shows the position of the mean.
- Setting `log=True` will allow logarithmic scaling of the histogram plots.
- '''
- import matplotlib.pyplot as plt
- import matplotlib.cm as cm
- from matplotlib.ticker import MaxNLocator
- import matplotlib.patheffects as PathEffects
- #print('PLATFORM = ', platform.system())
- #dcb = asarray(dcb)
- assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
- assert (plottype.lower() in ['image','histogram','difference1','difference2']), 'That plottype value ("' + plottype + '") is not recognized.'
- (Nx,Ny,Nw) = dcb.shape
- if (waves != None):
- waves = asarray(waves)
- assert (alen(waves) == Nw), 'The input "waves" should have Nw elements, not ' + str(alen(waves)) + '.'
- if (plottype == 'difference1'):
- dcb = array(dcb[:,:,1:] - dcb[:,:,:-1])
- Nw -= 1
- print('DIFFERENCE1')
- elif (plottype == 'difference2'):
- dcb = array(dcb - dcb[:,:,::-1])
- print('DIFFERENCE2')
- aspect_ratio = Ny / float(Nx)
- if (Nw == 1):
- if (figsize == None): figsize=(6*1.65*aspect_ratio,6)
- plt.figure(figsize=figsize)
- if (plottype.lower() == 'image'):
- plt.imshow(dcb[:,:,0], zorder=1, **kwargs)
- plt.axis('tight')
- if use_colorbars: plt.colorbar()
- elif (plottype.lower() == 'histogram'):
- ## Note: there is a matplotlib bug for logarithmic plotting when NaNs are present.
- (n, bin_edges) = histogram(log10(dcb[:,:,0].flatten()), 40)
- meanval = mean(dcb[:,:,0].flatten())
- stdval = std(dcb[:,:,0].flatten())
- ax = plt.subplot(111)
- ax.fill_between([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [0, 0, amax(n)*1.1, amax(n)*1.1, 0], 0.5, color='0.75', zorder=1)
- ax.fill_between([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [0, 0, amax(n)*1.1, amax(n)*1.1, 0], 0.5, color='0.5', zorder=2)
- ax.plot([meanval, meanval], [0, amax(n)*1.1], 'w-', linewidth=1.5, zorder=3)
- (counts, edges, patches) = ax.hist(dcb[:,:,0].flatten(), 40, zorder=4, edgecolor='0.5', log=True, **kwargs)
- if (meanval > 100):
- statstr = 'mean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
- else:
- statstr = 'mean='+('%.2f' % meanval) + '\nstd='+('%.2f' % stdval)
- ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
- ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])
- else:
- if (figsize == None): figsize=(8*1.65*aspect_ratio,8)
- ncols = (ceil(sqrt(Nw)))
- nrows = (ceil(Nw / ncols))
- plt.figure(figsize=figsize)
- dimstr = '(Nx,Ny,Nw)=(' + str(Nx) + ',' + str(Ny) + ',' + str(Nw) + ')'
- plt.suptitle(dimstr + ' ' + title)
- for w in arange(Nw):
- if invert_display:
- c = int(w % ncols)
- r = int((Nw - w - 1) // ncols)
- else:
- c = int(w % ncols)
- r = int(w // ncols)
- if (plottype == 'image') or (plottype == 'histogram'):
- titlestr = 'w='+str(w) if (waves == None) else 'w='+str(waves[w])
- elif (plottype == 'difference1'):
- titlestr = '(w=' + str(w+1) + ') - (w=' + str(w) + ') difference'
- elif (plottype == 'difference2'):
- titlestr = '(w=' + str(Nw-1-w) + ') - (w=' + str(w) + ') difference'
- ## The "sharex=ax0" and "sharey=ax0" business is needed in order for zooming of one of the
- ## subplots is to propagate into all of the subplots. Don't turn this on for the histogram
- ## plot type, since we want to allow the axes to be different for each plot in that case.
- if (w == 0):
- ax0 = plt.subplot2grid((int(nrows), int(ncols)), (r,c))
- elif (w != 0) and (plottype.lower() == 'histogram'):
- ax = plt.subplot2grid((int(nrows), int(ncols)), (r,c))
- else:
- ax = plt.subplot2grid((int(nrows), int(ncols)), (r,c), sharex=ax0, sharey=ax0)
- if (plottype.lower() in ('image','difference1','difference2')):
- if (w == 0):
- plt.imshow(dcb[:,:,w], zorder=1, **kwargs)
- else:
- plt.imshow(dcb[:,:,w], zorder=1, **kwargs)
- plt.axis('tight')
- if use_colorbars:
- cb = plt.colorbar()
- cb.formatter.set_useOffset(False) ## turn off any offset that it may apply
- cb.update_ticks() ## update the new ticks for no-offset
- if (w == 0):
- ax0.set_yticklabels([])
- ax0.set_xticklabels([])
- else:
- ax.set_yticklabels([])
- ax.set_xticklabels([])
- plt.gca().axes.get_xaxis().set_visible(False)
- plt.gca().axes.get_yaxis().set_visible(False)
- if use_colorbars:
- cl = plt.getp(cb.ax, 'ymajorticklabels')
- plt.setp(cl, fontsize=10)
- plt.title(titlestr)
- if showstats:
- minval = min(dcb[:,:,w].flatten())
- meanval = mean(dcb[:,:,w].flatten())
- maxval = max(dcb[:,:,w].flatten())
- stdval = std(dcb[:,:,w].flatten())
- if isnan(meanval):
- sigdig = 0
- else:
- logmean = log10(abs(meanval))
- sigdig = 2 if (logmean >= -1) else int(-floor(logmean))
- fmt = '%.0f' if (meanval > 100) else '%.' + str(sigdig) + 'f'
- statstr = (('min=' + fmt + '\nmean=' + fmt + '\nmax=' + fmt + '\nstd=' + fmt) % (minval, meanval, maxval, stdval))
- if (w == 0):
- stattext = ax0.text(0.4, 0.4, statstr, horizontalalignment='center', transform=ax0.transA…
Large files files are truncated, but you can click here to view the full file