/gci.py
Python | 5993 lines | 5821 code | 47 blank | 125 comment | 58 complexity | 235216d8a21c72726dfe7d1fde242c86 MD5 | raw file
- #! /usr/bin/env python
- # -*- coding: UTF-8 -*-
- # pylint: disable-msg=C0301,C0321,R0912,R0913,R0914,R0915
- '''
- This module provides a library of functions for use with the GCI system.
- GLOBAL CONSTANTS
- ================
- GCIDIR
- GCI_SERIAL_NUMBER
- TODO
- ====
- 1. Make "serial_number" a required argument for a bunch of these functions in order to ensure
- consistency.
- FUNCTION DESCRIPTIONS
- =====================
- '''
- from __future__ import print_function, division
- from numpy import *
- import zmq
- import platform
- import os
- import sys
- import glob
- import struct
- import simplejson as json
- import pdb #zzz
- from scipy.stats import nanmean, nanmedian, nanstd
#global GCI_SERIAL_NUMBER
#global GCIDIR

## Default GCI serial number. Callers can change this at runtime (see
## set_gci_serial_number() / get_gci_serial_number() in __all__ below).
GCI_SERIAL_NUMBER = 2

## Note that os.path.realpath() here converts a relative path into an absolute one.
## Previously this dug the filename out of sys._current_frames(), which is fragile and
## breaks on Python 3 (dict views are not indexable); __file__ is the portable way to
## locate this module on disk.
thisfile = os.path.realpath(__file__)
GCIDIR = os.path.dirname(thisfile) + '/'

__all__ = ['decode_gci_msg_meta','acquire_raw','acquire_data','acquire_dcb','acquire_vis','crop_dcb','crop_hypercube',
           'show_dcb','get_offsets_and_gains','receive_command_replies','get_control_data','move_shutter',
           'set_heater_setpoint','set_heater_state','replace_bad_pixels','degc_to_atsensor_radiance',
           'gci_sequence_to_hcb','get_filter_spectra','get_filter_edge_wavelength','send_command',
           'show_image_with_projections','dynamically_correct_dcb','read_gcifile_meta','read_gcifile',
           'dcb_objtemp_to_atsensor_radiance','get_filterlist','acquire_manually_calibrated_hypercube',
           'get_gas_spectra','calculate_hmatrix','get_filter_type',
           'planck_blackbody','get_degc_to_atsensor_radiance_lut','dcb_atsensor_radiance_to_objtemp',
           'read_rectanglesfile','update_rectangles_dict','write_rectanglesfile','draw_block_spectrum',
           'get_mean_spect','dcb_region_stats','daisi_rect_to_xycoords','xycoords_to_daisi_rect','print_rects',
           'xcorr','xcorr3d','convert_radiancecube_to_spectralcube','send_asynch_command','set_gci_serial_number',
           'get_gci_serial_number','get_calib_dictionary','jcamp_reader','jcamp_spectrum','format_coord',
           'find_nearest_brackets','is_float','getcolors','get_poly_coeffs','polyeval_Horner','get_parallax_mask',
           'get_vismotion_mask','get_irmotion_mask','mask_overlay','draw_box','get_vis_steam_mask',
           'get_ir_steam_mask','get_spectral_mask','get_detection_mask','get_spectral_detection_mask','set_saturated_to_nan','pullcube',
           'gci_file_ismotioncalib','gci_msg_ismotioncalib','to_json','serial_number_from_filterlist',
           'send_keepalive_command','send_trigger_cal_command']
__docformat__ = 'restructuredtext en'
- ## =================================================================================================
def decode_gci_msg(msg, use_ir=True, use_vis=True, use_masks=True, save_header=False, debug=False):
    '''
    Convert a GCI message (a zmq multipart message, or the contents of a `*.gci` file) into a
    dictionary of numpy objects, decoding the metadata and, optionally, any datacubes, visible
    images, and masks present in the message.

    Parameters
    ----------
    msg : list of str
        The message delivered by the GCI's zmq network interface, or through the contents of \
        a `*.gci` file
    use_ir : bool
        Whether to decode any datacubes. (Setting to False improves decoding speed.)
    use_vis : bool
        Whether to decode any visible images. (Setting to False improves decoding speed.)
    use_masks : bool
        Whether to decode any masks. (Setting to False improves decoding speed.)
    save_header : bool
        Whether to save the text-version of the metadata as "header".
    debug : bool
        Whether to print debugging information while decoding.

    Returns
    -------
    data : dict
        A dictionary containing all data elements from the decoded gci msg. If present in the gci \
        file, the list of dictionary keys can include

        * "cool_shutter_temperatures"
        * "warm_shutter_temperatures"
        * "timestamp"
        * "accumulated_frames_list"
        * "gain_cube_filename"
        * "offset_cube_filename"
        * "cropping_xy"
        * "filter_to_camera_trans"
        * "serial_number"
        * ...
    '''
    ## The "algorithm output" image uses different subscription topics, shown in msg[0],
    ## to tell which one is being sent at a given time.
    assert isinstance(msg, (list, tuple, ndarray))
    topic = msg[0]
    if (topic != '') and debug: print('topic = ' + topic)
    d = json.loads(msg[1])      ## the "payload" dictionary; remaining msg items are binary buffers
    ## Set up to grab the system data.
    meta = {}
    meta['topic'] = topic
    if save_header:
        import yaml
        ## Keep a human-readable dump of the full payload dictionary.
        meta['header'] = yaml.dump(d, indent=3, width=130)
    if debug:
        import yaml
        print(yaml.dump(d, indent=3, width=130))
    if ('gci-meta' in d) and ('metadata' in d['gci-meta']):
        ## The first three items are always present.
        meta['cool_shutter_temperatures'] = float32(d['gci-meta']['metadata']['unheated-temps-list'])
        meta['warm_shutter_temperatures'] = float32(d['gci-meta']['metadata']['heated-temps-list'])
        meta['accumulated_frames_list'] = d['gci-meta']['metadata']['frames-accum-list']
        meta['timestamp'] = d['gci-meta']['metadata']['ts-ms-since-epoch']
        ## Nw: number of wavelength channels (one entry per accumulated frame).
        Nw = alen(meta['accumulated_frames_list'])
        meta['Nw'] = Nw
        if ('cube' in d) and ('metadata' in d['cube']):
            meta['Nx'] = d['cube']['metadata']['height']
        else:
            meta['Nx'] = 240    ## fallback: hard-coded detector height
        if ('cube-width' in d['gci-meta']['metadata']):
            meta['Ny'] = d['gci-meta']['metadata']['cube-width']
        if ('unheated-radiances-list' in d['gci-meta']['metadata']):
            meta['cool_shutter_radiances'] = float32(d['gci-meta']['metadata']['unheated-radiances-list'])
        if ('heated-radiances-list' in d['gci-meta']['metadata']):
            meta['warm_shutter_radiances'] = float32(d['gci-meta']['metadata']['heated-radiances-list'])
        if ('gain-cube-filename' in d['gci-meta']['metadata']):
            meta['gain_cube_filename'] = d['gci-meta']['metadata']['gain-cube-filename']
        if ('offset-cube-filename' in d['gci-meta']['metadata']):
            meta['offset_cube_filename'] = d['gci-meta']['metadata']['offset-cube-filename']
        if ('static-offset-type' in d['gci-meta']['metadata']):
            ## This can be either "cool" or "warm".
            meta['static_offset_type'] = d['gci-meta']['metadata']['static-offset-type'].lower()
        if ('reg-data-str' in d['gci-meta']['metadata']):
            ## Had to hard-code the Nx value here. Need a better structure for data to make it
            ## flexible for *any* detector array dimensions.
            rect_dict = d['gci-meta']['metadata']['reg-data-str']['IR']
            meta['rd'] = {}
            meta['rd']['cropping_xy'] = daisi_rect_to_xycoords(rect_dict, 240, meta['Nw'])
            #if ('scene-reference-rect' in d['gci-meta']['metadata']):
            scene_xy = daisi_rect_to_xycoords(d['gci-meta']['metadata']['scene-reference-rect'], meta['Nx'], 0)
            meta['rd']['scene_xy'] = scene_xy
        if ('aper-mean-temps' in d['gci-meta']['metadata']):
            ## Since not all w's are represented in the aperture values, you need to initialize the
            ## array and fill in only those values which are defined.
            ## (float32([None]*Nw) yields a NaN-filled array; unrepresented channels stay NaN.)
            meta['aper_mean_temps'] = float32([None]*Nw)
            for key in d['gci-meta']['metadata']['aper-mean-temps']:
                w = int(key)    ## dict keys are channel indices serialized as strings
                meta['aper_mean_temps'][w] = float32(d['gci-meta']['metadata']['aper-mean-temps'][key])
        if ('scene-mean-temps' in d['gci-meta']['metadata']):
            meta['sceneref_mean_temps'] = zeros(Nw, 'float32')
            for key in d['gci-meta']['metadata']['scene-mean-temps']:
                w = int(key)
                meta['sceneref_mean_temps'][w] = float32(d['gci-meta']['metadata']['scene-mean-temps'][key])
        if ('aper-offset-deltas' in d['gci-meta']['metadata']):
            meta['aper_offset_deltas'] = float32([None]*Nw)
            for key in d['gci-meta']['metadata']['aper-offset-deltas']:
                w = int(key)
                meta['aper_offset_deltas'][w] = float32(d['gci-meta']['metadata']['aper-offset-deltas'][key])
        if ('cropped-page-mean-temps' in d['gci-meta']['metadata']):
            meta['cropped_page_mean_temps'] = float32(d['gci-meta']['metadata']['cropped-page-mean-temps'])
        if ('spec-radiance-means' in d['gci-meta']['metadata']):
            meta['spec_radiance_means'] = float32(d['gci-meta']['metadata']['spec-radiance-means'])
            ## Nc: number of spectral channels.
            meta['Nc'] = alen(meta['spec_radiance_means'])
        if ('version-info' in d['gci-meta']['metadata']):
            meta['version_info'] = d['gci-meta']['metadata']['version-info']
        if ('calibration-state' in d['gci-meta']['metadata']):
            ## This string can be {'calibration-2-pt-heating','calibration-2-pt-running',
            ## 'calibration-1-pt-running','calibration-idle'}.
            meta['calibration_state'] = d['gci-meta']['metadata']['calibration-state'].lower()
        if ('pan-tilt-in-motion' in d['gci-meta']['metadata']):
            meta['pan_tilt_in_motion'] = d['gci-meta']['metadata']['pan-tilt-in-motion']
        if ('pan-tilt-position' in d['gci-meta']['metadata']):
            meta['pan_tilt_position'] = float32(d['gci-meta']['metadata']['pan-tilt-position'])
        if ('pan-tilt-setpoint' in d['gci-meta']['metadata']):
            meta['pan_tilt_setpoint'] = float32(d['gci-meta']['metadata']['pan-tilt-setpoint'])
        if ('gain-cube-timestamp' in d['gci-meta']['metadata']):
            meta['gain_cube_timestamp'] = d['gci-meta']['metadata']['gain-cube-timestamp']
        if ('offset-cube-timestamp' in d['gci-meta']['metadata']):
            meta['offset_cube_timestamp'] = d['gci-meta']['metadata']['offset-cube-timestamp']
        if ('heated-temp-ts-ms' in d['gci-meta']['metadata']):
            meta['heated_temp_timestamp'] = d['gci-meta']['metadata']['heated-temp-ts-ms']
        if ('scene-ref-timestamp' in d['gci-meta']['metadata']):
            meta['scene_ref_timestamp'] = d['gci-meta']['metadata']['scene-ref-timestamp']
        if ('is-scene-ref' in d['gci-meta']['metadata']):
            meta['is_scene_ref'] = d['gci-meta']['metadata']['is-scene-ref']
        if ('num-pixels-masked-by-ir-mc' in d['gci-meta']['metadata']):
            meta['npix_masked_ir_mc'] = d['gci-meta']['metadata']['num-pixels-masked-by-ir-mc']
        if ('num-pixels-masked-by-ir-steam' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_ir_steam'] = d['gci-meta']['metadata']['num-pixels-masked-by-ir-steam']
        if ('num-pixels-masked-by-parallax' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_parallax'] = d['gci-meta']['metadata']['num-pixels-masked-by-parallax']
        if ('num-pixels-masked-by-vis-mc' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_vis_mc'] = d['gci-meta']['metadata']['num-pixels-masked-by-vis-mc']
        if ('num-pixels-masked-by-vis-steam' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_vis_steam'] = d['gci-meta']['metadata']['num-pixels-masked-by-vis-steam']
        ## Look inside the filter map to determine which GCI you're talking to.
        if ('filt-chan-map' in d['gci-meta']['metadata']):
            if ('propylene0' in d['gci-meta']['metadata']['filt-chan-map']):
                ## Older map format: filter names are the keys themselves.
                filterlist = get_filterlist(d['gci-meta']['metadata']['filt-chan-map'])
                meta['filterlist'] = filterlist
            else:
                ## Newer map format: keys are channel indices, values carry a 'uniqueID'.
                meta['filterlist'] = []
                for key in d['gci-meta']['metadata']['filt-chan-map']:
                    w = int(key)
                    meta['filterlist'].append(d['gci-meta']['metadata']['filt-chan-map'][key]['uniqueID'])
            meta['serial_number'] = serial_number_from_filterlist(meta['filterlist'], debug=debug)
        if ('shutter-state-info' in d['gci-meta']['metadata']):
            sh = d['gci-meta']['metadata']['shutter-state-info']
            if 'heater_enabled' in sh: meta['heater_enabled'] = sh['heater_enabled'] ## bool
            if 'heater_on_cond' in sh: meta['heater_on_cond'] = sh['heater_on_cond'] ## bool
            if 'heater_set_point' in sh: meta['heater_set_point'] = float32(sh['heater_set_point'])
            if 'cool_set_open' in sh: meta['cool_shutter_set_open'] = sh['cool_set_open'] ## bool
            if 'cool_pos' in sh: meta['cool_shutter_position'] = sh['cool_pos'].lower() ## can be 'open', 'closed', 'transit', 'invalid'
            if 'warm_set_open' in sh: meta['warm_shutter_set_open'] = sh['warm_set_open'] ## bool
            if 'warm_pos' in sh: meta['warm_shutter_position'] = sh['warm_pos'].lower() ## can be 'open', 'closed', 'transit', 'invalid'
        if ('force-temporal-ref-reset' in d['gci-meta']['metadata']):
            meta['force_temporal_ref_reset'] = d['gci-meta']['metadata']['force-temporal-ref-reset']
        ## Some heuristics to guess which port was the source for the frame.
        if ('version-info' in d['gci-meta']['metadata']):
            #if ('cube_chan_viewer' in d['gci-meta']['metadata']['version-info']):
            if ('detection-mask' in d):
                meta['source'] = '9501'
            elif ('spectral_cube_gen' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9005'
            elif ('mask_gen' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9004'
            elif ('degC_to_rad' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9003'
            elif ('dynamic_cal' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9002'
            elif ('rotate_vis_cam' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9001'
            else:
                meta['source'] = '9000'
        elif('gain-cube-filename' in d['gci-meta']['metadata']):
            meta['source'] = '9000'
        else:
            meta['source'] = '6009'
        if debug: print('Input GCI message contains a "' + meta['source'] + '" type frame.')
    ## Grab the datacube metadata.
    if use_ir and ('cube' in d) and ('buffer' in d['cube']) and ('metadata' in d['cube']):
        #meta['Nx'] = d['cube']['metadata']['height']
        meta['Ny'] = d['cube']['metadata']['width']
        meta['Nz'] = d['cube']['metadata']['depth']
        meta['dcb'] = decode_datacube(d['cube'], msg)
        ## Also store the cube under a port-specific key, e.g. 'dcb9002'.
        meta['dcb'+meta['source']] = meta['dcb']
        if (topic in ('spectral-cube','absorption-cube','normalized-absorption-cube','snr-cube','stdev-cube')):
            meta['Nc'] = meta['Nz']
    ## Grab the absorption cube metadata.
    if use_ir and ('absorption-cube' in d) and ('buffer' in d['absorption-cube']) and ('metadata' in d['absorption-cube']):
        meta['Ny'] = d['absorption-cube']['metadata']['width']
        meta['Nz'] = d['absorption-cube']['metadata']['depth']
        meta['dcb_absorption'] = decode_datacube(d['absorption-cube'], msg)
        #zzz
        #pdb.set_trace()
    ## Grab the standard-deviation cube metadata.
    if use_ir and ('std-dev-cube' in d) and ('buffer' in d['std-dev-cube']) and ('metadata' in d['std-dev-cube']):
        meta['Ny'] = d['std-dev-cube']['metadata']['width']
        meta['Nz'] = d['std-dev-cube']['metadata']['depth']
        meta['dcb_stdev'] = decode_datacube(d['std-dev-cube'], msg)
    ## Grab the SNR cube metadata.
    if use_ir and ('snr-cube' in d) and ('buffer' in d['snr-cube']) and ('metadata' in d['snr-cube']):
        meta['Ny'] = d['snr-cube']['metadata']['width']
        meta['Nz'] = d['snr-cube']['metadata']['depth']
        meta['dcb_snr'] = decode_datacube(d['snr-cube'], msg)
    ## Grab the visible image metadata.
    if use_vis and ('image' in d) and ('buffer' in d['image']) and ('metadata' in d['image']):
        meta['vis_Nx'] = d['image']['metadata']['height']
        meta['vis_Ny'] = d['image']['metadata']['width']
        meta['vis'] = decode_image(d['image'], msg)
    ## Grab the saturation mask. (Masks arrive as 0/255 images; dividing by 255 gives 0/1.)
    if use_masks and ('saturation-mask' in d) and ('buffer' in d['saturation-mask']) and ('metadata' in d['saturation-mask']):
        meta['saturation_mask'] = decode_image(d['saturation-mask'], msg) // 255
    ## Grab the IR motion mask.
    if use_masks and ('ir-motion-canc-mask' in d) and ('buffer' in d['ir-motion-canc-mask']) and ('metadata' in d['ir-motion-canc-mask']):
        meta['irmotion_mask'] = decode_image(d['ir-motion-canc-mask'], msg) // 255
    ## Grab the parallax mask.
    if use_masks and ('parallax-mask' in d) and ('buffer' in d['parallax-mask']) and ('metadata' in d['parallax-mask']):
        meta['parallax_mask'] = decode_image(d['parallax-mask'], msg) // 255
    ## Grab the VIS motion mask.
    if use_masks and ('vis-motion-canc-mask' in d) and ('buffer' in d['vis-motion-canc-mask']) and ('metadata' in d['vis-motion-canc-mask']):
        meta['vismotion_mask'] = decode_image(d['vis-motion-canc-mask'], msg) // 255
    ## Grab the detection mask.
    if use_masks and ('detection-mask' in d) and ('buffer' in d['detection-mask']) and ('metadata' in d['detection-mask']):
        meta['detmask'] = decode_image(d['detection-mask'], msg) // 255
    ## Grab the difference image. Note! When the detection algorithm is a Xcorr type, then the "difference image"
    ## is actually a xcorr image.
    if ('difference-image' in d) and ('buffer' in d['difference-image']) and ('metadata' in d['difference-image']):
        meta['diffimg'] = decode_image(d['difference-image'], msg)
    ## Grab the visible *overlay* image.
    if use_vis and ('vis-image' in d) and ('buffer' in d['vis-image']) and ('metadata' in d['vis-image']):
        meta['overlay'] = decode_image(d['vis-image'], msg)
    if debug:
        ## The line below cannot be implemented as-is, because the JSON module does not know how
        ## to serialize numpy objects.
        #print(json.dumps(meta, indent=4, sort_keys=True))
        #print(meta)
        pass
    #dynamic_gains = (meta['sceneref_mean_temps'][9] - meta['aper_mean_temps'][9]) / (meta['sceneref_mean_temps'] - meta['aper_mean_temps'])
    #dynamic_offsets = meta['aper_mean_temps'][9] + meta['aper_offset_deltas']
    ## Can't use cropped_mean_temps for calculating dynamic offsets of the referenced cameras
    ## because these values are *after* the dynamic cal was applied. And if we can't get the
    ## offsets, then we might as well skip the gains, because they're just 1.0.
    #rd = read_rectanglesfile(meta['serial_number'])
    #for key in rd['refcam']:
    #    dynamic_gains[int(key)] = 1.0
    #    #dynamic_offsets[int(key)] = meta['cropped_page_mean_temps'][9] - meta['cropped_page_mean_temps'][int(key)]
    #print('dynamic offsets = ', dynamic_offsets)
    #print('dynamic gains = ', dynamic_gains)
    return(meta)
- ## =================================================================================================
def decode_datacube(cubedata, msg, debug=False):
    '''
    Decode one raw datacube buffer from a GCI message into a numpy array.

    The buffer is laid out z-major (one full image page per spectral channel), with the
    image rows stored from x = Nx-1 down to x = 0, each row being Ny little-endian values.

    Parameters
    ----------
    cubedata : dict
        The cube sub-dictionary of the message payload; must contain 'type',
        'buffer'['index'], and 'metadata' with 'height', 'width', 'depth'.
    msg : list
        The full multipart message; the raw bytes live at msg[cubedata['buffer']['index']].
    debug : bool
        Whether to print the cube dimensions while decoding.

    Returns
    -------
    dcb : ndarray
        A (Nx,Ny,Nz) array; dtype 'uint16' for 'Ipp16u-cube' input, 'float32' for
        'Ipp32f-cube' input or whenever gain/offset scaling is applied.

    Raises
    ------
    NotImplementedError
        If the cube's 'type' string is not one of the supported formats.
    '''
    datatype = cubedata['type'].lower()
    raw_str = msg[cubedata['buffer']['index']]
    Nx = cubedata['metadata']['height']
    Ny = cubedata['metadata']['width']
    Nz = cubedata['metadata']['depth']
    if debug: print('%s: Nx, Ny, Nz = %3i %3i %3i' % (cubedata['topic'], Nx, Ny, Nz))
    if (datatype == 'ipp16u-cube'):
        (wiretype, nativetype) = ('<u2', 'uint16')
    elif (datatype == 'ipp32f-cube'):
        (wiretype, nativetype) = ('<f4', 'float32')
    else:
        raise NotImplementedError('The data type "' + datatype + '" is not implemented.')
    ## Decode the whole buffer in one vectorized pass instead of row-by-row struct.unpack
    ## calls: view as (Nz,Nx,Ny), move z to the last axis, and reverse the row order.
    ## astype() copies, giving a writable array in the platform-native dtype.
    pages = frombuffer(raw_str, dtype=wiretype, count=Nx*Ny*Nz).reshape(Nz, Nx, Ny)
    dcb = pages.transpose(1, 2, 0)[::-1, :, :].astype(nativetype)
    ## If scaling is needed, then apply the gain and offset.
    if ('gain' in cubedata['metadata']):
        cubedata['dcb_dtype'] = 'float32'
        scalegain = float(cubedata['metadata']['gain'])
        scaleoffset = float(cubedata['metadata']['offset'])
        if (scalegain > 0.0):
            dcb = (float32(dcb) / scalegain) + scaleoffset
    return(dcb)
- ## =================================================================================================
def decode_image(cubedata, msg, debug=False):
    '''
    Decode one raw image buffer from a GCI message into a numpy array.

    The buffer stores image rows from x = Nx-1 down to x = 0; RGB pixels are interleaved
    (R,G,B per pixel), and float data is little-endian.

    Parameters
    ----------
    cubedata : dict
        The image sub-dictionary of the message payload; must contain 'buffer'['index']
        and 'metadata' with 'height', 'width', 'imageTypeString'.
    msg : list
        The full multipart message; the raw bytes live at msg[cubedata['buffer']['index']].
    debug : bool
        Unused; kept for interface symmetry with decode_datacube().

    Returns
    -------
    vis : ndarray
        (Nx,Ny,3) uint8 for 'RGB_UINT8', (Nx,Ny) uint8 for 'MONO_UINT8',
        (Nx,Ny) float32 for 'MONO_FLOAT32'.

    Raises
    ------
    NotImplementedError
        If the metadata's 'imageTypeString' is not one of the supported formats.
    '''
    raw_str = msg[cubedata['buffer']['index']]
    Nx = cubedata['metadata']['height']
    Ny = cubedata['metadata']['width']
    imgtype = cubedata['metadata']['imageTypeString']
    ## Decode in one vectorized pass (instead of row-by-row struct.unpack calls), then
    ## reverse the row order to match the stored bottom-up layout. astype() copies,
    ## giving a writable array.
    if (imgtype == 'RGB_UINT8'):
        vis = frombuffer(raw_str, dtype=uint8, count=Nx*Ny*3).reshape(Nx, Ny, 3)[::-1, :, :].astype('uint8')
    elif (imgtype == 'MONO_UINT8'):
        vis = frombuffer(raw_str, dtype=uint8, count=Nx*Ny).reshape(Nx, Ny)[::-1, :].astype('uint8')
    elif (imgtype == 'MONO_FLOAT32'):
        vis = frombuffer(raw_str, dtype='<f4', count=Nx*Ny).reshape(Nx, Ny)[::-1, :].astype('float32')
    else:
        raise NotImplementedError('That visible image data type ("' + cubedata['metadata']['imageTypeString'] + \
                                  '") is not implemented.')
    return(vis)
- ## =================================================================================================
## Polling parameters for receiving zmq messages: timeout per poll (ms), and the maximum
## number of polls attempted before giving up on a frame.
REQUEST_TIMEOUT_MS = 2500
MAX_POLL_ATTEMPTS = 5

def _receive_frames(subsock, nframes, topic, verbose, failmsg):
    '''
    Pull `nframes` multipart messages from `subsock`, keeping only those whose topic
    frame matches `topic` (an empty `topic` matches any message). On repeated timeouts
    the socket is closed and ImportError(failmsg) is raised (ImportError is kept for
    backward compatibility with existing callers).
    '''
    poller = zmq.Poller()
    poller.register(subsock, zmq.POLLIN)
    raw = {}
    for t in arange(nframes):
        q = 1     ## counter to see how many requests for messages are made
        while True:
            sockets = dict(poller.poll(REQUEST_TIMEOUT_MS))
            q += 1
            if (subsock in sockets) and (sockets[subsock] == zmq.POLLIN):
                msg = subsock.recv_multipart()
                if (topic != '') and (msg[0] != topic): continue
                raw[str(t)] = msg
                break
            if (q > MAX_POLL_ATTEMPTS):
                subsock.close()
                raise ImportError(failmsg)
        if verbose: print('Frame index = ' + str(t))
    return(raw)

def acquire_raw(nframes, port=6009, gcinum=3, username=None, keyfile=None, topic='', verbose=False):
    '''
    Acquire a raw data sequence from the GCI. This is the lowest-level acquisition function, called
    by everything that wants live data from hardware.

    Parameters
    ----------
    nframes : int
        The number of frames of data to acquire
    port : int or str
        The port you want to pull from (i.e. 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9006, or 9501), \
        or a units string such as 'counts', 'degc', 'radiance', 'mask', 'spectrum', or 'detection'.
    gcinum : int, optional
        The GCI serial number.
    username : str, optional
        The username to use for connecting to the remote host
    keyfile : str, optional
        The RSA keyfile file to use for connecting to the remote host.
    topic : str
        The GCI message topic to wait for. This is used primarily when listening to ports \
        9005 (which can have topics "spectral-cube","absorption-cube", or "normalized-absorption-cube") \
        and 9501 (which can have a variety of topics, such as "methane_binary", etc --- see the \
        detection algorithm config file).

    Returns
    -------
    raw : dict
        A dictionary whose keys are "0", "1", etc. labelling the time index of the frames.\
        raw['0'] contains the GCI message string that can be used as input to `decode_gci_msg()`.

    Example
    -------
    import gci
    ## To acquire one frame from hardware connected to the local network.
    raw = gci.acquire_raw(1, port=9002)
    ## To acquire data from a remote system.
    raw = gci.acquire_raw(1, 9002, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa')
    ## Or even using an SSH tunnel to a locally-connected system.
    raw = gci.acquire_raw(1, 9002, 3, 'nhagen', '/home/nhagen/.ssh/nhagen_lab_rsa')
    ## Or, for acquiring a detection mask from a remote system.
    raw = gci.acquire_raw(1, 9501, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa', topic='methane_binary')
    '''
    assert (nframes == int(nframes)), '"nframes" must be an integer type.'
    ## NOTE(review): `basestring` exists only on Python 2; this module is otherwise Py2-era code.
    assert isinstance(port, basestring) or isinstance(port, int), '"port" must be either a string or integer type.'
    gcinum = int(gcinum)
    assert gcinum in (2,3), 'Only GCI serial numbers 2 and 3 are mapped to known systems.'
    ctx = zmq.Context()
    subsock = ctx.socket(zmq.SUB)
    subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
    subsock.setsockopt(zmq.HWM, nframes)
    ## If the input uses a units string rather than a port number, then translate to a number.
    trans = {'counts':6009, '6009':6009, 'degc':9000, '9000':9000, 'corrected_degc':9002, '9002':9002,
             'corrected_radiance':9003, '9003':9003, 'radiance':9003, 'mask':9004, '9004':9004,
             'spectrum':9005, '9005':9005, '9006':9006, 'detection':9501, '9501':9501}
    if isinstance(port, str):
        port = trans[port.lower()]
    if (port not in (6009,9000,9001,9002,9003,9004,9005,9006,9007,9501)):
        raise ValueError('That port number ("' + str(port) + '") is not recognized.')
    if (port == 9005) and (topic == ''):
        topic = 'spectral-cube'
    if (port == 9501):
        ## The detection port only publishes while a client keeps it alive.
        send_keepalive_command(topic, debug=verbose)

    if (username is None) or (keyfile is None):
        ## Use the local network to acquire data.
        if (port == 6009):
            ## Talk to the embedded system.
            address = 'tcp://10.1.10.10:'
        else:
            ## Talk to the server system.
            address = 'tcp://10.1.10.11:'
            #address = 'tcp://localhost:'
        subsock.connect(address + str(port))
        raw = _receive_frames(subsock, nframes, topic, verbose, 'Failed to receive a datacube message.')
        subsock.close()
        return(raw)

    ## Here we try to build an SSH tunnel to a remotely-connected GCI system.
    import subprocess
    import signal
    import time
    username = str(username)
    keyfile = str(keyfile)
    if not os.path.exists(keyfile): raise LookupError('Cannot find file "' + keyfile + '"')
    try:
        if (gcinum == 2):       ## Toledo system
            if (port == 6009):
                connect_str = 'ssh -i %s -p 22001 %s@24.52.111.98 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                address = 'tcp://localhost:6009'
            else:
                connect_str = 'ssh -i %s -p 22000 %s@24.52.111.98 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                address = 'tcp://localhost:' + str(port)
        elif (gcinum == 3):     ## lab system
            if (port == 6009):
                connect_str = 'ssh -i %s -p 22 %s@10.1.10.11 -L %i:10.1.20.10:%i -N' % (keyfile, username, port, port)
                address = 'tcp://localhost:6009'
            else:
                connect_str = 'ssh -i %s -p 22 %s@10.1.10.11 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                address = 'tcp://localhost:' + str(port)
        ## Note: SSH creates a shell, and so any commands running in the shell are its children. When you try to
        ## kill the shell, its children are not terminated with it, leaving them orphan. One way around this is
        ## to create the shell and its children as a process group (via "os.setsid") and send a command to kill
        ## all processes in that group. This works on Linux, but not on MS Windows, which doesn't have os.setsid
        ## available.
        ssh_process = subprocess.Popen(connect_str, shell=True, preexec_fn=os.setsid)
        time.sleep(2.0)         ## give it some time to connect
        ctx = zmq.Context()
        subsock = ctx.socket(zmq.SUB)
        subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
        subsock.setsockopt(zmq.HWM, 1)
        subsock.connect(address)
        raw = _receive_frames(subsock, nframes, topic, verbose,
                              'Failed to receive a datacube message with topic="' + topic + '".')
        subsock.close()
        time.sleep(0.1)         ## give it some time to close the connection before you terminate the SSH process
        ## Send the terminate signal to all the processes in the group. This apparently doesn't work on MS Windows.
        ## Note that ssh_process.kill() does *not* work, since that is a child process and not a parent.
        os.killpg(ssh_process.pid, signal.SIGTERM)
    except (OSError, ImportError) as errmsg:
        print('Tunnel failed:', errmsg)
        time.sleep(0.1)         ## give it some time to close the connection before you terminate the SSH process
        ## Send the terminate signal to all the processes in the group. This apparently doesn't work on MS Windows.
        ## Note that ssh_process.kill() does *not* work, since that is a child process and not a parent.
        os.killpg(ssh_process.pid, signal.SIGTERM)
        raise Exception(errmsg)
    return(raw)
- ## =================================================================================================
def acquire_data(port=6009, gcinum=3, username=None, keyfile=None, use_ir=True, use_vis=True, use_masks=True,
                 topic='', save_header=False, debug=False):
    '''
    Acquire a *single frame* of data containing the datacube, vis image, and all associated metadata.

    This is a convenience wrapper that grabs one raw frame via `acquire_raw()` and decodes it
    with `decode_gci_msg()`.

    Parameters
    ----------
    port : int
        The port to acquire data from. (This can be 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9051).
    gcinum : int, optional
        The serial number of the GCI to talk to.
    username : str, optional
        The username to use for connecting to the remote host
    keyfile : str, optional
        The RSA keyfile file to use for connecting to the remote host
    use_ir : bool, optional
        Whether to load the IR datacube. (Set to false to improve loading speed.)
    use_vis : bool, optional
        Whether to load the visible image. (Set to false to improve loading speed.)
    use_masks : bool, optional
        Whether to load the full set of masks. (Set to false to improve loading speed.)
    topic : str
        The GCI message topic to wait for. This is used primarily when listening to ports \
        9005 (which can have topics "spectral-cube","absorption-cube", or "normalized-absorption-cube") \
        and 9501 (which can have a variety of topics, such as "methane_binary", etc --- see the \
        detection algorithm config file).
    save_header : bool
        Whether to save the text version of the metadata as "header".

    Returns
    -------
    data : dict
        A dictionary containing keys 'dcb', 'vis', and all the other elements returned by \
        `decode_gci_msg()`.

    Example
    -------
    import gci
    ## To acquire data from hardware connected to the local network.
    data = gci.acquire_data(9002)
    ## To acquire data from a remote system.
    data = gci.acquire_data(9002, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa')
    ## Or even using an SSH tunnel to a locally-connected system.
    data = gci.acquire_data(9002, 3, 'nhagen', '/home/nhagen/.ssh/nhagen_lab_rsa')
    '''
    frames = acquire_raw(1, port=port, gcinum=gcinum, username=username, keyfile=keyfile,
                         topic=topic, verbose=debug)
    decoded = decode_gci_msg(frames['0'], use_ir=use_ir, use_vis=use_vis, use_masks=use_masks,
                             save_header=save_header, debug=debug)
    return(decoded)
- ## =================================================================================================
def acquire_dcb(ncubes, port=6009, debug=False):
    '''
    Acquire a sequence of infrared datacubes (i.e. hypercube if ncubes>1) from the GCI.
    All of the associated metadata is discarded; only the datacubes themselves are kept.

    Parameters
    ----------
    ncubes : int
        the number of datacubes to acquire
    port : int
        The port to acquire data from. (This can be 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9051).
    debug : bool
        Whether to print debugging information while decoding.

    Returns
    -------
    hcb : ndarray or list of ndarrays
        If ncubes==1, a single datacube is returned; if ncubes>1, a list of `ncubes` datacubes.
    '''
    assert (ncubes == int(ncubes)), '"ncubes" must be an integer type.'
    raw = acquire_raw(ncubes, port)
    ## Decode each frame and keep only its 'dcb' entry.
    cubes = [decode_gci_msg(raw[str(t)], use_ir=True, use_vis=False, use_masks=False, debug=debug)['dcb']
             for t in arange(ncubes)]
    return(cubes[0] if (ncubes == 1) else cubes)
- ## =================================================================================================
def acquire_vis(nframes, debug=False):
    '''
    Acquire a sequence of visible images (i.e. hypercube if nframes>1) from the GCI. This throws away
    all of the associated metadata and keeps only the final images.

    Parameters
    ----------
    nframes : int
        The number of images to acquire
    debug : bool
        Whether to print debugging information while decoding.

    Returns
    -------
    hcb : ndarray
        * If `nframes==1`, and using a color camera: a (Nx,Ny,3) image is returned
        * If `nframes>1`, and using a color camera: a (Nx,Ny,3,Nt) image is returned
        * If `nframes==1`, and using a monochrome camera: a (Nx,Ny) image is returned
        * If `nframes>1`, and using a monochrome camera: a (Nx,Ny,Nt) image is returned
    '''
    assert (nframes == int(nframes)), '"nframes" must be an integer type.'
    raw = acquire_raw(nframes)
    gcidata = decode_gci_msg(raw['0'], use_ir=False, use_vis=True, use_masks=False, debug=debug)
    if (nframes == 1):
        return(gcidata['vis'])
    first = gcidata['vis']
    if (first.ndim == 2):
        ## Monochrome camera: stack frames along a new time axis.
        (vis_Nx,vis_Ny) = first.shape
        vcb = zeros((vis_Nx,vis_Ny,nframes), first.dtype)
        vcb[:,:,0] = first      ## bug fix: frame 0 was previously left all-zero
        for t in arange(1,nframes):
            gcidata = decode_gci_msg(raw[str(t)], use_ir=False, use_vis=True, use_masks=False, debug=debug)
            vcb[:,:,t] = gcidata['vis']
    elif (first.ndim == 3):
        ## Color camera: (Nx,Ny,3) frames stacked along a new time axis.
        (vis_Nx,vis_Ny,_) = first.shape
        vcb = zeros((vis_Nx,vis_Ny,3,nframes), first.dtype)
        vcb[:,:,:,0] = first    ## bug fix: frame 0 was previously left all-zero
        for t in arange(1,nframes):
            gcidata = decode_gci_msg(raw[str(t)], use_ir=False, use_vis=True, use_masks=False, debug=debug)
            vcb[:,:,:,t] = gcidata['vis']
    else:
        ## Previously an unexpected dimensionality fell through and raised a confusing
        ## NameError on the unbound `vcb`; fail loudly instead.
        raise ValueError('Unexpected visible image dimensionality: ndim=%i' % first.ndim)
    return(vcb)
- ## =================================================================================================
- ## Convert all of the GCI files in a folder to a Numpy hypercube, and save as .npz.
def gci_sequence_to_hcb(path, debug=True):
    '''
    Glob a directory for `*.gci` files, and decode each file into an infrared datacube.

    If there is more than one file in the directory, assemble the datacubes into a list
    (an infrared hypercube). Throw away all of the associated metadata.

    Parameters
    ----------
    path : str
        Either a single *.gci filename, or the directory in which to find all of the *.gci files.
    debug : bool, optional
        Whether to print progress/dimension information.

    Returns
    -------
    hcb : ndarray or (list of ndarrays) or None
        A single datacube if only one file is found; a list of datacubes if several are found;
        None if no files are found.
    '''
    assert isinstance(path, str), 'The input "path" must be a string type.'
    if os.path.isfile(path):
        f = path
        nfiles = 1
        gci_files = [path]
    elif os.path.isdir(path):
        ## BUGFIX: use os.path.join() so that a directory without a trailing slash still globs
        ## correctly (previously path+'*.gci' silently matched nothing).
        gci_files = sorted(glob.glob(os.path.join(path, '*.gci')))
        nfiles = len(gci_files)
        ## BUGFIX: guard against an existing-but-empty directory (previously gci_files[0]
        ## raised IndexError before the "no files" message could be printed).
        if (nfiles == 0):
            print('No files found in folder "' + path + '". Exiting ...')
            return
        f = gci_files[0]
        if debug: print('Converting "' + os.path.basename(f) + '" (' + str(1) + ' of ' + str(nfiles) + ') ...')
    else:
        print('No files found in folder "' + path + '". Exiting ...')
        return
    ## Open the first file and grab the dcb to figure out the data dimensions. This will allow you
    ## to create the hcb object, which we can later fill in by looping over the remaining files.
    print('Decoding image #1 from "' + os.path.basename(f) + '"')
    gcidata = decode_gci_msg(f, use_vis=False, use_masks=False, debug=False)
    if (nfiles == 1):
        return(gcidata['dcb'])
    (Nx,Ny,Nw) = gcidata['dcb'].shape
    if debug: print('(Nx,Ny,Nw)=', (Nx,Ny,Nw))
    hcb = []
    hcb.append(gcidata['dcb'])
    for t in arange(1,nfiles):
        print('Decoding image #' + str(t+1) + ' from "' + os.path.basename(gci_files[t]) + '"')
        gcidata = decode_gci_msg(gci_files[t], use_vis=False, use_masks=False, debug=False)
        hcb.append(gcidata['dcb'])
    return(hcb)
- ## =================================================================================================
def crop_dcb(dcb, xycoords):
    '''
    Crop each wavelength plane of a datacube with its own rectangle of coordinates.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube to be cropped
    xycoords : ndarray
        The (Nw,4) array of (xlo,xhi,ylo,yhi) cropping coordinates for all Nw cameras

    Returns
    -------
    dcb_cropped : ndarray
        The cropped datacube, with x-dimension equal to xycoords[w,1]-xycoords[w,0],
        y-dimension equal to xycoords[w,3]-xycoords[w,2], and w-dimension equal to Nw.
    '''
    dcb = asarray(dcb)
    assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
    xycoords = asarray(xycoords)
    ## The output extent is taken from the first camera's rectangle; every rectangle is
    ## assumed to have the same width and height.
    nwaves = dcb.shape[2]
    out_nx = xycoords[0,1] - xycoords[0,0]
    out_ny = xycoords[0,3] - xycoords[0,2]
    cropped = zeros((out_nx, out_ny, nwaves), dtype(dcb[0,0,0]))
    for w in range(nwaves):
        (xlo, xhi, ylo, yhi) = (xycoords[w,0], xycoords[w,1], xycoords[w,2], xycoords[w,3])
        cropped[:,:,w] = dcb[xlo:xhi, ylo:yhi, w]
    return(cropped)
- ## =================================================================================================
def crop_hypercube(hcb, xycoords):
    '''
    Crop a hypercube (datacube sequence) using a matrix of cropping coordinates.

    Parameters
    ----------
    hcb : ndarray
        The (Nx,Ny,Nw,Nt) hypercube to be cropped
    xycoords : ndarray
        The (Nw,4) array of (xlo,xhi,ylo,yhi) cropping coordinates for all Nw cameras

    Returns
    -------
    hcb_cropped : ndarray
        The cropped hypercube, with x-dimension equal to `xycoords[w,1]-xycoords[w,0]`, y-dimension \
        equal to `xycoords[w,3]-xycoords[w,2]`, w-dimension equal to Nw, and t-dimension equal to \
        Nt. Here `w` can be any index value 0 through Nw-1.
    '''
    hcb = asarray(hcb)
    ## BUGFIX: the assert message previously said "three dimensions" for a 4-D check.
    assert (hcb.ndim == 4), 'Input "hcb" must have four dimensions.'
    xycoords = asarray(xycoords)
    (_,_,Nw,Nt) = hcb.shape
    ## All rectangles are assumed to share the extent of the first camera's rectangle.
    Nx = xycoords[0,1] - xycoords[0,0]
    Ny = xycoords[0,3] - xycoords[0,2]
    hcb_cropped = zeros((Nx,Ny,Nw,Nt), dtype(hcb[0,0,0,0]))
    for t in arange(Nt):
        for w in arange(Nw):
            xlo = xycoords[w,0]
            xhi = xycoords[w,1]
            ylo = xycoords[w,2]
            yhi = xycoords[w,3]
            ## BUGFIX: the destination previously omitted the "t" index
            ## (hcb_cropped[:,:,w] = ...), which broadcast each frame across ALL time slices
            ## so that the whole output ended up equal to the final frame.
            hcb_cropped[:,:,w,t] = hcb[xlo:xhi,ylo:yhi,w,t]
    return(hcb_cropped)
- ## =================================================================================================
- ## keyword "plottype" allows options ['image', 'histogram']
- ## The "box1", "box2", "box3", and "box4" optional arguments can be used to draw boxes in the datacube slices images.
- ## Each argument can be either a 4-element vector (i.e. same box coordinates in each wavelength image) or a Nw*4
- ## matrix (giving different box coordinates for each wavelength image).
def show_dcb(dcb, waves=None, figsize=None, title='', plottype='image', showstats=False, \
        box1=None, box2=None, box3=None, box4=None, invert_display=False, show=False,
        use_colorbars=True, **kwargs):
    '''
    Display a datacube in a single window, showing each w-plane of the cube as an image.

    Parameters
    ----------
    dcb : ndarray
        A (Nx,Ny,Nw) datacube.
    waves : ndarray, optional
        A vector of Nw values with which to label the datacube images.
    figsize : tuple, optional
        The size of the plotting window.
    title : str, optional
        The figure title.
    plottype : str, optional ('image','histogram','difference1','difference2')
        What type of figure to draw --- the images themselves, their histograms, or their \
        difference cubes.
    showstats : bool
        Whether or not to show the image stats as text overlaid on the image.
    box1 : ndarray or list, optional
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box1 is drawn in blue.
    box2 : ndarray or list, optional
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box2 is drawn in red.
    box3 : ndarray or list, optional
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box3 is drawn in green.
    box4 : ndarray or list, optional
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box4 is drawn in yellow.
    invert_display : bool, optional
        Whether or not to draw the datacube planes with 0 at the bottom (invert_display==True), \
        or 0 at the top (invert_display==False).
    show : bool, optional
        Whether or not to call matplotlib's `show()` function.
    use_colorbars : bool, optional
        Whether to display colorbars with the datacube images.
    **kwargs: any, optional
        Any keyword arguments that can be used by matplotlib's `imshow()` function.

    Notes
    -----
    Also, the inputs to the `box1`, `box2`, `box3`, and `box4` keywords can be 4-element vectors \
    rather than (Nw,4) size matrices. This forces the box coordinates to be the same for all \
    `w`.

    The dark gray shaded regions of the histograms extend out to the 1*sigma value, while the \
    light gray shaded regions of the histograms extend out to the 2*sigma value. The central \
    white line in the background shows the position of the mean.

    Setting `log=True` will allow logarithmic scaling of the histogram plots.
    '''
    ## Import matplotlib lazily so that non-plotting users of this module don't need a display.
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from matplotlib.ticker import MaxNLocator
    import matplotlib.patheffects as PathEffects
    #print('PLATFORM = ', platform.system())
    #dcb = asarray(dcb)
    assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
    assert (plottype.lower() in ['image','histogram','difference1','difference2']), 'That plottype value ("' + plottype + '") is not recognized.'
    (Nx,Ny,Nw) = dcb.shape
    ## NOTE(review): "waves != None" compares an ndarray elementwise on modern numpy and can
    ## raise/warn; "waves is not None" would be the robust form. Same for the figsize/box
    ## comparisons below. Also alen() was removed from numpy >= 1.23. Left unchanged here.
    if (waves != None):
        waves = asarray(waves)
        assert (alen(waves) == Nw), 'The input "waves" should have Nw elements, not ' + str(alen(waves)) + '.'
    if (plottype == 'difference1'):
        ## difference1: adjacent-plane differences, so the cube shrinks by one plane.
        dcb = array(dcb[:,:,1:] - dcb[:,:,:-1])
        Nw -= 1
        print('DIFFERENCE1')
    elif (plottype == 'difference2'):
        ## difference2: difference against the wavelength-reversed cube (same size).
        dcb = array(dcb - dcb[:,:,::-1])
        print('DIFFERENCE2')
    aspect_ratio = Ny / float(Nx)
    if (Nw == 1):
        ## Single-plane cube: draw one image (or one histogram) without the subplot grid.
        if (figsize == None): figsize=(6*1.65*aspect_ratio,6)
        plt.figure(figsize=figsize)
        if (plottype.lower() == 'image'):
            plt.imshow(dcb[:,:,0], zorder=1, **kwargs)
            plt.axis('tight')
            if use_colorbars: plt.colorbar()
        elif (plottype.lower() == 'histogram'):
            ## Note: there is a matplotlib bug for logarithmic plotting when NaNs are present.
            (n, bin_edges) = histogram(log10(dcb[:,:,0].flatten()), 40)
            meanval = mean(dcb[:,:,0].flatten())
            stdval = std(dcb[:,:,0].flatten())
            ax = plt.subplot(111)
            ## Shaded background: light gray out to 2*sigma, dark gray out to 1*sigma,
            ## white line at the mean. Drawn before the histogram (lower zorder).
            ax.fill_between([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [0, 0, amax(n)*1.1, amax(n)*1.1, 0], 0.5, color='0.75', zorder=1)
            ax.fill_between([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [0, 0, amax(n)*1.1, amax(n)*1.1, 0], 0.5, color='0.5', zorder=2)
            ax.plot([meanval, meanval], [0, amax(n)*1.1], 'w-', linewidth=1.5, zorder=3)
            (counts, edges, patches) = ax.hist(dcb[:,:,0].flatten(), 40, zorder=4, edgecolor='0.5', log=True, **kwargs)
            ## Fewer decimals for large-valued data.
            if (meanval > 100):
                statstr = 'mean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
            else:
                statstr = 'mean='+('%.2f' % meanval) + '\nstd='+('%.2f' % stdval)
            ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
            ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])
    else:
        ## Multi-plane cube: lay the planes out on a roughly-square subplot grid.
        if (figsize == None): figsize=(8*1.65*aspect_ratio,8)
        ncols = (ceil(sqrt(Nw)))
        nrows = (ceil(Nw / ncols))
        plt.figure(figsize=figsize)
        dimstr = '(Nx,Ny,Nw)=(' + str(Nx) + ',' + str(Ny) + ',' + str(Nw) + ')'
        plt.suptitle(dimstr + ' ' + title)
        for w in arange(Nw):
            ## Map plane index w to a (row,col) grid cell; invert_display flips the row order.
            if invert_display:
                c = int(w % ncols)
                r = int((Nw - w - 1) // ncols)
            else:
                c = int(w % ncols)
                r = int(w // ncols)
            if (plottype == 'image') or (plottype == 'histogram'):
                titlestr = 'w='+str(w) if (waves == None) else 'w='+str(waves[w])
            elif (plottype == 'difference1'):
                titlestr = '(w=' + str(w+1) + ') - (w=' + str(w) + ') difference'
            elif (plottype == 'difference2'):
                titlestr = '(w=' + str(Nw-1-w) + ') - (w=' + str(w) + ') difference'
            ## The "sharex=ax0" and "sharey=ax0" business is needed in order for zooming of one of the
            ## subplots is to propagate into all of the subplots. Don't turn this on for the histogram
            ## plot type, since we want to allow the axes to be different for each plot in that case.
            if (w == 0):
                ax0 = plt.subplot2grid((int(nrows), int(ncols)), (r,c))
            elif (w != 0) and (plottype.lower() == 'histogram'):
                ax = plt.subplot2grid((int(nrows), int(ncols)), (r,c))
            else:
                ax = plt.subplot2grid((int(nrows), int(ncols)), (r,c), sharex=ax0, sharey=ax0)
            if (plottype.lower() in ('image','difference1','difference2')):
                if (w == 0):
                    plt.imshow(dcb[:,:,w], zorder=1, **kwargs)
                else:
                    plt.imshow(dcb[:,:,w], zorder=1, **kwargs)
                plt.axis('tight')
                if use_colorbars:
                    cb = plt.colorbar()
                    cb.formatter.set_useOffset(False) ## turn off any offset that it may apply
                    cb.update_ticks() ## update the new ticks for no-offset
                ## Strip the tick labels; the suptitle carries the dimension info instead.
                if (w == 0):
                    ax0.set_yticklabels([])
                    ax0.set_xticklabels([])
                else:
                    ax.set_yticklabels([])
                    ax.set_xticklabels([])
                plt.gca().axes.get_xaxis().set_visible(False)
                plt.gca().axes.get_yaxis().set_visible(False)
                if use_colorbars:
                    cl = plt.getp(cb.ax, 'ymajorticklabels')
                    plt.setp(cl, fontsize=10)
                plt.title(titlestr)
                if showstats:
                    ## Overlay min/mean/max/std, choosing the number of significant digits
                    ## from the magnitude of the mean.
                    minval = min(dcb[:,:,w].flatten())
                    meanval = mean(dcb[:,:,w].flatten())
                    maxval = max(dcb[:,:,w].flatten())
                    stdval = std(dcb[:,:,w].flatten())
                    if isnan(meanval):
                        sigdig = 0
                    else:
                        logmean = log10(abs(meanval))
                        sigdig = 2 if (logmean >= -1) else int(-floor(logmean))
                    fmt = '%.0f' if (meanval > 100) else '%.' + str(sigdig) + 'f'
                    statstr = (('min=' + fmt + '\nmean=' + fmt + '\nmax=' + fmt + '\nstd=' + fmt) % (minval, meanval, maxval, stdval))
                    if (w == 0):
                        stattext = ax0.text(0.4, 0.4, statstr, horizontalalignment='center', transform=ax0.transAxes, zorder=2, color='k', fontsize=10)
                        stattext.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='w')])
                    else:
                        stattext = ax.text(0.4, 0.4, statstr, horizontalalignment='center', transform=ax.transAxes, zorder=2, color='k', fontsize=10)
                        stattext.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='w')])
            elif (plottype.lower() == 'histogram'):
                (counts, edges) = histogram(dcb[:,:,w].flatten(), 40)
                meanval = mean(dcb[:,:,w].flatten())
                stdval = std(dcb[:,:,w].flatten())
                if isnan(meanval):
                    sigdig = 0
                else:
                    logmean = log10(abs(meanval))
                    sigdig = 2 if (logmean >= -1) else int(-floor(logmean))+1
                ## With log axes the floor of the plot must sit at 1 count, not 0.
                ymin = 1 if ('log' in kwargs) else 0
                if (w == 0):
                    ## Same 1-sigma/2-sigma shaded background as the single-plane case above.
                    ax0.fill([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.75', zorder=1)
                    ax0.fill([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.5', zorder=2)
                    plt.plot([meanval, meanval], [0, amax(counts)*1.1], 'w-', linewidth=1.5, zorder=3)
                    ax0.hist(dcb[:,:,w].flatten(), edges, zorder=4, edgecolor='0.5', **kwargs)
                    ax0.tick_params(axis='both', which='major', labelsize=8)
                    ax0.xaxis.set_major_locator(MaxNLocator(6))
                    if (meanval > 100):
                        titlestr += ':\nmean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
                    else:
                        titlestr += ':\nmean='+(('%.' + str(sigdig) + 'f') % meanval) + '\nstd='+(('%.' + str(sigdig) + 'f') % stdval)
                    ax0.axis([amin(edges), amax(edges), ymin, amax(counts)*1.1])
                    ax0.text(0.75, 0.6, titlestr, horizontalalignment='center', transform=ax0.transAxes, bbox=dict(facecolor='white', alpha=0.5), fontsize='small', zorder=5)
                else:
                    ax.fill([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.75', zorder=1)
                    ax.fill([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.5', zorder=2)
                    plt.plot([meanval, meanval], [0, amax(counts)*1.1], 'w-', linewidth=1.5, zorder=3)
                    ax.hist(dcb[:,:,w].flatten(), edges, zorder=4, edgecolor='0.5', **kwargs)
                    ax.tick_params(axis='both', which='major', labelsize=8)
                    ax.xaxis.set_major_locator(MaxNLocator(6))
                    if (meanval > 100):
                        titlestr += ':\nmean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
                    else:
                        titlestr += ':\nmean='+(('%.' + str(sigdig) + 'f') % meanval) + '\nstd='+(('%.' + str(sigdig) + 'f') % stdval)
                    ax.axis([amin(edges), amax(edges), ymin, amax(counts)*1.1])
                    ax.text(0.75, 0.6, titlestr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), fontsize='small', zorder=5)
            ## Optional overlay rectangles (image mode only); each box gets a fixed color.
            if (plottype.lower() == 'image') and (box1 != None):
                draw_box(box1, color='b', w=w)
            if (plottype.lower() == 'image') and (box2 != None):
                draw_box(box2, color='r', w=w)
            if (plottype.lower() == 'image') and (box3 != None):
                draw_box(box3, color='g', w=w)
            if (plottype.lower() == 'image') and (box4 != None):
                draw_box(box4, color='y', w=w)
    if show: plt.show()
    return
- ## =================================================================================================
def get_offsets_and_gains(cool_dcb, warm_dcb, cool_temps_degC, warm_temps_degC, units='radiance',
        poly_approx=False, debug=False):
    '''
    Find out the offset and gain values to convert raw detector counts into degC or radiance units.

    Parameters
    ----------
    cool_dcb : ndarray
        The (Nx,Ny,Nw) datacube captured when viewing the cool shutter.
    warm_dcb : ndarray
        The (Nx,Ny,Nw) datacube captured when viewing the warm shutter.
    cool_temps_degC : ndarray
        Either a scalar value or a Nw-length vector of temperatures of the cool shutter during capture.
    warm_temps_degC : ndarray
        Either a scalar value or a Nw-length vector of temperatures of the warm shutter during capture.
    units : str
        Whether the input cube is in units of 'radiance' or 'degC' (case-insensitive).
    poly_approx : bool
        Whether to use the polynomial approximation to the degC->radiance conversion.
    debug : bool
        Whether to print intermediate values.

    Returns
    -------
    offset_dcb : ndarray
        The (Nx,Ny,Nw) datacube of offset values (the cool-shutter cube).
    gain_dcb : ndarray
        The (Nx,Ny,Nw) datacube of gain values.
    scale_offsets : ndarray
        The Nw-length vector of cool-shutter temperatures (degC) or radiances used as the
        zero-point of the scale.

    Raises
    ------
    ValueError
        If any cool-warm temperature delta is non-positive, or if the computed gain cube
        contains non-finite values.
    '''
    cool_dcb = asarray(cool_dcb)
    assert (cool_dcb.ndim == 3), 'Input "cool_dcb" must have three dimensions.'
    warm_dcb = asarray(warm_dcb)
    assert (warm_dcb.ndim == 3), 'Input "warm_dcb" must have three dimensions.'
    ## BUGFIX: normalize the case once. Previously the assert accepted any capitalization
    ## (e.g. "DegC") but the branches below compared against the exact strings 'degC' and
    ## 'radiance', so other capitalizations silently skipped both branches and crashed later.
    units = units.lower()
    assert (units in ['radiance','degc']), '"units" input must be either "radiance" or "degC".'
    assert all(cool_temps_degC < 150.0) and all(cool_temps_degC > -150.0), 'Cool shutter temperatures fall outside the valid range!'
    assert all(warm_temps_degC < 150.0) and all(warm_temps_degC > -150.0), 'Warm shutter temperatures fall outside the valid range!'
    (Nx,Ny,Nw) = cool_dcb.shape
    ## Use size() rather than numpy's alen(), which was removed in numpy 1.23.
    assert ((size(cool_temps_degC) == Nw) or (size(cool_temps_degC) == 1)), 'The number of cool shutter temperatures must be either 1 or Nw.'
    assert ((size(warm_temps_degC) == Nw) or (size(warm_temps_degC) == 1)), 'The number of warm shutter temperatures must be either 1 or Nw.'
    ## Verify that the inputs are reasonable.
    if any((cool_temps_degC - warm_temps_degC) > 0.0):
        print("deltaT's = ", cool_temps_degC - warm_temps_degC)
        raise ValueError('get_offsets_and_gains(): There is no positive temperature delta between cool and warm shutters!')
    if all(isnan(cool_dcb)) or all(isnan(warm_dcb)):
        ## BUGFIX: return a 3-tuple like the normal exit path (this early return was
        ## previously a 2-tuple, which broke 3-value unpacking at the call site).
        return(cool_dcb*0.0, cool_dcb*0.0, zeros(Nw))
    ## Broadcast scalar shutter temperatures to one value per camera.
    if (size(cool_temps_degC) == 1):
        cool_temps_degC = ones(Nw) * cool_temps_degC
    if (size(warm_temps_degC) == 1):
        warm_temps_degC = ones(Nw) * warm_temps_degC
    if debug:
        print('cool_temps=', cool_temps_degC)
        print('warm_temps=', warm_temps_degC)
    if (units == 'degc'):
        ## Linear two-point calibration directly in temperature units.
        deltaT = warm_temps_degC - cool_temps_degC
        scale_offsets = cool_temps_degC
        offset_dcb = cool_dcb
        gain_dcb = zeros_like(cool_dcb)
        for w in arange(Nw):
            gain_dcb[:,:,w] = (warm_dcb[:,:,w] - cool_dcb[:,:,w]) / deltaT[w]
    elif (units == 'radiance'):
        ## Convert the shutter temperatures to at-sensor radiances (exactly, or via a
        ## 5th-order polynomial fit to the conversion LUT), then do the two-point calibration.
        filterlist = get_filterlist()
        filter_data = get_filter_spectra(filterlist=filterlist)
        Ecool = zeros(Nw)
        Ewarm = zeros(Nw)
        if poly_approx:
            (LUT, temps) = get_degc_to_atsensor_radiance_lut(filterlist=filterlist, filter_data=filter_data)
            new_temps = temps[1:-1]
            for w in arange(Nw):
                coeffs = get_poly_coeffs(new_temps, LUT[1:-1,w], 5)
                if debug:
                    if (w == 0): print('camera coeffs[0] coeffs[1] coeffs[2] coeffs[3] coeffs[4] coeffs[5]')
                    print('%2i %15.6e %15.6e %15.6e %15.6e %15.6e %15.6e' % (w, coeffs[0], coeffs[1], coeffs[2], coeffs[3], coeffs[4], coeffs[5]))
                Tcool = cool_temps_degC[w]
                Twarm = warm_temps_degC[w]
                Ecool[w] = coeffs[0] + (coeffs[1] * Tcool) + (coeffs[2] * Tcool**2) + (coeffs[3] * Tcool**3) + (coeffs[4] * Tcool**4) + (coeffs[5] * Tcool**5)
                Ewarm[w] = coeffs[0] + (coeffs[1] * Twarm) + (coeffs[2] * Twarm**2) + (coeffs[3] * Twarm**3) + (coeffs[4] * Twarm**4) + (coeffs[5] * Twarm**5)
        else:
            sensitivity = filter_data['tamarisk_responsivity'] * filter_data['tamarisk_lens_transmission'] * filter_data['germanium_window_transmission']
            for w in arange(Nw):
                Ecool[w] = degc_to_atsensor_radiance(cool_temps_degC[w], filter_data['waves'], filter_data[filterlist[w]], sensitivity)
                Ewarm[w] = degc_to_atsensor_radiance(warm_temps_degC[w], filter_data['waves'], filter_data[filterlist[w]], sensitivity)
        if debug:
            print('cool_radiances=', Ecool)
            print('warm_radiances=', Ewarm)
        deltaE = Ewarm - Ecool
        scale_offsets = Ecool
        offset_dcb = cool_dcb
        gain_dcb = zeros_like(cool_dcb)
        for w in arange(Nw):
            gain_dcb[:,:,w] = (warm_dcb[:,:,w] - cool_dcb[:,:,w]) / deltaE[w]
    ## Since the gains are used in the divisor when converting to temperatures, make sure that
    ## there are no zeros in the gain estimate.
    if any(logical_not(isfinite(gain_dcb))):
        raise ValueError('get_offsets_and_gains(): Elements in the gain cube are undefined. How did this happen?')
    if any(gain_dcb == 0.0):
        print('SETTING ZERO-VALUED ELEMENTS OF THE GAIN CUBE TO 1 ...')
        gain_dcb[gain_dcb == 0.0] = 1.0
    return(offset_dcb, gain_dcb, scale_offsets)
- ## =================================================================================================
def receive_command_replies(ctx, ncameras=12, silent=False):
    '''
    Given a ZMQ context, set up a subscription socket and wait for `ncameras` number of replies.
    Assemble these replies into a list of messages.

    Parameters
    ----------
    ctx : ZMQ context object
    ncameras : int, optional
        The number of replies to listen for
    silent : bool, optional
        Whether or not to print the replies as they come

    Returns
    -------
    allreplies : list
        The list of reply messages
    '''
    ## Socket for receiving command replies.
    subsock = ctx.socket(zmq.SUB)
    subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
    subsock.connect('tcp://10.1.10.11:9550')
    allreplies = []
    ## NOTE: recv_multipart() blocks; this loop waits until exactly ncameras replies arrive.
    for n in arange(ncameras):
        reply = subsock.recv_multipart()
        if not silent: print(reply)
        ## BUGFIX: removed "nreplies += 1" -- the initializer was commented out, so the
        ## increment raised NameError on the first reply. len(allreplies) carries the count.
        allreplies.append(reply)
    subsock.close()
    return(allreplies)
- ## =================================================================================================
def get_control_data(debug=False):
    '''
    Open a ZMQ subscription socket and grab data from all four available channels connected
    to the heater/shutter system. Place the (translated) results into a single dictionary.

    Parameters
    ----------
    debug : bool, optional
        Whether to pretty-print the raw reply dictionary (requires PyYAML).

    Returns
    -------
    newdict : dict
        A dictionary of data labels containing the various shutter/heater values. `newdict` has the
        form {`heater_setpoint`:_, `heater_levels`:_, `heater_control_enabled`:_, `warm_shutter_position`:_,\
        `cool_shutter_position`:_, `warm_shutter_temperatures`:_, `warm_shutter_board_temperatures`:_,\
        `warm_shutter_chassis_temperatures`:_, `cool_shutter_temperatures`:_,\
        `cool_shutter_board_temperatures`:_, `cool_shutter_chassis_temperatures`:_}

    Notes
    -----
    TODO: add an option to the function to call only one of the four channels and not all four, to get
    quicker responses?
    '''
    ctx = zmq.Context()
    reply_dict = {}
    ## Socket for receiving shutter/heater/thermister data. Set up the high water mark for
    ## 25 messages, rather than 4, because sometimes one of the messages is VERY slow in
    ## coming. And before it finally arrives, ZMQ keeps sending the other three messages
    ## over and over. This mainly happens when one or more cameras is misbehaving, slowing
    ## down the whole network.
    subsock = ctx.socket(zmq.SUB)
    subsock.setsockopt(zmq.HWM, 10)
    subsock.setsockopt_string(zmq.SUBSCRIBE, u'') ## subscribe to all five channels, and wait for replies from each one.
    subsock.connect('tcp://10.1.10.10:6255')
    ## Poll up to 20 messages; keep the first message seen on each channel (duplicates are
    ## skipped) and stop early once all five expected channels have reported.
    ## NOTE(review): reply[0] is a bytes object under Python 3, so the string-keyed membership
    ## tests below assume Python 2 semantics -- verify before porting.
    for i in arange(20):
        reply = subsock.recv_multipart()
        if (reply[0] in reply_dict): continue
        reply_dict[reply[0]] = json.loads(reply[1])
        if ('heated_heater' in reply_dict) and ('heated_shutter' in reply_dict) and \
           ('unheated_shutter' in reply_dict) and ('heated_temperatures' in reply_dict) and \
           ('unheated_temperatures' in reply_dict):
            break
    subsock.close()
    if debug:
        #print(reply_dict)
        import yaml
        #d = json.dumps(reply_dict) ## the "payload" dictionary
        print(yaml.dump(reply_dict, indent=3, width=140))
    ## Flatten the per-channel payloads into a single dictionary with friendlier key names.
    ## The entries commented out here are ones I doubt we will have use for. We can turn them
    ## on any time they may be useful...
    newdict = {}
    newdict['heater_setpoint'] = reply_dict['heated_heater']['heater_set_point']['value']
    newdict['heater_setpoint_changed'] = reply_dict['heated_heater']['heater_set_point']['changed']
    newdict['heater_levels'] = reply_dict['heated_heater']['heater_levels']
    newdict['heater_control_enabled'] = reply_dict['heated_heater']['heater_control']['enabled']
    newdict['heater_control_changed'] = reply_dict['heated_heater']['heater_control']['changed']
    newdict['warm_shutter_position'] = reply_dict['heated_shutter']['shutter_position']
    newdict['warm_shutter_command'] = 'open' if reply_dict['heated_shutter']['shutter_command']['open'] else 'closed'
    newdict['warm_shutter_command_changed'] = reply_dict['heated_shutter']['shutter_command']['changed']
    newdict['cool_shutter_position'] = reply_dict['unheated_shutter']['shutter_position']
    newdict['cool_shutter_command'] = 'open' if reply_dict['unheated_shutter']['shutter_command']['open'] else 'closed'
    newdict['cool_shutter_command_changed'] = reply_dict['unheated_shutter']['shutter_command']['changed']
    newdict['warm_shutter_temperatures'] = array(reply_dict['heated_temperatures']['shutter_temperatures'])
    newdict['warm_shutter_chassis_temperatures'] = array(reply_dict['heated_temperatures']['chassis_temperatures'])
    newdict['warm_shutter_board_temperatures'] = array(reply_dict['heated_temperatures']['board_temperatures'])
    newdict['cool_shutter_temperatures'] = array(reply_dict['unheated_temperatures']['shutter_temperatures'])
    newdict['cool_shutter_chassis_temperatures'] = array(reply_dict['unheated_temperatures']['chassis_temperatures'])
    newdict['cool_shutter_board_temperatures'] = array(reply_dict['unheated_temperatures']['board_temperatures'])
    return(newdict)
- ## =================================================================================================
- ## shutter_name: can be ['heated_shutter', 'heated', 'warm'] or ['unheated_shutter', 'unheated', 'cool']
- ## position: can be ['in', 'closed', 'down'] or ['out', 'open', 'up']
def move_shutter(shutter_name, position, debug=False):
    '''
    Move one of the GCI shutters in/out.

    Parameters
    ----------
    shutter_name : str
        {'heated_shutter', 'heated', 'warm'} if the warm shutter,\
        {'unheated_shutter', 'unheated', 'cool'} if the cool shutter,\
        or 'both' to move both shutters.
    position : str
        {'in', 'closed', 'close', 'down'} if moving the shutter in, {'out', 'open', 'up'}\
        if moving the shutter out.
    debug : bool, optional
        Currently unused here (reserved for the commented-out get_control_data() call).

    Raises
    ------
    ValueError
        If the shutter name or position string is not recognized.
    '''
    assert isinstance(shutter_name, str), 'Input "shutter_name" must be a string.'
    assert isinstance(position, str), 'Input "position" must be a string.'
    ## Normalize the shutter name to one of the channel names used on the wire.
    if (shutter_name.lower() in ['heated_shutter','warm','heated']):
        shutter = 'heated'
    elif (shutter_name.lower() in ['unheated_shutter','cool','unheated']):
        shutter = 'unheated'
    elif (shutter_name.lower() == 'both'):
        shutter = 'both'
    else:
        raise ValueError('move_shutter(): The shutter name "' + shutter_name + '" is not recognized.')
    ## "pos" is used to send a command to move the shutter, while "query_pos" is used to
    ## ask the system where the shutter is now. Note that pos != query_pos for the "CLOSE"
    ## command!
    if (position.lower() in ['in', 'closed', 'close', 'down']):
        pos = 'CLOSE'
        query_pos = 'CLOSED'
    elif (position.lower() in ['out', 'open', 'up']):
        pos = 'OPEN'
        query_pos = pos
    else:
        raise ValueError('move_shutter(): The input position "' + position + '" is not recognized.')
    print('COMMAND: shutter=', shutter, ', pos=', pos)
    ctx = zmq.Context()
    reqrepsock = ctx.socket(zmq.REQ)
    reqrepsock.connect('tcp://10.1.10.10:6256')
    ## First check if the other shutter is already in. If so, move it out before moving this shutter in.
    #data = get_control_data(debug=debug)
    if (shutter == 'heated') or (shutter == 'both'):
        ## It's tempting to move this "reqrepsock" definition above the if-block, but doing so produces
        ## an error when shutter=="both".
        ## NOTE(review): b'shutter='+pos concatenates bytes with str, which only works under
        ## Python 2 (where bytes is str) -- verify before porting to Python 3.
        cmd = [b'heated_shutter', b'shutter='+pos]
        reqrepsock.send_multipart(cmd)
        reqrepsock.close()
        ## Next watch the shutter position state and wait until it reaches the desired position.
        ## Poll at most 5 status messages before giving up (no exception on timeout).
        subsock = ctx.socket(zmq.SUB)
        subsock.setsockopt(zmq.HWM, 5)
        subsock.setsockopt_string(zmq.SUBSCRIBE, u'heated_shutter')
        subsock.connect('tcp://10.1.10.10:6255')
        for i in arange(5):
            reply = subsock.recv_multipart()
            reply_dict = json.loads(reply[1])
            print(i, reply_dict)
            if (reply_dict['shutter_position'].upper() == query_pos):
                break
        subsock.close()
    if (shutter == 'unheated') or (shutter == 'both'):
        ## A fresh REQ socket is needed here: the first one has already been closed above
        ## when shutter=="both".
        reqrepsock = ctx.socket(zmq.REQ)
        reqrepsock.connect('tcp://10.1.10.10:6256')
        cmd = [b'unheated_shutter', b'shutter='+pos]
        reqrepsock.send_multipart(cmd)
        #reply = reqrepsock.recv_multipart()
        reqrepsock.close()
        ## Next watch the shutter position state and wait until it reaches the desired position.
        subsock = ctx.socket(zmq.SUB)
        subsock.setsockopt(zmq.HWM, 5)
        subsock.setsockopt_string(zmq.SUBSCRIBE, u'unheated_shutter')
        subsock.connect('tcp://10.1.10.10:6255')
        for i in arange(5):
            reply = subsock.recv_multipart()
            reply_dict = json.loads(reply[1])
            print(i, reply_dict)
            if (reply_dict['shutter_position'].upper() == query_pos):
                break
        subsock.close()
    return
- ## =================================================================================================
def set_heater_setpoint(setpoint):
    '''
    Command the shutter-heater controller to adopt a new temperature setpoint.

    Parameters
    ----------
    setpoint : float
        The temperature (in degC) to set the heater setpoint to.
    '''
    ## Send the request over a short-lived REQ socket and wait for the (discarded) ack.
    context = zmq.Context()
    request_socket = context.socket(zmq.REQ)
    request_socket.connect('tcp://10.1.10.10:6256')
    request_socket.send_multipart([b'heated_heater', b'setpoint='+str(setpoint)])
    request_socket.recv_multipart()
    request_socket.close()
    return
- ## =================================================================================================
def set_heater_state(on_or_off):
    '''
    Switch the shutter heater on or off.

    Parameters
    ----------
    on_or_off : str
        {'on','off'} turn the heater on or off (case-insensitive).
    '''
    state = on_or_off.upper()
    assert (state in ['ON','OFF']), 'The input "on_or_off" value ("' + on_or_off + '") is not recognized.'
    ## Issue the command on a short-lived REQ socket and echo the controller's acknowledgment.
    context = zmq.Context()
    request_socket = context.socket(zmq.REQ)
    request_socket.connect('tcp://10.1.10.10:6256')
    request_socket.send_multipart([b'heated_heater', b'heater='+state])
    reply = request_socket.recv_multipart()
    print('set_heater_state() reply=', reply)
    request_socket.close()
    return
- ## =================================================================================================
def replace_bad_pixels(badimg, xbad, ybad):
    '''
    Replace any bad pixels in an image with interpolated values from their neighbors.

    Parameters
    ----------
    badimg : ndarray
        The 2D input image (containing bad pixels).
    xbad : ndarray
        The vector of x-locations (column indices) of bad pixels.
    ybad : ndarray
        The vector of y-locations (row indices) of bad pixels.

    Returns
    -------
    img : ndarray
        A copy of the input image with bad pixels replaced.

    Notes
    -----
    The tricky part here is all in the geometry. For any given bad pixel, one must search the
    surrounding 8 pixels to make sure that these pixels (which are used to interpolate)
    are not themselves bad pixels. And, if at the edge of an array or a corner, then the
    search space is smaller than 8 pixels. Thus all of the if-elif statements below.
    '''
    badimg = asarray(badimg)
    xbad = asarray(xbad)
    ybad = asarray(ybad)
    img = badimg.copy()
    dims = img.shape
    nbad = len(atleast_1d(xbad))    ## (numpy alen() was removed in numpy 1.23)
    ## BUGFIX: materialize the zip. Under Python 3, zip() returns a one-shot iterator, so the
    ## original code's repeated "item in badlist" scans and "badlist[i]" indexing both failed.
    badlist = list(zip(ybad, xbad))
    ## First figure out whether the pixel we are examining is at an edge (top,left,bottom, or right)
    ## or at a corner. The pixels to interpolate across are different in each case.
    for i in arange(nbad):
        xlo = (xbad[i] == 0)
        xhi = (xbad[i] == dims[1]-1)
        ylo = (ybad[i] == 0)
        yhi = (ybad[i] == dims[0]-1)
        xok = (not xlo) and (not xhi)
        yok = (not ylo) and (not yhi)
        if xok and yok:
            xj = [-1,0,1,-1,1,-1,0,1]
            yj = [-1,-1,-1,0,0,1,1,1]
        elif xlo and yok:
            xj = [0,1,1,0,1]
            yj = [-1,-1,0,1,1]
        elif xok and ylo:
            xj = [-1,1,-1,0,1]
            yj = [0,0,1,1,1]
        elif xhi and yok:
            xj = [-1,0,-1,-1,0]
            yj = [-1,-1,0,1,1]
        elif xok and yhi:
            xj = [-1,0,1,-1,1]
            yj = [-1,-1,-1,0,0]
        elif xlo and ylo:
            xj = [1,0,1]
            yj = [0,1,1]
        elif xhi and yhi:
            xj = [-1,0,-1]
            yj = [-1,-1,0]
        elif xlo and yhi:
            xj = [0,1,1]
            yj = [-1,-1,0]
        elif xhi and ylo:
            xj = [-1,-1,0]
            yj = [0,1,1]
        else:
            ## BUGFIX: raise instead of printing and falling through with xj/yj unbound
            ## (the original crashed with NameError right after the print).
            raise ValueError('replace_bad_pixels(): bad pixel coordinates (' + str(xbad[i]) +
                             ',' + str(ybad[i]) + ') fall outside the image -- check the dead pixel list.')
        xj = asarray(xj)
        yj = asarray(yj)
        z = list(zip(ybad[i]+yj, xbad[i]+xj))    ## materialized for indexing below (Py3 fix)
        w = 1.0 / sqrt(xj**2 + yj**2)
        ## Determine whether any of the surrounding pixels (the ones we wish to use for interpolating
        ## to replace the bad pixel) are themselves bad. If bad, then skip them when calculating the
        ## average. Also, give direct neighbors a weight of 1 while diagonal pixels get a weight of
        ## 1/sqrt(2). The final interpolated average is a weighted average between them.
        bad = [item in badlist for item in z]
        newval = 0.0
        weight = 0.0
        for j in range(len(xj)):
            if not bad[j]:
                newval += (img[z[j]]) * w[j]
                weight += w[j]
        newval = round(newval / weight)
        img[badlist[i]] = newval
    return(img)
- ## =================================================================================================
def degc_to_atsensor_radiance(T_celsius, waves_um, filter_spectrum, sensitivity, emissivity=1.0):
    '''
    Convert a surface temperature in degC into at-sensor radiance, by integrating the Planck
    blackbody curve against the surface emissivity, the detector sensitivity spectrum
    (responsivity times optical transmission), and the filter transmission spectrum.

    Parameters
    ----------
    T_celsius : float
        The surface temperature in degC.
    waves_um : ndarray
        The vector of wavelengths over which to analyze the spectrum.
    filter_spectrum : ndarray
        The transmission spectrum of the filter, sampled at the wavelength points given by `waves_um`.
    sensitivity : ndarray
        The sensitivity spectrum of the detector, sampled at the wavelength points given by `waves_um`.
    emissivity : ndarray
        The emissivity spectrum of the surface, sampled at the wavelength points given by `waves_um` \
        (or a single scalar value).

    Returns
    -------
    watts_per_cm2_per_sr : float
        The at-sensor radiance integrated across the full spectral range given by `waves_um`.
    '''
    T_celsius = asarray(T_celsius)
    waves_um = asarray(waves_um)
    nwaves = alen(waves_um)
    ## Validate that every spectrum is sampled on the same grid and is physically sensible.
    assert (alen(filter_spectrum) == nwaves), 'The input "filter_spectrum" must be the same length as "waves_um".'
    assert (amin(filter_spectrum) >= 0.0) and (amax(filter_spectrum) <= 1.0), 'The input "filter_spectrum" must everywhere be in the range 0.0-1.0.'
    assert (alen(sensitivity) == nwaves), 'The input "sensitivity" must be the same length as "waves_um".'
    assert (amin(sensitivity) >= 0.0) and (amax(sensitivity) <= 1.0), 'The input "sensitivity" spectrum must everywhere be in the range 0.0-1.0.'
    assert (alen(emissivity) in (1, nwaves)), 'The input "emissivity" must be the either length 1 or the same length as "waves_um".'
    assert (amin(emissivity) >= 0.0) and (amax(emissivity) <= 1.0), 'The input emissivity must be in the range 0.0-1.0.'
    ## Integrate Planck spectral radiance times all transmission factors over wavelength,
    ## using gradient() as the per-sample wavelength interval.
    bb_radiance = planck_blackbody(waves_um, T_celsius)
    dlam = gradient(waves_um)
    watts_per_cm2_per_sr = sum(bb_radiance * filter_spectrum * sensitivity * emissivity * dlam)
    return(watts_per_cm2_per_sr)
- ## =================================================================================================
def get_filter_spectra(waves=None, filterlist=None, path=None, debug=False):
    '''
    Get the transmission spectra corresponding to a list of filters. If the list is not given, then just grab
    all transmission spectra located in the data archive directory.

    The function returns a dictionary whose keys are the generic filter names (i.e. "LP-11450", etc),
    and whose values are the transmission spectra associated with the filter. Also included in the dictionary is
    the "waves" key which gives the wavelengths of all of the associated spectra (equal to the "waves" input
    argument of the function). All of the spectra have been interpolated onto the "waves" sampling grid. Also
    included in the dictionary are the non-filters "transmission" (of the atmosphere), "emission" (of the
    atmosphere), the "optical_transmission", and "responsivity" (of the Tamarisk sensor), since these spectra
    are also included as `*.dat` files in the spectral data directory. If "filterlist" is included as an input
    argument, then any filter uniqueIDs that are *not* in the filter list will be ignored and not included in
    the dictionary returned.

    Parameters
    ----------
    waves : ndarray, optional
        A vector of wavelengths (in microns) at which to sample the spectra.
    filterlist : list, optional
        A list of strings giving the uniqueIDs of the filters of interest. Ignore any \
        filters not in this list.
    path : str
        A directory giving a nonstandard location for getting the spectral data.
    debug : bool
        Whether to print/plot extra diagnostic information.

    Returns
    -------
    datadict : dict
        A dictionary whose keys are the filter uniqueIDs, and whose values contain the corresponding \
        transmission spectra. Added to the dictionary is an extra `waves` key giving the wavelength \
        sampling of all spectra included in the dictionary.

    Notes
    -----
    In general, the filter spectra located in the given path are assumed to be encoded in two-column text
    format, with wavelengths in the first column and transmission in the second. (This can be either %
    transmission or absolute transmission --- the code below will convert the former to absolute
    transmission.) The transmission spectra are not assumed to be sampled on a uniform grid, and so the
    code below interpolates the input from the file onto a uniformly-sampled grid.
    In addition, any unphysical values (trans < 0.0 or trans > 1.0) are truncated to the physical limit.
    '''
    from scipy.interpolate import interp1d
    ## Use "is None" rather than "== None" throughout: "waves" may be an ndarray, for which
    ## "== None" is an (ambiguous) elementwise comparison.
    fdir = GCIDIR + 'cal/filter_spectra/' if (path is None) else path
    if debug: print('Loading files from ' + fdir)
    files = glob.glob(os.path.normpath(fdir + '*.dat'))
    nfiles = len(files)
    if debug: print('Found ' + str(nfiles) + ' files')
    datadict = {}
    if (waves is None):
        waves = linspace(7.0, 18.0, 500)
    else:
        waves = asarray(waves)
    nwaves = len(waves)
    datadict['waves'] = waves
    wavemin = amin(waves)
    wavemax = amax(waves)
    datadict['none'] = ones(nwaves)
    ids = []
    labels = []
    filelist = []
    extras = ('tamarisk_responsivity', 'tamarisk_lens_transmission', 'germanium_window_transmission')
    ## Process the filename to extract a string that represents the filter's designed "center wavelength".
    ## Use that string label as a key for each dictionary entry (i.e. for each wavelength/intensity spectrum).
    for i,f in enumerate(files):
        (head,tail) = os.path.split(f)
        uniqueID = tail[:-4]
        if (filterlist is not None) and (uniqueID not in filterlist) and (uniqueID not in extras): continue
        uniqueID_elements = uniqueID.split('_')
        filtername = uniqueID_elements[1]
        ## Strip any trailing "-<suffix>" from names with more than one hyphen.
        if (filtername.count('-') > 1):
            idx = filtername.rindex('-')
            filtername = filtername[:idx]
        ## Make a map of uniqueID to generic name.
        if debug: print(i, 'filtername=', filtername, uniqueID)
        ids.append(uniqueID)
        labels.append(filtername)
        filelist.append(f)
    ## Here (wav,spect) are from the data read from the file, and (waves,spectrum) are the *interpolated*
    ## results we want as output.
    for i,f in enumerate(filelist):
        data = genfromtxt(f)
        (wav,spect) = transpose(data)
        ## Reverse the data if given in descending wavelength order.
        if (wav[0] > wav[-1]):
            wav = wav[::-1]
            spect = spect[::-1]
        if (amin(wav) > 1000.0): wav = wav / 1000.0        ## convert nm to um
        if (amin(spect) < 0.0): spect[spect < 0.0] = 0.0   ## fix unphysical negative values
        if (amax(spect) > 2.0): spect = spect / 100.0      ## convert % to fraction transmission
        if (amax(spect) > 1.0): spect[spect > 1.0] = 1.0   ## fix unphysical values
        ## If the raw data does not support the full desired spectral range, then extrapolate the
        ## ends of the spectrum (as constants) to the full spectral range.
        if (amin(wav) > wavemin):
            wav = append(wavemin, wav)
            spect = append(spect[0], spect)
        if (amax(wav) < wavemax):
            wav = append(wav, wavemax)
            spect = append(spect, spect[-1])
        if debug:
            print(i, f)
            print('wavemin=%f, wavemax=%f, amin(wav)=%f, amax(wav)=%f' % (wavemin, wavemax, amin(wav), amax(wav)))
        func = interp1d(wav, spect)
        spectrum = func(waves)
        datadict[ids[i]] = spectrum
    if debug:
        for key in datadict:
            if (key == 'waves'): continue
            if (len(datadict[key]) != nwaves): continue
            import matplotlib.pyplot as plt
            plt.figure()
            plt.plot(datadict['waves'], datadict[key])
            plt.title(key)
        plt.show()
    ## Provide the generic alias "responsivity" for the Tamarisk responsivity spectrum, if present.
    if ('tamarisk_responsivity') in datadict:
        datadict['responsivity'] = datadict['tamarisk_responsivity']
    return(datadict)
- ## =================================================================================================
def get_filter_edge_wavelength(waves, trans, filtertype='LP'):
    '''
    For a given input pair of wavelengths and transmission spectrum, locate the (interpolated)
    wavelength which we can call the "edge wavelength" --- where the transmission passes through
    the "halfway point". For a perfect filter, the halfway point is at exactly transmission==0.5.
    For a real filter, we take the halfway point to be 0.5 times the filter's mean transmission
    along its passband region.

    Parameters
    ----------
    waves : ndarray
        A vector of wavelengths at which the filter transmission spectrum is sampled.
    trans : ndarray
        The filter transmission spectrum.
    filtertype : str
        {'LP','SP'} the type of filter. Any other value returns (nan, nan).

    Returns
    -------
    waveval : float
        The wavelength of the filter halfway point.
    spectval : float
        The transmission value of the filter at the halfway point.
    '''
    waves = asarray(waves)
    assert (alen(trans) == alen(waves)), 'The input "trans" must be the same length as "waves".'
    ## Only longpass and shortpass filters have a well-defined single edge.
    if filtertype not in ('LP', 'SP'):
        return(nan, nan)
    ## First guess: the region where transmission exceeds half its peak value.
    above_half_peak = (trans > 0.5*amax(trans))
    if (filtertype == 'LP'):
        ## Longpass: the edge is the first above-half-peak sample; estimate the mean passband
        ## transmission from the region 0.5 um above the edge (and below 15 um).
        edgewave = waves[above_half_peak][0]
        passband = (waves > (edgewave+0.5)) * (waves < 15.0)
        mean_trans = mean(trans[passband])
        idx = find_nearest_brackets(trans[waves < 15.0], 0.5*mean_trans)
    else:
        ## Shortpass: the edge is the last above-half-peak sample; the passband lies below it.
        edgewave = waves[above_half_peak][-1]
        passband = (waves < (edgewave-0.5))
        mean_trans = mean(trans[passband])
        idx = find_nearest_brackets(trans, 0.5*mean_trans)
    spectval = 0.5 * mean_trans
    ## Linearly interpolate between the two bracketing samples to refine the edge wavelength.
    slope = (trans[idx[1]] - trans[idx[0]]) / (waves[idx[1]] - waves[idx[0]])
    waveval = waves[idx[1]] if (slope == 0.0) else waves[idx[0]] + (spectval - trans[idx[0]]) / slope
    return(waveval, spectval)
- ## =================================================================================================
def send_command(cmd, arg=None, read_replies=0, delay=0, debug=False):
    '''
    Issue one of the "miscellaneous" command messages to the GCI system. This function only uses
    "publication" sockets, which is why the heater commands do not appear here.

    Parameters
    ----------
    cmd : str
        any one of the following strings:

        * "enable_auto_calibration" / "disable_auto_calibration": toggle the CPU-based autocalibration (not the on-camera autocal).
        * "enable_preheat" / "disable_preheat": toggle the pre-heating leading to an autocal.
        * "trigger_1pt_calibration": tell the system to perform a 1pt calibration (not on-camera but with the CPU).
        * "trigger_2pt_calibration": tell the system to perform a 2pt calibration (not on-camera but with the CPU).
        * "enable_camera_autocal" / "disable_camera_autocal": tell all cameras to enable/disable their regular (on-camera) 1pt autocal.
        * "enable_autocal_activity_control" / "disable_autocal_activity_control": toggle all cameras' control for performing range-change-induced on-camera autocals.
        * "field_calibrate": perform a 1pt calibration with the shutter disabled.
        * "get_system_status": query every IR camera for its status flags.
        * "query_calibration_pending": query all cameras to see if they are requesting an autocal.
        * "get_exposure_mode": ask for the VIS camera's exposure mode.
        * "set_exposure_mode": set the VIS camera's exposure mode. The possible values are {'Off', 'Timed',\
          'TriggerWidth', and 'IOExposureControl'}. If you want to allow manual changes to the exposure time,\
          the mode needs to be 'Timed'.
        * "set_exposure_time": set the visible camera's exposure time in microseconds.
        * "get_exposure_time": get the visible camera's exposure time in microseconds.
        * "get_vis_camera_setup_file": get the XML-formatted file which lists all of the VIS camera setup\
          parameters, their data types, and what options are available for their settings. If "arg" is\
          set to a filename, then the XML data will also be saved to that file.
        * "get_vis_whitebalance_mode": get the VIS camera's white balance mode.
        * "set_vis_whitebalance_mode": set the VIS camera's white balance mode. The possible values are 'Off',\
          'Once', 'Auto', and 'Manual'.
        * "set_camera_param" / "get_genicam_xml" / "get_camera_image_settings": low-level VIS camera commands.
    arg : any
        Some commands require an input argument (can be any data type).
    read_replies : int
        0 if you don't want to check or wait for replies from the cameras; set to some number
        0 < # <= Nw to say how many cameras' replies you wish to read (i.e. if one camera is having
        communication problems, you may want to set "read_replies=(Nw-1)").
    delay : float
        The amount of time in seconds to delay after sending the message before returning from the function.
    debug : bool
        Whether to print extra diagnostic information.

    Returns
    -------
    The return value depends on "cmd": a per-camera status dict for "get_system_status" and
    "query_calibration_pending", the reply value for the VIS "get_*" queries, the XML string
    for "get_vis_camera_setup_file", and None otherwise.
    '''
    import time
    assert isinstance(cmd, str), 'The input "cmd" must be a string type.'
    ctx = zmq.Context()
    pubsock = ctx.socket(zmq.PUB)
    ## Classify the command to determine the socket and topic. Every command handled below MUST
    ## appear in one of these lists: previously "get_exposure_time", "set_exposure_time",
    ## "get_vis_camera_setup_file", "get_vis_whitebalance_mode", and "set_vis_whitebalance_mode"
    ## were missing here, so their handler branches further down were unreachable.
    if (cmd in ['enable_auto_calibration','disable_auto_calibration','enable_preheat','disable_preheat',\
                'trigger_1pt_calibration','trigger_2pt_calibration']):
        command_type = 'system'
    elif (cmd in ['enable_camera_autocal', 'disable_camera_autocal', 'enable_autocal_activity_control', \
                  'disable_autocal_activity_control', 'field_calibrate', 'get_system_status', 'query_calibration_pending']):
        command_type = 'IR_cameras'
    elif (cmd in ['get_exposure_mode', 'set_exposure_mode', 'get_exposure_time', 'set_exposure_time', \
                  'get_vis_camera_setup_file', 'get_vis_whitebalance_mode', 'set_vis_whitebalance_mode', \
                  'set_camera_param', 'get_genicam_xml', 'get_camera_image_settings']):
        command_type = 'VIS_camera'
    else:
        raise ValueError('The command "' + cmd + '" is not recognized.')
    ## Assemble the ZMQ message.
    if (command_type == 'system'):
        pubsock.connect('tcp://10.1.10.10:5001')
        ## The six "calibration" commands do not issue replies, so turn "read_replies" off.
        read_replies = 0
        zmq_msg = [b'calibration-control', b'GENERIC_CMD']
        zmq_msg.append(bytearray(cmd))
    elif (command_type == 'IR_cameras'):
        pubsock.connect('tcp://10.1.10.11:9550')
        zmq_msg = [b'IR', b'SERIAL_CMD']
        if (cmd == 'enable_camera_autocal'):
            zmq_msg.append(b'auto_calibration_toggle')
            zmq_msg.append(b'Enable')
        elif (cmd == 'disable_camera_autocal'):
            zmq_msg.append(b'auto_calibration_toggle')
            zmq_msg.append(b'Disable')
        elif (cmd == 'enable_autocal_activity_control'):
            zmq_msg.append(b'auto_cal_activity_control')
            zmq_msg.append(b'Enable')
        elif (cmd == 'disable_autocal_activity_control'):
            zmq_msg.append(b'auto_cal_activity_control')
            zmq_msg.append(b'Disable')
        elif (cmd == 'field_calibrate'):
            zmq_msg.append(b'field_calibrate')
            zmq_msg.append(b'1point_cal_shutter_disabled')
        elif (cmd == 'get_system_status'):
            read_replies = 12
            zmq_msg.append(b'system_status_get')
        elif (cmd == 'query_calibration_pending'):
            read_replies = 12
            zmq_msg.append(b'query_calibration_pending')
    elif (command_type == 'VIS_camera'):
        pubsock.connect('tcp://10.1.10.11:9550')
        zmq_msg = [b'VIS_0', b'GENERIC_CMD']
        if (cmd == 'get_exposure_mode'):
            read_replies = 1
            zmq_msg.append(b'get_camera_param')
            zmq_msg.append(b'ExposureMode:enum')
        elif (cmd == 'set_exposure_mode'):
            mode = 'Timed' if (arg is None) else arg
            zmq_msg.append(b'set_camera_param')
            zmq_msg.append(b'ExposureMode:enum:'+mode)
            ## Allow the camera time to apply the setting before returning.
            if (delay < 0.2): delay = 0.2
        elif (cmd == 'get_exposure_time'):
            read_replies = 1
            zmq_msg.append(b'get_camera_param')
            zmq_msg.append(b'ExposureTimeRaw:int')
        elif (cmd == 'set_exposure_time'):
            exposure_time_microsec = 10000 if (arg is None) else int(arg)
            zmq_msg.append(b'set_camera_param')
            zmq_msg.append(b'ExposureTimeRaw:int:'+str(exposure_time_microsec))
            if (delay < 0.2): delay = 0.2
        elif (cmd == 'get_vis_camera_setup_file'):
            zmq_msg.append(b'get_genicam_xml')
        elif (cmd == 'get_vis_whitebalance_mode'):
            read_replies = 1
            zmq_msg.append(b'get_camera_param')
            zmq_msg.append(b'WhiteBalanceMode:enum')
        elif (cmd == 'set_vis_whitebalance_mode'):
            mode = 'Off' if (arg is None) else arg
            zmq_msg.append(b'set_camera_param')
            zmq_msg.append(b'WhiteBalanceMode:enum:'+mode)
            if (delay < 0.2): delay = 0.2
        elif (cmd == 'get_camera_image_settings'):
            zmq_msg.append(b'get_camera_param')
            zmq_msg.append(b'ImageSizeControl:enum')
    ## Now go ahead and send the message.
    pubsock.send_multipart(zmq_msg)
    pubsock.close()
    ## After sending the command, now listen for replies.
    if (read_replies > 0):
        if (cmd == 'get_system_status'):
            allreplies = receive_command_replies(ctx, ncameras=read_replies, silent=True)
            all_status = {}
            for reply in allreplies:
                d = {}
                if ('ERROR' in reply) or ('EXTVID' not in reply):
                    ## A malformed reply: mark every status field as an error.
                    for field in ('EXTVID', 'CAL', 'AGC', 'SHUTTER', 'POL', 'Manual Gain',
                                  'Manual Level', 'Gain Bias', 'Level Bias'):
                        d[field] = 'ERROR'
                else:
                    ## Each status field is followed in the reply by its value.
                    for field in ('EXTVID', 'CAL', 'AGC', 'SHUTTER', 'POL', 'Manual Gain',
                                  'Manual Level', 'Gain Bias', 'Level Bias'):
                        idx = reply.index(field)
                        d[reply[idx]] = reply[idx+1]
                if reply[0].startswith('IR_'):
                    cameraname = reply[0]
                elif reply[0].startswith('IR') and reply[3].startswith('IR_'):
                    cameraname = reply[3]
                else:
                    cameraname = 'UNKNOWN'
                all_status[cameraname] = d
            if debug:
                for k in all_status: print(k + ': ' + str(all_status[k]))
            time.sleep(delay)
            return(all_status)
        elif (cmd == 'query_calibration_pending'):
            allreplies = receive_command_replies(ctx, ncameras=read_replies, silent=True)
            all_status = {}
            for reply in allreplies:
                if reply[0].startswith('IR_'):
                    cameraname = reply[0]
                elif reply[0].startswith('IR') and reply[3].startswith('IR_'):
                    cameraname = reply[3]
                else:
                    cameraname = 'UNKNOWN'
                if ('query_calibration_pending' in reply):
                    all_status[cameraname] = reply[4]
                else:
                    all_status[cameraname] = 'ERROR'
            time.sleep(delay)
            return(all_status)
        elif (cmd in ['get_exposure_mode', 'get_exposure_time', 'get_vis_whitebalance_mode', 'get_camera_image_settings']):
            ## NOTE: the previous version referenced an undefined "all_status" in its debug path
            ## here (a NameError); the reply is already printed below, so that loop was dropped.
            reply = receive_command_replies(ctx, ncameras=read_replies, silent=False)
            print(reply)
            time.sleep(delay)
            return(reply[5])
        elif (cmd == 'get_vis_camera_setup_file'):
            subsock = ctx.socket(zmq.SUB)
            subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
            subsock.connect('tcp://10.1.10.11:9551')
            reply = subsock.recv_multipart()
            xmlmsg = reply[4]
            if (arg is not None):
                ## Plain open(): the py2 builtin open() does not accept an "encoding" kwarg,
                ## and xmlmsg is already a byte string.
                with open(arg, 'w') as f:
                    f.write(xmlmsg)
            subsock.close()
            time.sleep(delay)
            return(xmlmsg)
        else:
            receive_command_replies(ctx, ncameras=read_replies, silent=False)
            time.sleep(delay)
            return
    else:
        ## This section is typically reached when using the "system" commands. After setting one
        ## of these, we should add a delay of at least 0.1 sec.
        if (delay < 0.1): delay = 0.1
        time.sleep(delay)
        return
- ## =======================================================================================
def show_image_with_projections(img, title='', windowsize='normal', **kwargs):
    '''
    Display the image together with its x- and y-sums (or "marginals") displayed on the side.
    This is very useful for manually detecting the field reference aperture edges.

    Parameters
    ----------
    img : ndarray
        The 2D image to display.
    title : str
        Any title to put above the image
    windowsize : str
        {'small','normal','large','xlarge'}
    **kwargs : any
        Any keyword arguments that can be used by matplotlib's `imshow()` function.
    '''
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    img = asarray(img)
    (Nx,Ny) = img.shape
    ## Map each named window size to a figure size in inches; an unrecognized name creates
    ## no new figure (matplotlib will then draw into the current/default one).
    figsizes = {'small':(9,6), 'normal':(12,8), 'large':(15,10), 'xlarge':(17,12)}
    if windowsize in figsizes:
        plt.figure(figsize=figsizes[windowsize])
    grid = gridspec.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1,3])
    grid.update(hspace=0.05, wspace=0.05)
    ax_xproj = plt.subplot(grid[0])
    ## Note: for the weird "100" in the aspect ratio, this approximates the extra space needed
    ## to produce the tick labels and axis labels, which plt.subplot() does not account for
    ## when determining the aspect ratio of the plot.
    aspect_ratio = float(Ny-100) / Nx
    ax_image = plt.subplot(grid[2], aspect=aspect_ratio)
    ## Draw the main image.
    plt.imshow(img, aspect=aspect_ratio, **kwargs)
    plt.xlabel('y')
    plt.ylabel('x')
    ax_yproj = plt.subplot(grid[3])
    ## Hide the ticklabels along the shared edges of the projection panels.
    for ticklabel in ax_xproj.get_xticklabels():
        ticklabel.set_visible(False)
    for ticklabel in ax_yproj.get_yticklabels():
        ticklabel.set_visible(False)
    ## Set the image display limits to fit exactly, then draw the two marginal-sum curves
    ## and lock their axes to the image axes.
    ax_image.set_xlim((0,Ny-1))
    ax_image.set_ylim((0,Nx-1))
    ax_xproj.plot(arange(Ny), sum(img, axis=0))
    ax_yproj.plot(sum(img, axis=1), arange(Nx))
    ax_xproj.set_xlim(ax_image.get_xlim())
    ax_yproj.set_ylim(ax_image.get_ylim())
    if (title != ''): plt.title(title)
    return
- ## =================================================================================================
def dynamically_correct_dcb(dcb_meas, dcb_cal=None, rect_dict=None, refcam=9, serial_number=None, \
                            saturation_ceiling=90.0, delta_output=None, beta_output=None,
                            debug=False):
    '''
    Apply the dynamic correction algorithm to the datacube. The input datacube should be in at-sensor
    radiance units.

    Parameters
    ----------
    dcb_meas : ndarray
        The (Nx,Ny,Nw) datacube of a valid scene (in at-sensor radiance units)
    dcb_cal : ndarray, optional
        The (Nx,Ny,Nw) datacube of the calibration scene (different than the measurement scene) \
        in at-sensor radiance units.
    rect_dict : str or dict, optional
        The rectangles dictionary, or the filename of it. This contains the coordinates of the \
        field reference aperture and the scene reference aperture.
    refcam : int
        Which camera index to use for the reference camera.
    serial_number : int, optional
        The GCI serial number (defaults to the module-level GCI_SERIAL_NUMBER).
    saturation_ceiling : float
        The value above which all the datacube pixels are truncated.
    delta_output: ndarray
        An output array to hold the dynamic cal's offset values.
    beta_output: ndarray
        An output array to hold the dynamic cal's gain values.
    debug : bool
        Whether to print/plot extra diagnostic information.

    Returns
    -------
    corr_meas_dcb : ndarray
        The corrected datacube (also in degC units).

    Notes
    -----
    We use the lookup table to convert at-sensor radiance back to estimated object temperature by \
    assuming the light is emitted by a blackbody with no path radiance or path absorption. Currently \
    all cameras are assigned to match camera #9 by default; change the `refcam` input argument to \
    change that. For abbreviation, we use `rd` = "rectangles dictionary".
    '''
    dcb_meas = asarray(dcb_meas)
    assert (dcb_meas.ndim == 3), 'Input "dcb_meas" must have three dimensions.'
    ## Use "is None" checks throughout: "== None" on an ndarray is an elementwise comparison
    ## and raises (or silently misbehaves) rather than testing for a missing argument.
    if (dcb_cal is None): dcb_cal = dcb_meas
    dcb_cal = asarray(dcb_cal)
    assert (dcb_cal.ndim == 3), 'Input "dcb_cal" must have three dimensions.'
    assert (dcb_meas.shape == dcb_cal.shape), 'Input "dcb_meas" and "dcb_cal" must have the same shape.'
    gci_serial_number = GCI_SERIAL_NUMBER if (serial_number is None) else serial_number
    (Nx,Ny,Nw) = dcb_meas.shape
    if debug: print('(Nx,Ny,Nw)=', (Nx,Ny,Nw))
    if (rect_dict is None):
        rd = read_rectanglesfile(serial_number=serial_number)
    elif isinstance(rect_dict, str):
        rd = get_calib_dictionary(rect_dict, serial_number=gci_serial_number)
        if ('aperture_rects' in rd): rd = daisi_rect_to_xycoords(rd, Nx)
    elif isinstance(rect_dict, dict):
        rd = rect_dict
        if ('aperture_rects' in rd): rd = daisi_rect_to_xycoords(rd, Nx)
    else:
        raise TypeError('Input "rect_dict" must be either a filename or a dictionary.')
    ## If any of the aperture coordinates lie outside the image, then raise an exception. Most
    ## likely, the rectangles are wrong or a cropped datacube was used as input.
    if any(rd['aperture_xy'] < 0):
        raise ValueError('One or more of the "aperture_xy" coordinates is negative!')
    elif any(rd['aperture_xy'][:,1] > Nx):
        raise ValueError('One or more of the "aperture_xy" x-coordinates is > Nx!')
    elif any(rd['aperture_xy'][:,3] > Ny):
        raise ValueError('One or more of the "aperture_xy" y-coordinates is > Ny!')
    if any(rd['unregistered_scene_xy'] < 0):
        raise ValueError('One or more of the "unregistered_scene_xy" coordinates is negative!')
    elif any(rd['unregistered_scene_xy'][:,1] > Nx):
        raise ValueError('One or more of the "unregistered_scene_xy" x-coordinates is > Nx!')
    elif any(rd['unregistered_scene_xy'][:,3] > Ny):
        raise ValueError('One or more of the "unregistered_scene_xy" y-coordinates is > Ny!')
    ## Apply the saturation_ceiling to all cameras that have reference cameras or are themselves reference
    ## cameras. On gci2, this means cameras 5, 6, and 9. (list() so .append works on py2 and py3.)
    refcam_list = list(rd['refcam'].keys())
    for k in rd['refcam']:
        if rd['refcam'][k] not in refcam_list:
            refcam_list.append(rd['refcam'][k])
    ## Create the saturation mask using numpy's masked arrays.
    dcb_meas = ma.masked_greater_equal(dcb_meas, saturation_ceiling)
    dcb_cal = ma.masked_greater_equal(dcb_cal, saturation_ceiling)
    ## Grab the mean temperatures from each region.
    meas_aper_temps = get_mean_spect(dcb_meas, rd['aperture_xy'])
    ref_aper_temps = get_mean_spect(dcb_cal, rd['aperture_xy'])
    ref_scene_temps = get_mean_spect(dcb_cal, rd['unregistered_scene_xy'])
    meas_whole_temps = get_mean_spect(dcb_meas, rd['cropping_xy'])
    ref_whole_temps = get_mean_spect(dcb_cal, rd['cropping_xy'])
    ## Offsets relative to the reference camera (previously hard-coded to camera 9, which
    ## silently ignored a non-default "refcam" argument).
    aper_offset_deltas = ref_aper_temps - ref_aper_temps[refcam]
    if debug:
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        show_dcb(array(dcb_cal), box1=rd['aperture_xy'], box2=rd['cropping_xy'],
                 box3=rd['unregistered_scene_xy'], title='1: original calib. dcb (est. obj temp)',
                 cmap=cm.jet)
    ## Correct the aperture-occluded cameras to have their temperature estimates agree with refcam.
    ## The non-occluded center cameras have to be corrected in a different way --- I've chosen to average the
    ## whole image as a pretend reference aperture temperature. Then they can be matched to a reference
    ## camera that shares their same spectral bandpass.
    refcam_meas_aper_temps = meas_aper_temps[refcam] * ones(Nw)
    meas_aper_temps[isnan(meas_aper_temps)] = ref_whole_temps[isnan(meas_aper_temps)]
    ## In "rd['refcam']", the keys are the center camera numbers, and the values are their
    ## corresponding reference cameras.
    for key in rd['refcam']:
        aper_offset_deltas[key] = 0.0
        refcam_meas_aper_temps[key] = ref_whole_temps[rd['refcam'][key]]
    delta = refcam_meas_aper_temps + aper_offset_deltas
    beta = (ref_scene_temps[refcam] - ref_aper_temps) / (ref_scene_temps - ref_aper_temps)
    ## For the referenced cameras (5 and 6), force the beta to be 1.0.
    for key in rd['refcam']:
        beta[key] = 1.0
    ## Apply the correction with the estimated delta and beta variables.
    corr_meas_dcb = zeros_like(dcb_meas)
    for w in arange(Nw):
        if w in rd['refcam']:
            corr_meas_dcb[:,:,w] = (dcb_meas[:,:,w] - meas_whole_temps[w]) * beta[w] + delta[w]
        else:
            corr_meas_dcb[:,:,w] = (dcb_meas[:,:,w] - meas_aper_temps[w]) * beta[w] + delta[w]
    if debug:
        apert_new_temps = get_mean_spect(corr_meas_dcb, rd['aperture_xy'])
        for key in rd['refcam']:
            meas_aper_temps[key] = meas_whole_temps[key]
        whole_new_temps = get_mean_spect(corr_meas_dcb, rd['cropping_xy'])
        scene_new_temps = (ref_scene_temps - meas_aper_temps) * beta + delta
        print('aper_offset_deltas=', aper_offset_deltas)
        print('aperture_xy=', rd['aperture_xy'])
        print('cropping_xy=', rd['cropping_xy'])
        print('scene_xy=', rd['scene_xy'])
        print('unregistered_scene_xy=', rd['unregistered_scene_xy'])
        print('')
        print('Mean temperatures before/after dynamic correction:')
        print('     ---------- BEFORE ---------     ----- CALIB -----     ---------- AFTER ----------')
        print(' w      aper     scene     whole      delta      beta       aper      scene     whole')
        for w in arange(Nw):
            if w in rd['refcam']:
                print('%2i %9.4f %9.4f %9.4f  %9.4f %9.4f  %9.4f %9.4f %9.4f' %
                      (w, nan, ref_scene_temps[w], ref_whole_temps[w], delta[w], beta[w], apert_new_temps[w], scene_new_temps[w], whole_new_temps[w]))
            else:
                print('%2i %9.4f %9.4f %9.4f  %9.4f %9.4f  %9.4f %9.4f %9.4f' %
                      (w, meas_aper_temps[w], ref_scene_temps[w], ref_whole_temps[w], delta[w], beta[w], apert_new_temps[w], scene_new_temps[w], whole_new_temps[w]))
        print('')
        show_dcb(corr_meas_dcb, box3=rd['unregistered_scene_xy'], title='dynamically corrected dcb (degC)', showstats=True)
        temp_dcb = crop_dcb(corr_meas_dcb, rd['cropping_xy'])
        show_dcb(temp_dcb, box3=rd['scene_xy'], title='dynamically corrected dcb (degC) -- cropped', showstats=True)
        show_dcb(temp_dcb, plottype='difference1', title='dynamically corrected dcb (degC)', showstats=True)
        plt.show()
    ## If the user called the function asking for the delta and beta values, then set them here. These are
    ## assigned in a loop rather than using vectorized assignment because the latter would rebind the local
    ## name instead of filling the caller's array. ("is not None" -- the outputs are ndarrays, for which
    ## "!= None" is elementwise and its truth value is ambiguous.)
    if (delta_output is not None):
        for w in arange(Nw):
            delta_output[w] = delta[w]
    if (beta_output is not None):
        for w in arange(Nw):
            beta_output[w] = beta[w]
    ## Finally, apply the saturation ceiling by filling in the masked values.
    corr_meas_dcb = array(ma.fix_invalid(corr_meas_dcb, fill_value=saturation_ceiling))
    return(corr_meas_dcb)
- ## =================================================================================================
def read_gcifile_meta(filename, debug=False, save_header=False):
    '''
    Read in a GCI datafile's metadata contents.

    Parameters
    ----------
    filename : str
        The name of the `*.gci` file to read.
    debug : bool, optional
        Whether to print parsing diagnostics.
    save_header : bool, optional
        Accepted for API symmetry with read_gcifile(); not used here.

    Returns
    -------
    data : dict
        A dictionary containing all data elements from the decoded gci msg. If present in the gci \
        file, the list of dictionary keys can include "cool_shutter_temperatures", "heated_shutter_temperatures", \
        "timestamp", "accumulated_frames_list", "gain_cube_filename", "offset_cube_filename", \
        "cropping_xy", "filter_to_camera_trans", "serial_number", ....
    '''
    import struct
    assert isinstance(filename, str), 'The input "filename" must be a string type.'
    assert os.path.exists(filename), 'The input filename ("' + filename + '") does not exist.'
    ## Use a context manager so the handle is closed even if parsing raises.
    with open(filename, 'rb') as f:
        raw = f.read()
    ## len() is the actual byte count; sys.getsizeof() (used previously) includes object overhead.
    total_nbytes = len(raw)
    if debug: print('total_nbytes=', total_nbytes)
    n = 0
    msg = []
    nparts = struct.unpack('>I', raw[n:n+4])[0]
    if debug: print('nparts=', nparts)
    n += 4
    ## The first message part is the string label giving the ZMQ subscription topic.
    strlen = struct.unpack('>I', raw[n:n+4])[0]
    if debug: print('strlen=', strlen)
    n += 4
    ## Decode the topic bytes directly. Joining struct.unpack('>Nc', ...) output (the old
    ## approach) only works on Python 2, where the unpacked elements are str, not bytes.
    topic = raw[n:n+strlen].decode('utf-8')
    msg.append(topic)
    if debug: print('subscription topic=', topic)
    n += strlen
    ## Read the single metadata message part that follows the topic.
    msg_nbytes = struct.unpack('>I', raw[n:n+4])[0]
    if debug: print('msg_nbytes=', msg_nbytes)
    n += 4
    msg.append(raw[n:n+msg_nbytes])
    meta = decode_gci_msg_meta(msg, debug=debug)
    return(meta)
- ## =================================================================================================
def read_gcifile(filename, use_ir=True, use_vis=True, use_masks=True, debug=False, return_msg=False, save_header=False):
    '''
    Read in a GCI datafile and convert the contents to Numpy arrays. Note that `*.cub` files
    use the same format as the `*.gci` files, but are just missing some of the metadata.

    Parameters
    ----------
    filename : str
        The name of the `*.gci` file to read.
    use_ir : bool
        Whether to include the IR dcb in the returned dictionary.
    use_vis : bool
        Whether to include the VIS image in the returned dictionary.
    use_masks : bool
        Whether to include the masks in the returned dictionary.
    debug : bool, optional
        Whether to print parsing diagnostics.
    return_msg : bool, optional
        If True, return the raw list of message parts instead of the decoded dictionary.
    save_header : bool, optional
        Passed through to decode_gci_msg().

    Returns
    -------
    data : dict
        A dictionary containing all data elements from the decoded gci msg. If present in the gci \
        file, the list of dictionary keys can include "cool_shutter_temperatures", "heated_shutter_temperatures", \
        "timestamp", "accumulated_frames_list", "gain_cube_filename", "offset_cube_filename", \
        "cropping_xy", "filter_to_camera_trans", "serial_number", "dcb", "vis".
    '''
    import struct
    assert isinstance(filename, str), 'The input "filename" must be a string type.'
    assert os.path.exists(filename), 'The input filename ("' + filename + '") does not exist.'
    ## Use a context manager so the handle is closed even if parsing raises.
    with open(filename, 'rb') as f:
        raw = f.read()
    ## len() is the actual byte count; sys.getsizeof() (used previously) includes object overhead.
    total_nbytes = len(raw)
    if debug: print('total_nbytes=', total_nbytes)
    n = 0
    msg = []
    nparts = struct.unpack('>I', raw[n:n+4])[0]
    if debug: print('nparts=', nparts)
    n += 4
    ## The first message part is the string label giving the ZMQ subscription topic.
    strlen = struct.unpack('>I', raw[n:n+4])[0]
    if debug: print('strlen=', strlen)
    n += 4
    ## Decode the topic bytes directly. Joining struct.unpack('>Nc', ...) output (the old
    ## approach) only works on Python 2, where the unpacked elements are str, not bytes.
    topic = raw[n:n+strlen].decode('utf-8')
    msg.append(topic)
    if debug: print('subscription topic=', topic)
    n += strlen
    ## Loop through the subsequent parts, defining each part of the msg list.
    for i in range(int(nparts) - 1):
        msg_nbytes = struct.unpack('>I', raw[n:n+4])[0]
        if debug: print('msg_nbytes=', msg_nbytes)
        n += 4
        msg.append(raw[n:n+msg_nbytes])
        n += msg_nbytes
    if return_msg:
        return(msg)
    gcidata = decode_gci_msg(msg, use_ir=use_ir, use_vis=use_vis, use_masks=use_masks, debug=debug, save_header=save_header)
    return(gcidata)
- ## =================================================================================================
def dcb_objtemp_to_atsensor_radiance(dcb_temp, LUT=None, LUTtemps=None, poly_approx=False, \
                                     serial_number=None, debug=False):
    '''
    Convert object temperature to at-sensor radiance values, given the filter set, their
    transmission spectra, and the detector responsivity spectrum.

    Parameters
    ----------
    dcb_temp : ndarray
        The (Nx,Ny,Nw) datacube in degC units. NOTE: out-of-range values are truncated
        *in place* in the caller's array.
    LUT : ndarray
        A (ntemps,Nw) matrix of lookup table elements to use for interpolation.
    LUTtemps : ndarray
        A (ntemps) long vector of the degC values used to construct the lookup table.
    poly_approx : bool
        Whether to use the polynomial approximation to the LUT.
    serial_number : int
        The serial number of the GCI being analyzed. Not needed if LUT and LUTtemps are inputs.

    Returns
    -------
    dcb_radiance : ndarray
        The datacube converted to at-sensor radiance units.

    Raises
    ------
    ValueError
        If the datacube contains NaN values.
    '''
    from scipy.interpolate import interp1d
    ## "is None" rather than "== None": the latter is an elementwise comparison for ndarray
    ## inputs, and the resulting boolean array cannot be used in an "if" statement.
    gci_serial_number = GCI_SERIAL_NUMBER if (serial_number is None) else serial_number
    if LUT is None:
        (LUT, LUTtemps) = get_degc_to_atsensor_radiance_lut(poly_approx=poly_approx, serial_number=gci_serial_number)
    (Nx,Ny,Nw) = dcb_temp.shape
    LUT_mintemp = int(amin(LUTtemps))
    LUT_maxtemp = int(amax(LUTtemps))
    dcb_radiance = zeros_like(dcb_temp)
    for w in arange(Nw):
        lut_radiance = LUT[:,w]
        if any(isnan(dcb_temp[:,:,w])):
            raise ValueError('NaN values in the datacube!')
        if debug:
            print('[after] w=%i: amin(dcb)=%f, amax(dcb)=%f' % (w, amin(dcb_temp[:,:,w]), amax(dcb_temp[:,:,w])))
        dcb_mintemp = amin(dcb_temp[:,:,w])
        dcb_maxtemp = amax(dcb_temp[:,:,w])
        if (dcb_mintemp < LUT_mintemp):
            ## Need to make a 3D mask for indexing even though we're using 2D slices.
            mask = zeros((Nx,Ny,Nw), 'bool')
            mask[:,:,w] = (dcb_temp[:,:,w] < LUT_mintemp)
            dcb_temp[mask] = LUT_mintemp
            print('Truncating datacube values below %f in w=%i ...' % (LUT_mintemp, w))
        ## BUGFIX: this used to be an "elif", so a slice that needed truncation at BOTH ends
        ## was only truncated at the low end, and interp1d then raised on the high values.
        if (dcb_maxtemp > LUT_maxtemp):
            ## Need to make a 3D mask for indexing even though we're using 2D slices.
            mask = zeros((Nx,Ny,Nw), 'bool')
            mask[:,:,w] = (dcb_temp[:,:,w] > LUT_maxtemp)
            dcb_temp[mask] = LUT_maxtemp
            print('Truncating datacube values above %f in w=%i ...' % (LUT_maxtemp, w))
        f = interp1d(LUTtemps, lut_radiance)
        dcb_radiance[:,:,w] = f(dcb_temp[:,:,w])
    if debug:
        show_dcb(dcb_radiance, title='degC -> atsensor radiance converted dcb')
    return(dcb_radiance)
- ## =================================================================================================
def get_filterlist(d=None, serial_number=None, debug=False):
    '''
    Get the Nw-length list of filter uniqueIDs associated with each of the cameras.

    Parameters
    ----------
    d : dict, optional
        The "filter_to_channel_map" dictionary. If not given, it is loaded from the
        calibration archive for the given serial number.
    serial_number : int, optional
        The serial number of the GCI whose filterlist you want to look up. Defaults to the
        module-level GCI_SERIAL_NUMBER.

    Returns
    -------
    filterlist : list
        A list of the filter uniqueIDs, indexed by camera number.
    '''
    ## "is None" rather than "== None": identity test is the correct (and array-safe) idiom.
    gci_serial_number = GCI_SERIAL_NUMBER if (serial_number is None) else serial_number
    print('gci_serial_number=', gci_serial_number)
    if d is None:
        d = get_calib_dictionary('filter_to_channel_map.js', serial_number=gci_serial_number)
    else:
        assert isinstance(d, dict), 'Input "d" must be a dictionary.'
    if (gci_serial_number == 2):
        ## The serial-2 map is keyed by filter name; each entry records its camera number.
        if ('system_serial_number' in d): del d['system_serial_number']
        if debug:
            print(json.dumps(d, sort_keys=True, indent=4))
        ## First, find out how many cameras there are.
        maxcam = 0
        for key in d:
            maxcam = max(maxcam, int(d[key]['camera']))
        ## Now assemble the list of filters installed.
        Nw = maxcam + 1
        filterlist = [''] * Nw
        for key in d:
            for w in arange(Nw):
                if (d[key]['camera'] == str(w)):
                    filterlist[w] = d[key]['uniqueID']
    else:
        ## Other serial numbers key the map directly by camera number ('0', '1', ...).
        Nw = len(d)
        filterlist = [''] * Nw
        for key in d:
            filterlist[int(key)] = d[key]['uniqueID']
        ## (A leftover loop comparing the entry dict itself against str(w) was removed here;
        ## that comparison could never be true, so it had no effect.)
    return(filterlist)
- ## =================================================================================================
def acquire_manually_calibrated_hypercube(ncubes, do_registration=True, cropping_xy=None, units='radiance', debug=False):
    '''
    Skip the DAISI calibration stuff and perform all of the calibration steps manually.

    Parameters
    ----------
    ncubes : int
        The number of measurement cubes to acquire.
    do_registration : bool
        Whether or not to output a cropped registered hypercube or a fullsize hypercube.
    cropping_xy : ndarray
        A (Nw,4) size matrix of (xlo,xhi,ylo,yhi) coordinates for registration cropping.
    units : str
        {'degC','radiance'} the output data type. ('counts' is not present in this list because \
        there is no sense in calling a calibration collection routine if you want uncalibrated data.)

    Returns
    -------
    hcb : ndarray
        A (Nx,Ny,Nw,Nt) hypercube.

    Notes
    -----
    If there is something wrong with the DAISI calibration code, then you need to perform all of the
    calibration steps manually. This code performs the pre-measurement calibration setup, acquires
    ncubes of raw data, uses the acquired calibration data to calibrate the raw counts into degC,
    and then optionally crops the result into a fully registered hypercube.
    '''
    import time
    assert (ncubes == int(ncubes)), 'The input "ncubes" must be an integer type.'
    assert (units.lower() in ['radiance','degc']), '"units" input ("' + units + '") must be either "radiance" or "degC".'
    ## This block to set up and acquire the datacube of the unheated shutter.
    if debug: print('Taking the cool shutter measurement ...')
    set_heater_state('on')
    move_shutter('warm', 'up')
    move_shutter('cool', 'down')
    send_command('trigger_1pt_calibration')
    time.sleep(1)
    hcb = acquire_dcb(5, 6009)
    dcb_cool = mean(float32(hcb), axis=3)
    cool_temps = get_control_data()['cool_shutter_temperatures']
    ## size() works on both lists and arrays; numpy's alen() (used previously) was removed in numpy 1.23.
    if (size(cool_temps) == 2): cool_temps = mean(cool_temps)
    ## This block to acquire the datacube of the heated shutter.
    if debug: print('Taking the warm shutter measurement ...')
    move_shutter('cool', 'up')
    move_shutter('warm', 'down')
    hcb = acquire_dcb(5, 6009)
    dcb_warm = mean(float32(hcb), axis=3)
    warm_temps = get_control_data()['warm_shutter_temperatures']
    move_shutter('warm', 'up')
    if debug:
        print('Cool shutter temperatures=')
        print(cool_temps)
        print('Warm shutter temperatures=')
        print(warm_temps)
        print('Calculating the offset and gain cubes ...')
    (offset_dcb, gain_dcb) = get_offsets_and_gains(dcb_cool, dcb_warm, cool_temps, warm_temps, units=units)
    ## This block to acquire the measurement datacube.
    if debug: print('Collecting the measurement cube ...')
    hcb = float32(acquire_dcb(ncubes, 6009))
    ## Now that we're done with data acquisition, turn autocal back on.
    send_command('enable_camera_autocal')
    if debug:
        show_dcb(dcb_cool, title='cool cal dcb')
        show_dcb(dcb_warm, title='warm cal dcb')
        show_dcb(offset_dcb, title='offset dcb')
        show_dcb(gain_dcb, title='gain dcb')
        show_dcb(mean(hcb, axis=3), title='meas dcb, counts')
    ## Next use the offset and gain cubes to convert the raw counts to degC units.
    if debug: print('Converting the dcb to ' + units + ' units ...')
    ## Replace non-finite gains with the mean of the finite ones so the division below is safe.
    gain_dcb[logical_not(isfinite(gain_dcb))] = mean(gain_dcb[isfinite(gain_dcb)])
    for t in arange(ncubes):
        hcb[:,:,:,t] = ((hcb[:,:,:,t] - offset_dcb) / gain_dcb)
    if debug:
        show_dcb(mean(hcb, axis=3), title='measurement dcb (' + units + ' units)')
    if do_registration:
        ## "is None": comparing an ndarray with "== None" raises in an "if" statement.
        if cropping_xy is None:
            raise ValueError('The manual registration function has been disabled. '
                             'Cannot change registration rectangles.')
        ## BUGFIX: "dcb" was previously undefined here (its definition had been commented out),
        ## so supplying cropping_xy raised a NameError. Use the time-averaged cube for display.
        dcb = mean(hcb, axis=3)
        dcb_cropped = crop_dcb(dcb, cropping_xy)
        if debug:
            show_dcb(dcb_cropped, title='co-registered dcb')
    ## NOTE(review): the cropped cube is only displayed, never returned -- the full-size
    ## hypercube is returned regardless of do_registration. Confirm this is intended.
    return(hcb)
- ## =================================================================================================
def get_gas_spectra(waves=None, gaslist=None, path=None, skip_nonquant=True, debug=False):
    '''
    Get the dictionary of gas cross-section spectra from the library of JDX files located in given directory.

    Parameters
    ----------
    waves : ndarray
        A vector of wavelengths at which to interpolate the gas spectra.
    gaslist : list
        A list of gas names (matching the JDX filenames) of interest. Ignore any filenames that are \
        not in this list.
    skip_nonquant : bool
        Whether or not to ignore gas spectra that are non-quantitative (i.e. not in absolute \
        cross-section units).
    path : str
        A directory for where to find the gas spectra files (if you want to look in a place other \
        than the standard calibration directory).

    Returns
    -------
    gas_data : dict
        A dictionary whose keys are the filenames of the gases (minus the filename suffix) and whose \
        values are the cross-section spectra in units of cm^2 ppm. An extra key "waves" is also added, and gives \
        the wavelengths at which the spectra have been interpolated.
    '''
    from scipy.interpolate import interp1d
    ## "is None" rather than "== None": the latter is elementwise for an ndarray "waves"
    ## input, and the resulting boolean array cannot be used in an "if" statement.
    if waves is None:
        waves = linspace(7.0, 18.0, 500)
    wavemin = amin(waves)
    wavemax = amax(waves)
    if path is None:
        path = GCIDIR + 'cal/gas_spectra/'
    jcamp_files = glob.glob(os.path.normpath(path + '*.jdx'))
    gas_data = {}
    gas_data['waves'] = waves
    gas_data['none'] = ones_like(waves)
    ## Read in the gas spectra. (The filename variable is no longer reused for the
    ## interpolator, which previously shadowed the loop variable "f".)
    for jfile in jcamp_files:
        gasname = os.path.basename(jfile)[:-4]   ## strip the ".jdx" suffix
        if (gaslist is not None) and (gasname not in gaslist): continue
        if debug: print(gasname)
        ## Read slightly beyond the requested band so the interpolation below has support.
        jcamp_dict = jcamp_spectrum(jfile, wavemin-0.25, wavemax+0.25, skip_nonquant=skip_nonquant, debug=debug)
        if not jcamp_dict: continue
        interpfcn = interp1d(jcamp_dict['x'], jcamp_dict['xsec'])
        gas_data[jcamp_dict['title']] = interpfcn(waves)
    return(gas_data)
- ## =================================================================================================
def calculate_hmatrix(filterlist, filter_data, return_data_structure=False, debug=False):
    '''
    Calculate the GCI system matrix, for a set of M filters and N spectral channels. The spectral
    channels are defined automatically by the filter edge wavelengths. (If any bandpass filter is
    in the system, it ignores it for the purpose of calculating the spectral channel wavelengths.)

    Parameters
    ----------
    filterlist : list
        A list the filter uniqueIDs in the system, in the order of the cameras they are mounted to.
    filter_data : dict
        A dictionary containing the filter spectra.
    return_data_structure : bool
        Whether to return just the H-matrix itself or to also return some of the associated spectral \
        channel information.

    Returns
    -------
    H : ndarray
        A MxN matrix.
    channel_basisfcns : ndarray
        The nwaves x nchannels matrix of basis functions for discretizing the spectrum.
    edgewave_list : list
        A list of the Nw "edge wavelengths" for each of the Nw filters.
    channel_boundaries : ndarray
        A Nw+1 list of the wavelengths associated with the spectral channel boundaries.
    '''
    assert isinstance(filterlist, (list, tuple)), 'The input "filterlist" must be a list type.'
    assert isinstance(filter_data, dict), 'The input "filter_data" must be a dict type.'
    nfilters = len(filterlist)
    waves = filter_data['waves']
    ## First go through and get a list of the simple filter names (stripped out of the uniqueIDs).
    ## uniqueIDs are assumed to be underscore-delimited with the filter name in the second field.
    filternames = []
    for f in filterlist:
        if (f.lower() == 'none'): continue
        pieces = f.split('_')
        filternames.append(pieces[1])
    ## Grab the rectangles dictionary, which contains information about the reference cameras. Make a list
    ## of which cameras have reference cameras.
    gci_serial_number = serial_number_from_filterlist(filterlist)
    rd = read_rectanglesfile(serial_number=gci_serial_number)
    ## NOTE(review): refcam keys come from read_rectanglesfile(); the membership test
    ## "i in refcam_list" below assumes they are integers -- confirm against that function.
    refcam_list = rd['refcam'].keys()
    edgewave_list = {}
    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        legendstr = []
    ## For each non-reference, non-'none' camera, find the filter's edge wavelength.
    ## Bandpass ('BP') filters and unrecognized types are skipped, per the docstring.
    for i,f in enumerate(filterlist):
        if i in refcam_list: continue
        if (f.lower() == 'none'): continue
        filter_nameparts = f.split('_')
        filtertype = filter_nameparts[1][0:2]   ## gives 'BP', 'LP', or 'SP'
        if (filtertype == 'BP'):
            continue
        elif (filtertype not in ('LP','SP')):
            continue
        (edgewave, edgeval) = get_filter_edge_wavelength(waves, filter_data[f], filtertype=filtertype)
        edgewave_list[f] = edgewave
        if debug:
            print('edgewave_list=', edgewave_list)
            plt.plot(waves, filter_data[f])
            #legendstr.append(f)
            legendstr.append(filter_nameparts[1])
    if debug:
        leg = plt.legend(legendstr, prop={'size':12})
        leg.draggable()
        plt.title('Filter transmission curves')
        plt.xlabel('wavelength (um)')
    dw = mean(gradient(waves))   ## mean wavelength sample spacing, used as the integration step
    nfilters = alen(filterlist)
    ## Define the list of channel boundary wavelengths.
    ## NOTE(review): array(edgewave_list.items()) relies on Python 2 dict.items() returning a
    ## list of tuples; under Python 3 this needs list(edgewave_list.items()) -- confirm.
    edgewave_values = sorted(float32(array(edgewave_list.items())[:,1]))
    channel_boundaries = append(append(amin(waves), edgewave_values), amax(waves))
    ## Sometimes the channel boundaries are too close to define a useful channel (i.e. less than three data
    ## points). If so, delete the unwanted boundary. The "roll" is to add the too-narrow channel to the
    ## next channel to the right and not the next channel to the left. The "append" command is needed
    ## to make up for the fact that the dimensionality of the difference between channel boundary wavelengths
    ## is one less than that of the original vector.
    #okay = ((channel_boundaries[1:] - channel_boundaries[:-1]) > (3.0*dw))
    okay = ((channel_boundaries[1:] - channel_boundaries[:-1]) > 0.2)
    okay = append(okay, True)
    okay = roll(okay, 1)
    channel_boundaries = channel_boundaries[okay]
    nchannels = alen(channel_boundaries) - 1
    if debug: print('channel_boundaries=', channel_boundaries)
    channel_basisfcns = zeros((alen(waves),nchannels))
    ## From the list of channel boundary wavelengths, generate the channel basis functions.
    ## Each basis function is a boxcar: 1 inside the channel's wavelength span, 0 outside.
    for n in arange(nchannels):
        channel_basisfcns[:,n] = (waves > channel_boundaries[n]) * (waves <= channel_boundaries[n+1])
    ## The first sample is excluded by the strict ">" above; include it in channel 0 explicitly.
    channel_basisfcns[0,0] = 1.0
    H = zeros((nfilters,nchannels))
    ## Overall spectral sensitivity: detector responsivity times the two optics transmissions.
    sensitivity = filter_data['responsivity'] * filter_data['tamarisk_lens_transmission'] * filter_data['germanium_window_transmission']
    ## Given the channel basis functions, we can now form our integral for building the system matrix.
    ## Reference cameras get a zero row (indicator = 0) since they do not view the scene.
    for w,f in enumerate(filterlist):
        indicator = 0.0 if (w in refcam_list) else 1.0
        if debug: plt.figure()
        for n in arange(nchannels):
            ## Riemann-sum integral of (filter transmission x sensitivity) over channel n.
            integral = indicator * sum(filter_data[f] * sensitivity * channel_basisfcns[:,n] * dw)
            #normalizer = sum(channel_basisfcns[:,n] * dw)
            normalizer = 1.0
            if debug: print('w,f,n,integral,normalizer=', (w, f, n, integral, normalizer))
            if debug: plt.plot(waves, channel_basisfcns[:,n])
            H[w,n] = integral / normalizer
        if debug:
            plt.plot(waves, filter_data[f], 'r-', linewidth=2)
            plt.plot(waves, sensitivity, 'k-', linewidth=2)
            plt.title('w=' + str(w) + ': ' + f)
            plt.axis([6.5,18.5,-0.075,1.075])
            plt.xlabel('wavelength (um)')
            plt.ylabel('normalized\n responsivity')
            #plt.show()
    if not return_data_structure:
        return(H)
    else:
        return(H, channel_basisfcns, edgewave_list, channel_boundaries)
- ## =================================================================================================
- ## Return "none", "bandpass', "longpass" or "shortpass" depending on whether the input filter spectrum represents no filterat all,
- ## a bandpass filter, a longpass filter, or a shortpass filter. The input wavelengths ("waves_um") must be
- ## in microns for this to work.
def get_filter_type(waves_um, filter_spectrum):
    '''
    Determine what type of filter a given transmission spectrum represents.

    Parameters
    ----------
    waves_um : ndarray
        A vector of wavelengths (in microns)
    filter_spectrum : ndarray
        A vector giving a filter transmission spectrum

    Returns
    -------
    type : str
        {'none','shortpass','longpass','broadband'}
    '''
    waves_um = asarray(waves_um)
    filter_spectrum = asarray(filter_spectrum)
    ## len() replaces numpy's alen(), which was removed in numpy 1.23.
    assert (len(waves_um) == len(filter_spectrum)), 'Inputs "waves_um" and "filter_spectrum" must have the same length.'
    ## A perfectly flat spectrum carries no edge information: treat it as "no filter".
    if all(filter_spectrum == filter_spectrum[0]):
        return('none')
    peak_trans = amax(filter_spectrum)
    ## Locate the first and last samples above the half-maximum transmission level.
    idx = ravel(where(filter_spectrum > 0.5*peak_trans))
    first_idx = idx[0]
    last_idx = idx[-1]
    ## Classify by whether the half-max band touches the short-wavelength end of the grid and
    ## where it cuts off relative to 12 um (roughly mid-band for this instrument's 7-18 um range).
    if (first_idx == 0) and (waves_um[last_idx] < 12.0):
        filtertype = 'shortpass'
    elif (first_idx > 0) and (waves_um[last_idx] > 12.0):
        filtertype = 'longpass'
    else:
        filtertype = 'broadband'
    return(filtertype)
- ## =================================================================================================
def planck_blackbody(waves_um, T_celsius=25.0):
    '''
    Calculate the Planck blackbody spectrum.

    Parameters
    ----------
    waves_um : ndarray
        A vector of wavelengths (in microns) over which to sample the spectrum.
    T_celsius : float
        A temperature (in degC) at which to evaluate the blackbody spectrum.

    Returns
    -------
    spectrum : ndarray
        A vector sampled at the input wavelengths, in units of (W)/(cm^2.sr.um).
    '''
    waves_um = asarray(waves_um)
    ## size() accepts plain scalars and length-1 arrays alike; numpy's alen() (used
    ## previously) was removed in numpy 1.23.
    assert (size(T_celsius) == 1), 'The input "T_celsius" should be a scalar, but is a length ' + \
        str(size(T_celsius)) + ' object.'
    waves_m = waves_um * 1.0E-6   ## convert from wavelength in um to meters
    h = 6.63E-34                  ## Planck's constant (J.s)
    c = 3.0E8                     ## speed of light (m/s)
    k = 1.38E-23                  ## Boltzmann's constant (J/K)
    T = T_celsius + 273.15        ## convert from degC to Kelvin
    if (T > 0.1):
        numer = 2.0E-10 * h * c**2 / waves_m**5
        factor2 = h * c / (waves_m * k * T)
        ## Clamp the exponent magnitude at 127 to keep exp() from overflowing/underflowing.
        if any(factor2 > 127.0):
            factor2[factor2 > 127.0] = 127.0
        if any(factor2 < -127.0):
            factor2[factor2 < -127.0] = -127.0
        return(numer / (exp(factor2) - 1.0))   ## Units are (W)/(cm^2.sr.um).
    else:
        ## At (essentially) absolute zero there is no emission.
        return(waves_m * 0.0)   ## Units are (W)/(cm^2.sr.um).
- ## =================================================================================================
def get_degc_to_atsensor_radiance_lut(temps=None, filename=None, filterlist=None, filter_data=None, \
                                      poly_approx=False, serial_number=None, debug=False):
    '''
    Create the lookup table for converting temperature in degC to at-sensor radiance for each of the Nw cameras.

    Parameters
    ----------
    temps : ndarray, optional
        A vector of temperatures at which to evaluate the table.
    filename : str, optional
        The filename at which to save a text version of the LUT.
    filterlist : list, optional
        A list of the filters attached to each of the Nw cameras.
    filter_data : dict, optional
        A dictionary containing the filter spectra.
    poly_approx : bool, optional
        Whether to use a polynomial approximation to the LUT, and not the LUT itself.
    serial_number : int, optional
        The serial number of the GCI being queried.

    Returns
    -------
    LUT : ndarray
        The (ntemps,Nw) matrix of lookup table elements. If `poly_approx == True`, then the \
        lookup table elements returned are generated from the polynomial approximation and not \
        from the LUT directly.
    temps : ndarray
        The (ntemps) vector of degC temperatures used to sample the lookup table.
    '''
    ## "is None" rather than "== None": the latter is elementwise for an ndarray "temps" input.
    if filterlist is None: filterlist = get_filterlist(serial_number=serial_number)
    if filter_data is None: filter_data = get_filter_spectra(filterlist=filterlist)
    assert isinstance(filterlist, (list, tuple)), 'The input "filterlist" must be a list type.'
    assert isinstance(filter_data, dict), 'The input "filter_data" must be a dict type.'
    Nw = len(filterlist)
    if temps is None:
        temps = linspace(-150.0, 250.0, 46)
        ## Make sure that the temperatures are rounded to single-decimal place for an easy LUT.
        temps = around(temps * 10.0) / 10.0
    ## Add extreme temperatures at the ends of the table. We needs to add *duplicate* entries
    ## on both ends of the table in preparation to enforcing truncation at the table ends
    ## (this helps the Intell IPP library used by DAISI which has annoying behavior outside the
    ## bounds of the table).
    temps = append(temps, 500.0)    ## add a really hot temp
    temps = append(temps, 500.1)    ## add a really hot temp plus a little
    temps = append(-273.0, temps)   ## add almost absolute zero
    temps = append(-273.1, temps)   ## add absolute zero
    ntemps = len(temps)
    LUT = zeros((ntemps,Nw))
    new_temps = temps[1:-1]
    ## Overall spectral sensitivity: detector responsivity times the two optics transmissions.
    sensitivity = filter_data['responsivity'] * filter_data['tamarisk_lens_transmission'] * filter_data['germanium_window_transmission']
    ## Fill in the LUT for all but the sentinel end rows (those get set below).
    for w in arange(Nw):
        for n in arange(1, ntemps-1):
            LUT[n,w] = degc_to_atsensor_radiance(temps[n], filter_data['waves'], filter_data[filterlist[w]], sensitivity)
    if poly_approx:
        ## Replace the tabulated values with a 5th-order polynomial fit evaluated at the same temps.
        new_LUT = zeros_like(LUT)
        for w in arange(Nw):
            coeffs = get_poly_coeffs(new_temps, LUT[1:-1,w], 5)
            if debug:
                if (w == 0): print('camera  coeffs[0]  coeffs[1]  coeffs[2]  coeffs[3]  coeffs[4]  coeffs[5]')
                print('%2i %15.6e %15.6e %15.6e %15.6e %15.6e %15.6e' % (w, coeffs[0], coeffs[1], coeffs[2], coeffs[3], coeffs[4], coeffs[5]))
            new_LUT[1:-1,w] = polyeval_Horner(new_temps, coeffs)
            if debug and (w == 0):
                import matplotlib.pyplot as plt
                plt.figure()
                plt.plot(new_temps, LUT[1:-1,0])
                plt.plot(new_temps, new_LUT[1:-1,w])
                plt.legend(('LUT','poly_approx'), loc='best')
                plt.show()
        if debug:
            ## Only show the polynomial -> LUT comparison for w=0. It's too long to show all Nw.
            w = 0
            for n,t in enumerate(temps):
                print('n=%3i, t=%8.3f: LUT[n,w]=%f, polyval=%f' % (n, t, LUT[n,w], new_LUT[n,w]))
        ## Last step in the polynomial approximation is to replace the LUT with the approximate form.
        LUT = new_LUT
    ## Now replace the end temperatures with absurdly high and low values. We need to allow a tiny
    ## difference at the ends of the table to prevent scipy's interp1d from throwing an exception
    ## due to seeing zero slope there.
    LUT[0,:] = -1.0E6
    LUT[-1,:] = 1.0E6
    ## Write the LUT out to a file, if the filename is given. The column layout is generated
    ## from Nw (it was previously hard-coded to 12 cameras, crashing for any other Nw);
    ## for Nw=12 the output is byte-identical to the old format. The "encoding" keyword was
    ## dropped from open() since the content is ASCII and py2's open() does not accept it.
    if filename is not None:
        header = ' '.join(['Temp'] + ['E%i' % w for w in range(Nw)]) + '\n'
        rowfmt = '%10.1f' + ' %16.7f' * Nw + '\n'
        with open(filename, 'w') as f:
            f.write(header)
            for n in arange(ntemps):
                f.write(rowfmt % ((temps[n],) + tuple(LUT[n,:])))
    return(LUT, temps)
- ## =================================================================================================
def dcb_atsensor_radiance_to_objtemp(dcb_radiance, LUT=None, LUTtemps=None, silent=True, debug=False):
    '''
    Convert a datacube in radiance units to temperature in degC.

    Parameters
    ----------
    dcb_radiance : ndarray
        A (Nx,Ny,Nw) datacube in radiance units. NOTE: out-of-range values are truncated
        *in place* in the caller's array.
    LUT : ndarray
        A (ntemps,Nw) matrix of lookup table elements to use for interpolation.
    LUTtemps : ndarray
        A (ntemps) long vector of the degC values used to construct the lookup table.
    silent : bool
        Allow an interpolation exception to be passed silently (out-of-range values are
        truncated to the LUT limits instead of raising).

    Returns
    -------
    dcb_objtemp : ndarray
        A (Nx,Ny,Nw) datacube in units of object temperature (degC).

    Raises
    ------
    ValueError
        If `silent` is False and the datacube contains values outside the LUT range.

    Notes
    -----
    For the degC->radiance mapping, DAISI uses a polynomial approximation to the actual function.
    For the radiance->degC mapping used here, however, DAISI uses a lookup table --- the same one
    we use here.
    '''
    from scipy.interpolate import interp1d
    dcb_radiance = asarray(dcb_radiance)
    assert (dcb_radiance.ndim == 3), 'Input "dcb_radiance" must have three dimensions.'
    ## "is None" rather than "== None": the latter is elementwise for an ndarray LUT input,
    ## and the resulting boolean array cannot be used in an "if" statement.
    if LUT is None:
        (LUT, temps) = get_degc_to_atsensor_radiance_lut(debug=debug)
    else:
        ## BUGFIX: this used to read "temps = asarray(LUT)" immediately followed by
        ## "temps = asarray(LUTtemps)" -- the first assignment was a typo for LUT itself.
        LUT = asarray(LUT)
        temps = asarray(LUTtemps)
    (Nx,Ny,Nw) = dcb_radiance.shape
    dcb_objtemp = zeros_like(dcb_radiance)
    for w in arange(Nw):
        lut_radiance_values = LUT[:,w]
        dcb_min_radiance = amin(dcb_radiance[:,:,w])
        dcb_max_radiance = amax(dcb_radiance[:,:,w])
        ## NOTE(review): the low limit skips the first (sentinel) row, but the high limit uses
        ## the very last row -- confirm whether LUT[-2,w] was intended for symmetry.
        lut_min_radiance = LUT[1,w]    ## the ends of the LUT are special - don't truncate to those
        lut_max_radiance = LUT[-1,w]   ## the ends of the LUT are special - don't truncate to those
        if (dcb_min_radiance < lut_min_radiance):
            if silent:
                ## Need a 3D mask for indexing even though we're working on a 2D slice.
                mask = zeros((Nx,Ny,Nw), 'bool')
                mask[:,:,w] = (dcb_radiance[:,:,w] < lut_min_radiance)
                dcb_radiance[mask] = lut_min_radiance
                print('Truncating datacube values below %f in w=%i ...' % (lut_min_radiance, w))
            else:
                raise ValueError('The min datacube value (' + str(dcb_min_radiance) + ') is below the lower limit of the LUT.')
        if (dcb_max_radiance > lut_max_radiance):
            if silent:
                ## Need a 3D mask for indexing even though we're working on a 2D slice.
                mask = zeros((Nx,Ny,Nw), 'bool')
                mask[:,:,w] = (dcb_radiance[:,:,w] > lut_max_radiance)
                dcb_radiance[mask] = lut_max_radiance
                print('Truncating datacube values above %f in w=%i ...' % (lut_max_radiance, w))
            else:
                raise ValueError('The max datacube value (' + str(dcb_max_radiance) + ') is above the upper limit of the LUT.')
        ## Invert the LUT: interpolate from radiance back to temperature.
        f = interp1d(lut_radiance_values, temps)
        dcb_objtemp[:,:,w] = f(dcb_radiance[:,:,w])
    return(dcb_objtemp)
- ## =================================================================================================
def read_rectanglesfile(serial_number=None, debug=False):
    '''
    Read the rectangles file "dynamic_cal_config.js" (from the calibration data archive) into a Python dictionary.

    Parameters
    ----------
    serial_number : int, optional
        The serial number of the GCI system whose rectangles you want to look up. Defaults to
        the module-level GCI_SERIAL_NUMBER.

    Returns
    -------
    c : dict
        a dictionary containing the various rectangles definitions. The set of keys are
        * `aperture_xy`: defines the field reference aperture coordinates.
        * `cropping_xy`: defines the cropping rectangles used to co-register each image for the datacube.
        * `noaperture_xy`: defines the region of each image completely unobscured by the reference aperture edge.
        * `scene_xy`: defines the scene rectangle in the final coregistered images (1x4 array).
        * `unregistered_scene_xy`: defines the scene rectangle in coordinates of the *original* full image.
        * `full_xy`: defines the coordinates of the original uncropped images (1x4 array).

    Notes
    -----
    The DAISI software uses a JSON file format which defines the following keys: {`noaperture_rects`,\
    `aperture_rects`,`cropper_rects`,`scene_rect`}.
    The rectangles dictionary defined here adds a couple more keys, and changes the format of the entries.
    Note that the `xycoords` array is structured with `w` as the row index, and `[xlo,xhi,ylo,yhi]` as the
    four columns. The `xhi` and `yhi` values are not the highest pixel value but rather one more than the
    highest pixel value, so that cropping can be performed as `img[xlo:xhi,ylo:yhi]` without adding of one on
    the high indices.
    '''
    ## "is None" rather than "== None": identity test is the correct (and array-safe) idiom.
    gci_serial_number = GCI_SERIAL_NUMBER if (serial_number is None) else serial_number
    d = get_calib_dictionary('dynamic_cal_config.js', serial_number=gci_serial_number, debug=debug)
    ## Find out the *full* (not cropped!) dcb dimensions. Don't use "aperture_xy", since the unobscured
    ## cameras do not have their aperture rectangles defined, and so Nw will be too small.
    Nx = d['full_rects']['IR']['height']
    Nw = len(d['cropper_rects']['IR'].keys())
    ## Make a new dictionary "c" and convert the aperture rectangles to xy-coordinate format.
    c = {}
    c['aperture_xy'] = int32(daisi_rect_to_xycoords(d['aperture_rects'], Nx, Nw))
    c['cropping_xy'] = int32(daisi_rect_to_xycoords(d['cropper_rects']['IR'], Nx, Nw))
    c['noaperture_xy'] = int32(daisi_rect_to_xycoords(d['noaperture_rects'], Nx, Nw))
    c['full_xy'] = int32(daisi_rect_to_xycoords(d['full_rects']['IR'], Nx))
    c['reference_camera'] = int32(d['reference_channel'])
    ## The dictionary mapping center cameras to their reference cameras will have the form that the
    ## center cameras will be the keys and the reference cameras the values.
    if ('center_cam_config' in d):
        c['refcam'] = {}
        for config in d['center_cam_config']:
            c['refcam'][config['center']] = int(config['reference'])
    ## The registered scene coords are relative to the cropped coords and not the full coords.
    ## BUGFIX: guard on 'scene_rect' *before* dereferencing it -- previously the key was read
    ## unconditionally, so a file without 'scene_rect' raised KeyError despite the check below.
    if ('scene_rect' in d):
        Nx_cropped = d['cropper_rects']['IR']['0']['height']
        c['scene_xy'] = int32(daisi_rect_to_xycoords(d['scene_rect'], Nx_cropped))
        c['unregistered_scene_xy'] = zeros((Nw,4), 'int32')
        c['unregistered_scene_xy'][:,0] = c['scene_xy'][0] + c['cropping_xy'][:,0]
        c['unregistered_scene_xy'][:,1] = c['scene_xy'][1] + c['cropping_xy'][:,0]
        c['unregistered_scene_xy'][:,2] = c['scene_xy'][2] + c['cropping_xy'][:,2]
        c['unregistered_scene_xy'][:,3] = c['scene_xy'][3] + c['cropping_xy'][:,2]
    if debug: print('c=', c)
    return(c)
- ## =================================================================================================
def update_rectangles_dict(rect_dict, rect_name, xycoords, debug=False):
    '''
    Take an existing "rectangles" dictionary and update one of the entries with the input,
    returning the updated rectangles dictionary. This requires a *function* because some of
    the rectangle values are interconnected (i.e. if you change `cropping_xy` or `scene_xy`,
    then `unregistered_scene_xy` must also change to correspond).

    Parameters
    ----------
    rect_dict : dict
        The rectangles dictionary to update (modified in place).
    rect_name : str
        {'aperture_xy','noaperture_xy','scene_xy','cropping_xy','unregistered_scene_xy'}
        name of the rectangle object to update.
    xycoords : ndarray
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates for all Nw cameras. Or, in the
        case of the "scene_xy" rectangle, `xycoords` is only a length-4 vector.
    debug : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    rd : dict
        The updated rectangles dictionary.
    '''
    assert isinstance(rect_dict, dict), 'Input "rect_dict" must be a dict type.'
    assert isinstance(rect_name, str), 'Input "rect_name" must be a string type.'
    xycoords = asarray(xycoords)
    if (rect_name not in ['aperture_xy','noaperture_xy','scene_xy','cropping_xy','unregistered_scene_xy']):
        raise ValueError('The "rect_name" input (' + rect_name + ') does not correspond to one of the defined rectangle names!')
    ## Define an abbreviation. (The original unused Nx/Ny/Nw locals were removed; they
    ## also relied on numpy.alen(), which was removed in numpy 1.23.)
    rd = rect_dict
    ## Update the dictionary entry.
    rd[rect_name] = xycoords
    if (rect_name == 'unregistered_scene_xy'):
        ## Back out the (uniform) registered scene rectangle from camera 0's
        ## unregistered scene rectangle and camera 0's cropping origin.
        rd['scene_xy'][0] = xycoords[0,0] - rd['cropping_xy'][0,0]
        rd['scene_xy'][1] = xycoords[0,1] - rd['cropping_xy'][0,0]
        rd['scene_xy'][2] = xycoords[0,2] - rd['cropping_xy'][0,2]
        rd['scene_xy'][3] = xycoords[0,3] - rd['cropping_xy'][0,2]
    elif (rect_name == 'scene_xy'):
        ## Translate the (uniform) registered scene coordinates to the unregistered
        ## (per-camera) scene rectangles by adding each camera's cropping origin.
        rd['unregistered_scene_xy'][:,0] = xycoords[0] + rd['cropping_xy'][:,0]
        rd['unregistered_scene_xy'][:,1] = xycoords[1] + rd['cropping_xy'][:,0]
        rd['unregistered_scene_xy'][:,2] = xycoords[2] + rd['cropping_xy'][:,2]
        rd['unregistered_scene_xy'][:,3] = xycoords[3] + rd['cropping_xy'][:,2]
    elif (rect_name == 'cropping_xy'):
        ## The registered scene rectangle does *not* change when the cropping rectangles
        ## change, so re-derive the unregistered scene rectangles from it.
        ## BUGFIX: the original added the new cropping rectangle to itself
        ## (xycoords[w,:] + cropping[w,0/2]) instead of offsetting the scene rectangle,
        ## which is inconsistent with the 'scene_xy' branch above.
        rd['unregistered_scene_xy'][:,0] = rd['scene_xy'][0] + xycoords[:,0]
        rd['unregistered_scene_xy'][:,1] = rd['scene_xy'][1] + xycoords[:,0]
        rd['unregistered_scene_xy'][:,2] = rd['scene_xy'][2] + xycoords[:,2]
        rd['unregistered_scene_xy'][:,3] = rd['scene_xy'][3] + xycoords[:,2]
    return(rd)
- ## =================================================================================================
def write_rectanglesfile(rect_dict, serial_number=None, debug=False):
    '''
    Convert a rectangles dictionary to the form used by DAISI and save to a file.
    A backup copy ("<filename>~") of any existing file is made before overwriting.

    Parameters
    ----------
    rect_dict : dict
        The rectangles dictionary. Must contain the keys 'aperture_xy', 'scene_xy',
        'noaperture_xy', and 'full_xy'; 'cropping_xy' is optional.
    serial_number : int, optional
        The serial number of the system whose repository rectangles file you want to modify.
        Defaults to the module-level `GCI_SERIAL_NUMBER`.
    debug : bool
        If True, print the serialized JSON before writing it.
    '''
    assert isinstance(rect_dict, dict), 'Input "rect_dict" must be a dict type.'
    gci_serial_number = GCI_SERIAL_NUMBER if (serial_number is None) else serial_number
    ## Define an abbreviation
    rd = rect_dict
    ## Need to convert each dictionary to the unusual format used by the JSON file.
    assert ('aperture_xy' in rd), '"aperture_xy" is not in the input dictionary!'
    assert ('scene_xy' in rd), '"scene_rect" is not in the input dictionary!'
    assert ('noaperture_xy' in rd), '"noaperture_xy" is not in the input dictionary!'
    assert ('full_xy' in rd), '"full_xy" is not in the input dictionary!'
    Nx = rd['full_xy'][0,1]
    ## Number of cameras = number of rows. (numpy.alen() was removed in numpy 1.23.)
    Nw = len(rd['full_xy'])
    ## Note you *must* convert from the numpy integer type to a pure Python int-type in order for
    ## "simplejson" to be able to serialize the result.
    d = {}
    d['scene_rect'] = xycoords_to_daisi_rect(rd['scene_xy'], Nx)
    d['aperture_rects'] = xycoords_to_daisi_rect(rd['aperture_xy'], Nx, Nw)
    ## NOTE(review): 'cropper_rects' is written from 'full_xy' here, but read_rectanglesfile()
    ## reads 'cropping_xy' out of d['cropper_rects']['IR'] -- the two functions disagree on
    ## both the key mapping and the nesting ('IR' sub-key), so a written file does not
    ## round-trip through the reader. Confirm the layout the DAISI consumer expects.
    d['cropper_rects'] = xycoords_to_daisi_rect(rd['full_xy'], Nx, Nw)
    d['noaperture_rects'] = xycoords_to_daisi_rect(rd['noaperture_xy'], Nx, Nw)
    if ('cropping_xy' in rd):
        d['registration_rects'] = xycoords_to_daisi_rect(rd['cropping_xy'], Nx, Nw)
    ## This can trip you up, so an explicit check is helpful!
    assert isinstance(d['aperture_rects']['0']['left'], int), 'daisi-format dictionary entries must be datatype "int" to be JSON-serializable!'
    s = json.dumps(d, sort_keys=True, indent=4)
    if debug: print(s)
    filename = os.path.normpath(GCIDIR + 'cal/gci' + str(gci_serial_number) + '/dynamic_cal_config.js')
    if os.path.exists(filename):
        import shutil
        shutil.copyfile(filename, filename+'~')
    ## Use a context manager so the file handle is closed even if write() raises.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(s)
    return
- ## =================================================================================================
def draw_block_spectrum(channel_boundaries, spectrum, newfigure=True, title=None, **kwargs):
    '''
    Draw a plot where the spectral channels are nonuniform in width and shaped by
    histogram-like rectangles.

    Parameters
    ----------
    channel_boundaries : ndarray
        A vector of length Nw+1 giving the wavelengths defining the boundaries of the Nw
        spectral channels.
    spectrum : ndarray
        A Nw length vector.
    newfigure : bool
        Whether or not to call matplotlib's `figure()` function.
    title : str
        The plot title.
    **kwargs : any
        Any keyword arguments that can be used by matplotlib's `plot()` function.
    '''
    import matplotlib.pyplot as plt
    channel_boundaries = asarray(channel_boundaries)
    spectrum = asarray(spectrum)
    ## BUGFIX: numpy.alen() was removed in numpy 1.23; use the builtin len() instead.
    assert (len(channel_boundaries) == 1 + len(spectrum)), 'Input "channel_boundaries" must have length 1 more than input "spectrum".'
    cb = channel_boundaries
    nchannels = len(cb) - 1
    ## Build the histogram-style outline: start at zero, draw a flat top across each
    ## channel, then drop back to zero at the far edge.
    x = [cb[0]]
    y = [0.0]
    for n in arange(nchannels):
        x.extend([cb[n], cb[n+1]])
        y.extend([spectrum[n], spectrum[n]])
    x.append(cb[-1])
    y.append(0.0)
    if newfigure:
        plt.figure()
        ## Pad the data range by 5% on each side (0.55 of the peak-to-peak about the center).
        xmin = amin(x)
        xmax = amax(x)
        xptp = xmax - xmin
        xmean = 0.5 * (xmin + xmax)
        xlo = xmean - 0.55 * xptp
        xhi = xmean + 0.55 * xptp
        ymin = amin(y)
        ymax = amax(y)
        yptp = ymax - ymin
        ymean = 0.5 * (ymin + ymax)
        ylo = ymean - 0.55 * yptp
        yhi = ymean + 0.55 * yptp
    else:
        ## First grab the existing plot limits. If these were previously determined by
        ## draw_block_spectrum(), then we need only rescale the plot range by (0.5/0.55)
        ## to get to the original data limits. Appending these to the current data vector,
        ## we can update the plot limits using both old and new data.
        (x0,x1,y0,y1) = plt.axis()
        x0mean = 0.5 * (x0 + x1)
        x0ptp = (0.5 / 0.55) * (x1 - x0)
        x0min = x0mean - 0.5 * x0ptp
        x0max = x0mean + 0.5 * x0ptp
        y0mean = 0.5 * (y0 + y1)
        y0ptp = (0.5 / 0.55) * (y1 - y0)
        y0min = y0mean - 0.5 * y0ptp
        y0max = y0mean + 0.5 * y0ptp
        ## Next determine the new plot range using the old and new data limits.
        xmin = amin(append(array(x), x0min))
        xmax = amax(append(array(x), x0max))
        xptp = xmax - xmin
        xmean = 0.5 * (xmin + xmax)
        xlo = xmean - 0.55 * xptp
        xhi = xmean + 0.55 * xptp
        ymin = amin(append(array(y), y0min))
        ymax = amax(append(array(y), y0max))
        yptp = ymax - ymin
        ymean = 0.5 * (ymin + ymax)
        ylo = ymean - 0.55 * yptp
        yhi = ymean + 0.55 * yptp
    plt.plot(x, y, **kwargs)
    ## BUGFIX: identity test for None rather than "!=".
    if (title is not None): plt.title(title)
    plt.axis([xlo,xhi,ylo,yhi])
    return
- ## =================================================================================================
def get_mean_spect(dcb, xycoords):
    '''
    Get the mean spectrum of a rectangular region of interest in a datacube.

    Parameters
    ----------
    dcb : ndarray
        A (Nx,Ny,Nw) datacube
    xycoords : ndarray
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates specifying a rectangular region
        in each plane of the datacube, or a single length-4 vector applied to every plane.
        The high coordinates are exclusive (one past the last pixel).

    Returns
    -------
    spect : ndarray
        A Nw-length vector giving the mean spectrum of the region. Planes whose rectangle
        is empty or inverted are set to NaN.
    '''
    dcb = asarray(dcb)
    xycoords = asarray(xycoords)
    assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
    Nw = dcb.shape[2]
    ## Broadcast a single rectangle to all Nw planes so both input forms share one code path.
    if (xycoords.ndim == 1):
        xycoords = tile(xycoords, (Nw,1))
    spect = zeros(Nw)
    for w in arange(Nw):
        (xlo, xhi, ylo, yhi) = xycoords[w,:]
        ## An empty or inverted rectangle has no mean: mark it NaN. (BUGFIX: the 1-D
        ## branch previously tested "==" while the 2-D branch tested ">=", so inverted
        ## rectangles slipped through the single-rectangle path; both now use ">=".)
        if (xlo >= xhi) or (ylo >= yhi):
            spect[w] = nan
        else:
            spect[w] = mean(dcb[xlo:xhi, ylo:yhi, w])
    return(spect)
- ## =================================================================================================
def dcb_region_stats(dcb, xycoords=None):
    '''
    Get the mean, stdev, min, and max spectra of a rectangular region of interest in a datacube.

    Parameters
    ----------
    dcb : ndarray
        A (Nx,Ny,Nw) datacube
    xycoords : ndarray, optional
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates specifying a rectangular region
        in each plane of the datacube, or a single length-4 vector applied to every plane.
        The high coordinates are exclusive. Defaults to the whole image.

    Returns
    -------
    meanvals : ndarray
        A Nw-length vector giving the mean spectrum of the region.
    stdvals : ndarray
        A Nw-length vector giving the stdev of each region in the Nw planes of the cube.
    minvals : ndarray
        A Nw-length vector giving the minimum of each region.
    maxvals : ndarray
        A Nw-length vector giving the maximum of each region.

    Example
    -------
    import gci
    rect_dict = gci.read_rectanglesfile()
    xycoord = rect_dict['scene_xy']
    (means,stdevs,minvals,maxvals) = gci.dcb_region_stats(dcb, xycoord)
    '''
    dcb = asarray(dcb)
    assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
    (Nx,Ny,Nw) = dcb.shape
    ## BUGFIX: "xycoords == None" is an elementwise comparison when xycoords is an
    ## ndarray (raising ValueError in modern numpy); use an identity test.
    if (xycoords is None):
        ## Default to the *whole* image. The high coordinates are exclusive (one past
        ## the last pixel), so use Nx/Ny here -- the old default of [0,Nx-1,0,Ny-1]
        ## silently dropped the last row and column.
        xycoords = asarray([0, Nx, 0, Ny])
    else:
        xycoords = asarray(xycoords)
    ## Broadcast a single rectangle to all Nw planes so both input forms share one code path.
    if (xycoords.ndim == 1):
        xycoords = tile(xycoords, (Nw,1))
    meanvals = zeros(Nw)
    stdvals = zeros(Nw)
    minvals = zeros(Nw)
    maxvals = zeros(Nw)
    for w in arange(Nw):
        (xlo, xhi, ylo, yhi) = xycoords[w,:]
        ## Empty or inverted rectangles produce NaN statistics.
        if (xlo >= xhi) or (ylo >= yhi):
            meanvals[w] = stdvals[w] = minvals[w] = maxvals[w] = nan
        else:
            region = dcb[xlo:xhi, ylo:yhi, w]
            meanvals[w] = mean(region)
            stdvals[w] = std(region)
            minvals[w] = amin(region)
            maxvals[w] = amax(region)
    return(meanvals, stdvals, minvals, maxvals)
- ## =================================================================================================
def daisi_rect_to_xycoords(rect_dict, Nx, Nw=0):
    '''
    Convert a DAISI-format rectangles dictionary (as parsed from the JSON file) into an
    xycoords matrix.

    Parameters
    ----------
    rect_dict : dict
        A rectangles dictionary: either a single {'left','top','width','height'} record
        (when `Nw` is 0), or a dict of such records keyed by the camera index as a string.
    Nx : int
        The x-dimension of the datacube (needed to flip the 'top' origin).
    Nw : int
        The w-dimension of the datacube; 0 means a single rectangle.

    Returns
    -------
    xycoords : ndarray
        A (Nw,4) int32 matrix of (xlo,xhi,ylo,yhi) for all Nw cameras, or a length-4
        vector when `Nw` is 0. Cameras missing from `rect_dict` keep all-zero rows.
    '''
    assert isinstance(rect_dict, dict), 'Input "rect_dict" must be a dict type.'
    assert (Nx == int(Nx)), 'Input "Nx" must be an integer type.'

    def _as_row(r):
        ## DAISI stores (left, top, width, height) with 'top' measured from the image
        ## top edge; convert to (xlo, xhi, ylo, yhi) with exclusive high coordinates.
        return (Nx - r['top'] - r['height'],
                Nx - r['top'],
                r['left'],
                r['left'] + r['width'])

    if (Nw == 0):
        xycoords = zeros(4, 'int32')
        xycoords[:] = _as_row(rect_dict)
        return(xycoords)

    assert (Nx > 0), 'Input "Nx" is zero!'
    xycoords = zeros((Nw,4), 'int32')
    for w in range(Nw):
        key = str(w)
        if key in rect_dict:
            xycoords[w,:] = _as_row(rect_dict[key])
    return(xycoords)
- ## =================================================================================================
def xycoords_to_daisi_rect(xycoords, Nx, Nw=0):
    '''
    Convert an xycoords matrix into the DAISI rectangle format.

    Parameters
    ----------
    xycoords : ndarray
        A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates for all Nw cameras, or a
        length-4 vector of (xlo,xhi,ylo,yhi) when `Nw` is 0.
    Nx : int
        The x-dimension of the datacube (needed to flip back to a top-edge origin).
    Nw : int
        The w-dimension of the datacube; 0 means a single rectangle.

    Returns
    -------
    d : dict
        A DAISI-format dictionary (ready to be converted to JSON format). For Nw > 0
        the records are keyed by camera index as a string; all-zero (undefined)
        rectangles are omitted.
    '''
    xycoords = asarray(xycoords)
    assert (Nx == int(Nx)), 'Input "Nx" must be an integer type.'

    def _as_rect(row):
        ## Plain Python ints are *required* here: numpy integer types are not
        ## serializable by simplejson.
        (xlo, xhi, ylo, yhi) = row
        return {'left': int(ylo),
                'top': int(Nx - xhi),
                'width': int(yhi - ylo),
                'height': int(xhi - xlo)}

    if (Nw == 0):
        assert (xycoords.ndim == 1), 'Input "xycoords" should be a simple vector when Nw=0.'
        return(_as_rect(xycoords))

    assert (Nx > 0), 'Input "Nx" is zero!'
    d = {}
    for w in range(Nw):
        ## A row whose xlo and xhi are both zero marks an undefined rectangle: skip it.
        if (xycoords[w,0] + xycoords[w,1] == 0): continue
        d[str(w)] = _as_rect(xycoords[w,:])
    return(d)
- ## =================================================================================================
def print_rects(rect_dict=None):
    '''
    Print out the rectangles dictionary.

    Parameters
    ----------
    rect_dict : dict, optional
        The rectangles dictionary to print. If not entered, then the function reads from
        the default file to get the rectangles dictionary.
    '''
    rd = rect_dict
    ## BUGFIX: identity test for None rather than "==".
    if (rd is None): rd = read_rectanglesfile()

    def _print_percam(label, xy):
        ## Print one (Nw,4) rectangle array, one camera per line.
        print(label + ':')
        for w in arange(len(xy)):
            print('    w%2i: %3i, %3i, %3i, %3i' % (w, xy[w,0], xy[w,1], xy[w,2], xy[w,3]))

    _print_percam('aperture_xy', rd['aperture_xy'])
    _print_percam('cropping_xy', rd['cropping_xy'])
    _print_percam('noaperture_xy', rd['noaperture_xy'])
    scene_xy = rd['scene_xy']
    print('scene_xy:')
    print('    %3i, %3i, %3i, %3i' % (scene_xy[0], scene_xy[1], scene_xy[2], scene_xy[3]))
    ## NOTE(review): 'full_xy' is printed as a length-4 vector here, matching the shape
    ## produced by read_rectanglesfile(); other functions index it as 2D -- confirm.
    full_xy = rd['full_xy']
    print('full_xy:')
    print('    %3i, %3i, %3i, %3i' % (full_xy[0], full_xy[1], full_xy[2], full_xy[3]))
    _print_percam('unregistered_scene_xy', rd['unregistered_scene_xy'])
    return
- ## =================================================================================================
def xcorr(f, g):
    '''
    Compute the normalized cross-correlation coefficient of the two vectors `f` and `g`,
    with no relative shift applied.

    Parameters
    ----------
    f : ndarray
    g : ndarray

    Returns
    -------
    xcorr : float
        The normalized cross-correlation value, in the range [-1, +1].
    '''
    assert (f.size == g.size), 'Inputs "f" and "g" must have the same length.'
    assert (f.shape == g.shape), 'Inputs "f" and "g" must have the same shape.'
    assert not any(isnan(f)), 'Cannot compute the cross-correation, since input "f" has NaN values.'
    assert not any(isnan(g)), 'Cannot compute the cross-correation, since input "g" has NaN values.'
    from numpy.linalg import norm
    ## Subtract each vector's mean, scale to unit length, and take the dot product;
    ## this equals sum((f-mean(f))*(g-mean(g))) / ((len(f)-1) * std(f) * std(g)).
    f_centered = f - mean(f)
    g_centered = g - mean(g)
    return(dot(f_centered / norm(f_centered), g_centered / norm(g_centered)))
- ## =================================================================================================
- #def xcorr3d_old(dcb, g):
- # '''
- # Calculate the normalized cross-correlation coefficient of each pixel in `dcb` with the input
- # spectrum `g` without shifting one spectrum relative to the other.
- # '''
- #
- # ## Cross-correlate each spectrum in "dcb" (giving vector "f") with an equal-length vector spectrum
- # ## "g".
- # from numpy.linalg import norm
- #
- # g_mean = mean(g)
- # g_unit = (g - g_mean) / norm(g - g_mean)
- # (Nx,Ny,Nw) = dcb.shape
- # xcorr_img = zeros((Nx,Ny))
- #
- # for x in arange(Nx):
- # for y in arange(Ny):
- # if all(dcb[x,y,:] == 0):
- # xcorr_img[x,y] = nan
- # else:
- # f_mean = mean(dcb[x,y,:])
- # f_unit = (dcb[x,y,:] - f_mean) / norm(dcb[x,y,:] - f_mean)
- # xcorr_img[x,y] = dot(f_unit, g_unit)
- #
- # return(xcorr_img)
- ## =================================================================================================
def xcorr3d(dcb, g, weights=None):
    '''
    Calculate the normalized cross-correlation coefficient of each pixel spectrum in `dcb`
    with the input spectrum `g`, without shifting one spectrum relative to the other.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube to run cross-correlation on.
    g : ndarray
        The Nw-length spectrum to cross-correlate to.
    weights : ndarray, optional
        The weights to apply to the cross correlation, a cube of the same dimensions as
        `dcb`. Defaults to uniform weights. (The phantom "normalize" parameter mentioned
        in the original docstring never existed and has been removed from the docs.)

    Returns
    -------
    weighted_xcorr : ndarray
        The (Nx,Ny) weighted cross-correlation image, giving the normalized
        cross-correlation of each column in the datacube with the reference spectrum.
        Pixels whose spectra are identically zero are set to 0.
    '''
    dcb = float64(dcb)
    ## BUGFIX: "weights == None" is an elementwise comparison when weights is an ndarray
    ## (raising ValueError in modern numpy); use an identity test instead.
    if (weights is None):
        weights = ones_like(dcb)
    (Nx,Ny,Nw) = dcb.shape
    ## Replicate the target spectrum across the spatial dimensions.
    ## (The unused normgcube/normdcb arrays and the unused numpy.linalg.norm import
    ## from the original were removed.)
    gcube = zeros_like(dcb)
    for w in arange(Nw):
        gcube[:,:,w] = g[w]
    ## Weighted means along the spectral axis.
    dcbmean = average(dcb, weights=weights, axis=2)
    gcubemean = average(gcube, weights=weights, axis=2)
    ## Pixels with identically-zero spectra carry no information; flag them for later.
    bad = (dcbmean == 0) & (sum(dcb, axis=2) == 0)
    ## The three "wcov" are weighted covariance cubes -- these calculate the covariance of
    ## the datacube spectra with the target spectrum "g", weighted by the input weights.
    wcov1 = zeros((Nx,Ny,Nw))
    wcov2 = zeros((Nx,Ny,Nw))
    wcov3 = zeros((Nx,Ny,Nw))
    for w in arange(Nw):
        wcov1[:,:,w] = weights[:,:,w] * (dcb[:,:,w] - dcbmean) * (gcube[:,:,w] - gcubemean)
        wcov2[:,:,w] = weights[:,:,w] * (dcb[:,:,w] - dcbmean)**2
        wcov3[:,:,w] = weights[:,:,w] * (gcube[:,:,w] - gcubemean)**2
    avg_wcov1 = mean(wcov1, axis=2)
    avg_wcov2 = mean(wcov2, axis=2)
    avg_wcov3 = mean(wcov3, axis=2)
    weighted_xcorr = avg_wcov1 / (sqrt(avg_wcov2 * avg_wcov3))
    weighted_xcorr[bad] = 0
    return(weighted_xcorr)
- ## =================================================================================================
def convert_radiancecube_to_spectralcube(dcb, filterlist=None, filter_data=None, debug=False):
    '''
    For an input datacube in radiance units, use the filter spectra to build a system matrix
    and convert the input to a spectral radiance cube.

    Parameters
    ----------
    dcb : ndarray
        A (Nx,Ny,Nw) datacube in radiance units.
    filterlist : list, optional
        A list of the Nw filter uniqueIDs associated with the Nw cameras. Defaults to
        get_filterlist().
    filter_data : dict, optional
        A dictionary of the filter transmission spectra. Defaults to
        get_filter_spectra(filterlist=filterlist).
    debug : bool
        If True, print the H matrix and its pseudoinverse.

    Returns
    -------
    spectraldcb : ndarray
        A (Nx,Ny,nchannels) datacube in units of mean spectral radiance per channel (*not* per micron!).
    '''
    from scipy.linalg import pinv
    dcb = asarray(dcb)
    assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
    ## BUGFIX: identity tests for None rather than "==".
    if (filterlist is None): filterlist = get_filterlist()
    if (filter_data is None): filter_data = get_filter_spectra(filterlist=filterlist)
    assert isinstance(filterlist, (list, tuple)), 'The input "filterlist" must be a list type.'
    assert isinstance(filter_data, dict), 'The input "filter_data" must be a dict type.'
    ## The next step is to transform the measurement cube to a spectral cube.
    H = calculate_hmatrix(filterlist, filter_data, return_data_structure=False, debug=debug)
    (Nx,Ny,Nw) = dcb.shape
    nchannels = H.shape[1]
    Hplus = pinv(H)
    ## Apply the pseudoinverse to every pixel spectrum at once:
    ##     spectraldcb[x,y,c] = sum_w Hplus[c,w] * dcb[x,y,w]
    ## This single tensordot replaces the original per-pixel double Python loop
    ## (identical result, without O(Nx*Ny) interpreter overhead).
    spectraldcb = tensordot(dcb, Hplus, axes=([2],[1]))
    if debug:
        print('H[w,c]=')
        print(('%12s'*nchannels) % tuple(arange(nchannels)))
        for w in arange(Nw):
            print('%2i ' % w, end='')
            print(('%11.7f '*nchannels) % tuple(H[w,:]))
        print('\nHplus[c,w]=')
        print(('%12s'*Nw) % tuple(arange(Nw)))
        for c in arange(nchannels):
            print('%2i ' % c, end='')
            print(('%11.7f '*Nw) % tuple(Hplus[c,:]))
    return(spectraldcb)
- ## =================================================================================================
def send_asynch_command(command_name, verbose=False, *args):
    '''
    Send a command to DAISI through the JSON asynchronous messaging system.

    Parameters
    ----------
    command_name : str
        The name of the command to send. One of:
        * "reset_temporal_reference": (no extra args) tell DAISI to reset the temporal reference.
        * "change_scene_reference_rectangle": (args are "Nx", a size-1 array, and
          "scene_xy", a 4-element array) tell DAISI to change the scene reference
          rectangle (in memory, not in the file!) to the input xycoords vector.
    verbose : bool
        If True, print the command before sending it.
    *args : any
        Command-specific arguments (see above); distinguished by their .size attribute.

    Raises
    ------
    ValueError
        If `command_name` is not one of the recognized commands.
    '''
    assert isinstance(command_name, str), 'Input "command_name" must be a string type.'
    ctx = zmq.Context()
    if (command_name == 'reset_temporal_reference'):
        topic = 'reset-commands'
        payload = json.dumps({'cmd_name':'force-temporal-reset'})
        return_info = json.dumps({'return_addr':'temporal-reset'})
    elif (command_name == 'change_scene_reference_rectangle'):
        ## Parse the input arguments into a vector for "scene_xy" and a scalar for "Nx".
        Nx = None
        scene_xy = None
        for a in args:
            if (a.size == 1):
                Nx = a
            elif (a.size == 4):
                scene_xy = a
            ## BUGFIX: "if Nx and scene_xy" raised an ambiguous-truth ValueError once
            ## scene_xy was a 4-element ndarray; test identity against None instead.
            if (Nx is not None) and (scene_xy is not None): break
        ## Convert the xycoords into a DAISI-format cropping dictionary.
        top = Nx - 1 - scene_xy[0]
        height = scene_xy[1] - scene_xy[0] + 1
        left = scene_xy[2]
        width = scene_xy[3] - scene_xy[2] + 1
        payload = json.dumps({'cmd_name':'set_scene_ref_rect', 'rect':{'left':left, 'top':top, 'width':width, 'height':height}})
        topic = 'dynamic-cal'
        return_info = json.dumps({'return_addr':'dummy value'})
    else:
        ## BUGFIX: an unrecognized command previously fell through and raised a confusing
        ## NameError on "topic"; fail loudly with a clear message instead.
        raise ValueError('Unrecognized command_name "' + command_name + '".')
    cmd = [topic, 'JSON_ASYNC_REQ', payload, return_info]
    ## BUGFIX: "cmd" is a list, so the original str+list concatenation raised a TypeError
    ## whenever verbose=True.
    if verbose: print('Sending asynchronous command:\n' + repr(cmd))
    ## NOTE(review): under Python 3, pyzmq's send_multipart() requires bytes-like frames,
    ## but these are str -- confirm the target Python/pyzmq versions.
    pubsock = ctx.socket(zmq.PUB)
    pubsock.connect('tcp://10.1.10.11:9550')
    pubsock.send_multipart(cmd)
    pubsock.close()
    return
- ## =================================================================================================
def set_gci_serial_number(num):
    '''Set the module-level GCI serial number (coerced to int).'''
    global GCI_SERIAL_NUMBER
    GCI_SERIAL_NUMBER = int(num)
- ## =================================================================================================
def get_gci_serial_number():
    '''Return the current module-level GCI serial number.'''
    serial = GCI_SERIAL_NUMBER
    return serial
- ## =================================================================================================
def get_calib_dictionary(filename, serial_number=None, debug=False):
    '''
    Get a calibration file from the data archive and convert its contents to a Python dictionary.

    Parameters
    ----------
    filename : str
        The name of the file within the archive we want to convert.
    serial_number : int, optional
        The serial number of the GCI to query. Defaults to the module-level `GCI_SERIAL_NUMBER`.
    debug : bool
        If True, print the resolved path of the calibration file.

    Returns
    -------
    thisdict : dict
        A dictionary of the file contents. The metadata keys 'system_serial_number' and
        'comment', if present, are stripped.
    '''
    assert isinstance(filename, str), 'The input "filename" must be a string type.'
    gci_serial_number = GCI_SERIAL_NUMBER if (serial_number is None) else serial_number
    path = os.path.normpath(GCIDIR + 'cal/gci' + str(gci_serial_number) + '/' + filename)
    if debug: print('calibration file = "' + path + '"')
    ## Use a context manager so the file handle is closed even if the read or parse fails.
    with open(path, 'r') as f:
        thisdict = json.loads(f.read())
    ## Strip non-calibration metadata so callers see only the calibration data itself.
    thisdict.pop('system_serial_number', None)
    thisdict.pop('comment', None)
    return(thisdict)
- ## =================================================================================================
def jcamp_reader(filename_or_filecontentstr):
    '''
    Read a JDX-format file.
    The function returns three items in a dictionary: the numpy vectors `x` and `y` giving
    the abscissa and ordinate data, and `info`, a dictionary containing the header information.

    Parameters
    ----------
    filename_or_filecontentstr : str
        Either the string giving the entire file contents or the name of the file to read.

    Returns
    -------
    thisdict : dict
        A dictionary of the file contents, with keys `x`, `y`, and `info`.

    Notes
    -----
    ## TODO: you should split the data lines from the info lines using the ##XYDATA marker as the
    ## beginning of the former.
    '''
    ## Accept either a filename or the raw file contents: if the string names an existing
    ## file, read it; otherwise treat the string itself as the file contents.
    if os.path.exists(filename_or_filecontentstr):
        f = open(filename_or_filecontentstr,'r')
        flines = f.read().splitlines()
        f.close()
    else:
        flines = filename_or_filecontentstr.splitlines()
    jcamp_dict = {}     ## parsed header fields; 'x' and 'y' are added at the end
    xstart = []         ## the first abscissa value of each data line
    xnum = []           ## the number of ordinate values on each data line
    y = []              ## all ordinate values, in file order
    for n,line in enumerate(flines):
        ## First work out the header dictionary.
        if line.startswith('##'):
            line = line[2:]
            if line.endswith('='):
                ## NOTE(review): a bare line like "##END=" keeps its trailing '=' in the
                ## key (it is never split off) -- confirm this is intended.
                lhs = line
                rhs = ''
            else:
                (lhs,rhs) = line.split('=', 1)
            lhs = lhs.strip().lower()
            rhs = rhs.strip().lower()
            ## Store the header value as int, float, or lowercase string -- whichever
            ## parses. ("is_float" is a helper defined elsewhere in this module.)
            if rhs.isdigit():
                jcamp_dict[lhs] = int(rhs)
            elif is_float(rhs):
                jcamp_dict[lhs] = float(rhs)
            else:
                jcamp_dict[lhs] = rhs
        elif line.startswith('$$'):
            ## If the line starts with '$$' then it is a comment: ignore it.
            pass
        else:
            ## If the line does not start with '##' or '$$' then it should be a data line.
            ## To make sure, check that all split elements are floats.
            ## NOTE(review): splitting on a single space means runs of spaces (or tabs)
            ## yield non-float tokens and the whole line is silently skipped -- confirm
            ## the expected data-line formatting.
            datavals = line.split(' ')
            if all(is_float(datavals)):
                xstart.append(float(datavals[0]))
                xnum.append(len(datavals)-1)
                for dataval in datavals[1:]:
                    y.append(float(dataval))
    ## You got all of the Y-values. Next you need to figure out how to generate the missing X's...
    ## First look for the "lastx" dictionary entry. You will need that one to finish the set.
    ## NOTE(review): this raises KeyError when the file has no "##LASTX=" header.
    xstart.append(jcamp_dict['lastx'])
    x = asarray([])
    y = asarray(y)
    ## Reconstruct the abscissa by spacing each data line's xnum[n] y-values linearly
    ## between that line's first x-value and the next line's first x-value (or "lastx"
    ## for the final line).
    for n in arange(len(xnum)):
        x = append(x, linspace(xstart[n],xstart[n+1],xnum[n]))
    jcamp_dict['x'] = x
    jcamp_dict['y'] = y
    return(jcamp_dict)
- ## =================================================================================================
def jcamp_spectrum(filename, wavemin=None, wavemax=None, skip_nonquant=True, debug=False):
    '''
    Read a JDX-format file and convert its contents to an interpolated spectrum.

    The function attempts to extract quantitative information from the header, in order to
    convert the raw absorption data into cross-section data.

    Parameters
    ----------
    filename : str
        The file to read.
    wavemin : float, optional
        The minimum wavelength (in um) to interpolate to.
    wavemax : float, optional
        The maximum wavelength (in um) to interpolate to.
    skip_nonquant : bool
        Whether or not to return None-filled results for those spectra missing complete \
        quantitative info. If False, and the spectra are nonquantitative, then make a guess \
        to fill in the missing scale values.
    debug : bool
        Whether to print diagnostic messages.

    Returns
    -------
    jcamp_dict : dict
        A dictionary of the file contents, with keys `x`, `y`, `xsec`, and the header entries. \
        Returns None if the data lie entirely outside the requested spectral range; returns \
        None-filled placeholders when `skip_nonquant` is True and scale info is missing.
    '''
    jcamp_dict = jcamp_reader(filename)
    x = jcamp_dict['x']
    y = jcamp_dict['y']

    ppm = 1.0E-6       ## for conversion to ppm units
    T = 288.0          ## standard temperature in Kelvin
    R = 2.78           ## the gas constant in (m^3 * mmHg) / (amg * K)

    ## Note: normally when we convert from wavenumber to wavelength units, the ordinate must be
    ## nonuniformly rescaled in order to compensate. But this is only true if we resample the
    ## abscissa to a uniform sampling grid. In this case here, we keep the sampling grid
    ## nonuniform in wavelength space, such that each digital bin retains its proportionality
    ## to energy, which is what we want.
    if jcamp_dict['xunits'] in ('1/cm', 'cm-1'):
        x = 10000.0 / x
        jcamp_dict['xunits'] = 'um'
    elif (jcamp_dict['xunits'] == 'micrometers'):
        jcamp_dict['xunits'] = 'um'
    elif (jcamp_dict['xunits'] == 'nanometers'):
        ## FIX: nm -> um requires *dividing* by 1000 (1000 nm = 1 um); the original multiplied.
        x = x / 1000.0
        jcamp_dict['xunits'] = 'um'

    if (jcamp_dict['yunits'] == 'transmittance'):
        y = 1.0 - y
        jcamp_dict['yunits'] = 'absorbance'

    ## Make sure the spectrum is ascending in wavelength.
    if (x[0] > x[-1]):
        x = x[::-1]
        y = y[::-1]

    if ('xfactor' in jcamp_dict): x = x * jcamp_dict['xfactor']

    ## Reject spectra lying entirely outside the requested range; pad spectra that only
    ## partially cover it by repeating the end values. ("is not None" here: the original
    ## "!= None" style is discouraged, and the comparisons must be skipped when a bound
    ## is not supplied.)
    if (wavemin is not None) and (amax(x) < wavemin):
        if debug: print('The spectrum for "' + jcamp_dict['title'] + '" contains no data in the requested spectral range.')
        return(None)
    elif (wavemax is not None) and (amin(x) > wavemax):
        if debug: print('The spectrum for "' + jcamp_dict['title'] + '" contains no data in the requested spectral range.')
        return(None)
    elif (wavemin is not None) and (amin(x) > wavemin):
        min_retrieved = str(amin(x))
        x = append(array(wavemin), x)
        y = append(y[0], y)
        if debug: print('"' + jcamp_dict['title'] + '": Minimum of x retrieved ( ' + min_retrieved + ') is greater than the required minimum wavelength (' + str(wavemin) + ').')
    elif (wavemax is not None) and (amax(x) < wavemax):
        max_retrieved = str(amax(x))
        x = append(x, wavemax)
        y = append(y, y[-1])
        if debug: print('"' + jcamp_dict['title'] + '": Maximum of x retrieved ( ' + max_retrieved + ') is less than the required maximum wavelength (' + str(wavemax) + ').')

    ## Correct for any unphysical data by clamping absorbance into [0,1].
    if any(y < 0.0):
        y[y < 0.0] = 0.0
    if any(y > 1.0):
        y[y > 1.0] = 1.0

    ## Determine the effective path length "ell" of the measurement chamber, in meters.
    if ('path length' in jcamp_dict):
        tokens = jcamp_dict['path length'].split()
        if (len(tokens) >= 2):
            (val, unit) = tokens[0:2]
        else:
            ## Malformed entry: fall back to the default rather than raising on unpack.
            (val, unit) = ('0.1', 'm')
        if (unit == 'cm'):
            ell = float(val) / 100.0
        elif (unit == 'm'):
            ell = float(val)
        elif (unit == 'mm'):
            ell = float(val) / 1000.0
        else:
            ell = 0.1
    else:
        if debug: print('"' + jcamp_dict['title'] + '": No path length value available for ' + jcamp_dict['title'] + '. Using the default ell = 0.1 ...')
        if skip_nonquant: return({'info':None, 'x':None, 'xsec':None, 'y':None})
        ell = 0.1

    ## numpy.alen() was removed from numpy; len() is equivalent here for 1D arrays.
    assert(len(x) == len(y))

    if ('yfactor' in jcamp_dict):
        y = y * jcamp_dict['yfactor']

    if ('npoints' in jcamp_dict):
        if (len(x) != jcamp_dict['npoints']):
            if debug:
                print('"' + jcamp_dict['title'] + '": Number of data points retrieved (' + str(len(x)) +
                      ') does not equal the expected length (npoints = ' + str(jcamp_dict['npoints']) + ')!')

    ## For each gas, the pressure "p" (in mmHg) at which the published measurement was taken.
    ## A couple of IUPAC-style titles are also renamed to their common names.
    pressures_mmHg = {
        'carbon dioxide':200.0,      'carbon monoxide':400.0,    'hydrogen sulfide':600.0,
        'methane':150.0,             'ethane':150.0,             'propane':150.0,
        'n-pentane':6.0,             'propane, 2-methyl-':200.0, 'butane, 2-methyl-':100.0,
        'ammonia':50.0,              'benzene':70.0,             'butadiene':100.0,
        'chlorobenzene':10.0,        '1,2-dichloroethane':50.0,  'ethanol':30.0,
        'methanol':70.0,             'propylene':150.0,          'propylene oxide':100.0,
        'toluene':20.0,              'vinyl chloride':6.0,       'p-xylene':5.0,
        'm-xylene':30.0,             'sulfur dioxide':100.0,     'butane':100.0,
        'sulfur hexafluoride':600.0, 'ethylene':759.8,           'ozone':759.8 * 40.0 / 1.0E6}
    renames = {'propane, 2-methyl-':'iso-butane', 'butane, 2-methyl-':'iso-pentane'}
    title = jcamp_dict['title']
    if (title in pressures_mmHg):
        p = pressures_mmHg[title]
        if (title in renames):
            jcamp_dict['title'] = renames[title]
    else:
        if debug: print('No pressure "p" value entry for ' + jcamp_dict['title'] + '. Using the default p = 150.0 ...')
        ## NOTE: kept as a 4-element list (not a dict) for backward compatibility with callers.
        if skip_nonquant: return([None]*4)
        p = 150.0

    ## Convert the absorbance units to cross-section in meters squared, for a gas at 1ppm at
    ## std atmospheric temperature and pressure.
    xsec = y * ppm * R * T / (p * ell)

    ## Finally, truncate the data to the range of interest. Each bound is applied only when it
    ## is supplied (the original compared against None, which fails when only one bound is given).
    if (wavemin != wavemax):
        okay = ones(len(x), 'bool')
        if (wavemin is not None): okay &= (x >= wavemin)
        if (wavemax is not None): okay &= (x <= wavemax)
        x = x[okay]
        y = y[okay]
        xsec = xsec[okay]

    ## Update the "x" and "y" values and add the "xsec" values.
    jcamp_dict['x'] = x
    jcamp_dict['xsec'] = xsec
    jcamp_dict['y'] = y
    return(jcamp_dict)
- ## =================================================================================================
def format_coord(self, x, y):
    '''
    Redefine the `format_coord()` method of matplotlib's Axes object --- this allows us to add
    a `z=...` in the status bar.

    The function *replaces* the existing method. So, calls to plt.imshow() will now use a
    different formatting function in the status bar.

    Parameters
    ----------
    self : matplotlib Axes handle
        The Axes object's `self` handle.
    x : int or float
        The x-position of the mouse cursor in data coordinates.
    y : int or float
        The y-position of the mouse cursor in data coordinates.

    Returns
    -------
    str
        The status-bar string, e.g. "x=..., y=..., z=...".
    '''
    ## Round the cursor position to the nearest pixel center.
    col = int(x+0.5)
    row = int(y+0.5)
    ## If we're not looking at an image, then use "x" as the abscissa and "y" as the ordinate.
    ## NOTE(review): relies on the private attribute Axes._current_image -- may break across
    ## matplotlib versions; confirm against the installed version.
    if (self._current_image == None):
        if (x == int(x)):
            ## NOTE(review): integer cursor positions are reported as (row, col), i.e. the
            ## labels are swapped relative to (x, y) -- presumably deliberate to match the
            ## transposed image convention used elsewhere in this file; confirm.
            return('x=%i, y=%i' % (row, col))
        else:
            #return('x=%1.4f, y=%1.4f' % (x, y))
            return('x=%f, y=%f' % (x, y))
    else:
        ## NOTE(review): matplotlib's extent is ordered (left, right, bottom, top); here
        ## indices 2,3 are taken as the "x" (row) limits and 0,1 as the "y" (col) limits,
        ## consistent with the row/col-swapped display convention above.
        xmin = self._current_image._extent[2]
        xmax = self._current_image._extent[3]
        ymin = self._current_image._extent[0]
        ymax = self._current_image._extent[1]
        ## A 3D data array means an RGB(A) color image; 2D means greyscale.
        iscolor = (self._current_image._A.ndim == 3)
        if iscolor:
            (Nx,Ny,_) = self._current_image._A.shape
        else:
            (Nx,Ny) = self._current_image._A.shape
        if (col >= ymin) and (col < ymax) and (row >= xmin) and (row < xmax):
            #print('(xmin,xmax,ymin,ymax)=', (xmin,xmax,ymin,ymax))
            ## Look up the underlying data value(s) at the cursor position.
            if iscolor:
                z = self._current_image._A[row,col,:]
            else:
                z = self._current_image._A[row,col]
            ## Check if the image is RGB color, and if so then make sure that `z` is an integer.
            ## (The try/except guards against non-numeric or masked values.)
            try:
                if iscolor:
                    isint = (z[0] == int(z[0]))
                else:
                    isint = (z == int(z))
            except Exception:
                isint = False
            ## Format the readout with integer or float fields as appropriate.
            if iscolor and isint:
                return('x=%i, y=%i, z=(%i,%i,%i)' % (row, col, z[0], z[1], z[2]))
            elif iscolor and not isint:
                return('x=%i, y=%i, z=(%f,%f,%f)' % (row, col, z[0], z[1], z[2]))
            elif isint:
                return('x=%i, y=%i, z=%i' % (row, col, z))
            else:
                #return('x=%i, y=%i, z=%1.4f' % (row, col, z))
                return('x=%i, y=%i, z=%f' % (row, col, z))
        else:
            ## Cursor is outside the image data: report position only (row/col swapped, as above).
            return('x=%i, y=%i'%(y, x))
## Now redefine the default formatting function.
## Module-level monkeypatch: every Axes created after this import will use the format_coord()
## defined above. Wrapped in try/except so this module still imports when matplotlib is absent.
try:
    from matplotlib.axes import Axes
    Axes.format_coord = format_coord
except Exception:
    pass
- #def imshow(image, ax=None, box=None, title=None, **kwargs):
- # '''
- # Use matplotlib's `imshow()` function but add a `z=...` in the status bar.
- #
- # The function returns three items in a dictionary: the numpy vectors `x` and `y` giving
- # the abscissa and ordinate data, and `info`, a dictionary containing the header information.
- #
- # Parameters
- # ----------
- # image : ndarray
- # The image to display.
- # ax : matplotlib axis handle, optional
- # The axis handle.
- # box : ndarray
- # A 4-element array of (xlo,xhi,ylo,yhi) rectangle drawing coords.
- # '''
- #
- # import matplotlib.pyplot as plt
- #
- # image = asarray(image)
- # iscolor = (image.ndim == 3)
- # if (box != None):
- # box = asarray(box)
- # assert (alen(box) == 4), 'The input "box" should have 4 elements, not ' + str(alen(box)) + '.'
- #
- # if (ax == None):
- # ax = plt.gca()
- #
- # if ('extent' in kwargs):
- # xmin = kwargs['extent'][0]
- # xmax = kwargs['extent'][1]
- # ymin = kwargs['extent'][2]
- # ymax = kwargs['extent'][3]
- # else:
- # xmin = 0
- # xmax = Nx
- # ymin = 0
- # ymax = Ny
- #
- # def myformat_coord(x, y):
- # col = int(x+0.5)
- # row = int(y+0.5)
- # if iscolor:
- # (Nx,Ny,_) = image.shape
- # else:
- # (Nx,Ny) = image.shape
- #
- # if (col >= ymin) and (col < ymax) and (row >= xmin) and (row < xmax):
- # print('(xmin,xmax,ymin,ymax)=', (xmin,xmax,ymin,ymax))
- # if iscolor:
- # z = image[row,col,:]
- # else:
- # z = image[row,col]
- #
- # ## Check if the image is RGB color, and if so then make sure that `z` is an integer.
- # if iscolor or (z == int(z)):
- # if iscolor:
- # return('x=%i, y=%i, z=(%i,%i,%i)' % (row, col, z[0], z[1], z[2]))
- # else:
- # return('x=%i, y=%i, z=%i' % (row, col, z))
- # else:
- # return('x=%i, y=%i, z=%1.4f' % (row, col, z))
- # else:
- # return('x=%i, y=%i'%(y, x))
- #
- # plt.imshow(image, **kwargs)
- # ax.format_coord = myformat_coord
- # plt.axis('tight')
- #
- # if (title != None):
- # plt.title(title)
- #
- # if (box != None):
- # xlo = box[0]
- # xhi = box[1]
- # ylo = box[2]
- # yhi = box[3]
- # ax.plot([ylo,yhi,yhi,ylo,ylo], [xlo,xlo,xhi,xhi,xlo], 'r-')
- #
- # return
- ## =================================================================================================
def find_nearest_brackets(x, value):
    '''
    Find the indices of the nearest pair of values to a search value in an ordered vector.

    Parameters
    ----------
    x : ndarray
        An ordered monotonic vector (length >= 2) inside which to search for `value`.
    value : (int,float)
        The value to search for.

    Returns
    -------
    [idx] : list of one int
        The index location of the exact value (if an exact match is found).
    [idx,idx+1] : list of two ints
        The pair of indices bracketing where the value falls. When `value` lies outside the \
        range of `x`, the nearest valid end pair is returned (the original code returned \
        invalid indices [-1,0] below the range and [N-1,N] above it).
    '''
    x = asarray(x)
    idx = (abs(x - value)).argmin()
    if (x[idx] == value):
        return([idx])
    if (x[idx] > value):
        lo = idx - 1
    else:
        lo = idx
    ## FIX: clamp so that both returned indices are valid even when `value` falls outside
    ## the data range (no negative wrap-around, no off-the-end index).
    lo = max(0, min(lo, len(x) - 2))
    return([lo, lo + 1])
- ## =================================================================================================
def is_float(s):
    '''
    Check if a string (or list of strings) describes a (list of) floating-point type of number.

    Parameters
    ----------
    s : str or list of str
        The string or list of strings to analyze.

    Returns
    -------
    bool or list of bool
        For a single string: whether it parses as a float. For a list/tuple of strings: a \
        list of per-element results (an empty input yields an empty list; the original code \
        raised an uncaught TypeError on an empty list).

    Raises
    ------
    TypeError
        If `s` is neither a string nor a list/tuple of strings.
    '''
    if isinstance(s, (tuple, list)):
        if not all(isinstance(i, str) for i in s):
            raise TypeError("Input 's' is not a list of strings")
        ## FIX: the original special-cased len(s)==0 and then called float() on the list
        ## itself, which raises an uncaught TypeError. An empty input now simply produces
        ## an empty result list.
        res = []
        for item in s:
            try:
                float(item)
                res.append(True)
            except ValueError:
                res.append(False)
        return(res)
    else:
        if not isinstance(s, str):
            raise TypeError("Input 's' is not a string")
        try:
            float(s)
            return(True)
        except ValueError:
            return(False)
- ## =================================================================================================
def getcolors():
    '''
    Build a palette of 16 discernably different colors for use in drawing plots.

    Returns
    -------
    mycolors : list of lists
        A 16x4 list of colors; each entry is an RGBA 4-vector (R,G,B,A) of floats.
    '''
    rgb_palette = [
        (0.0, 0.0, 0.0),   ## black
        (1.0, 0.0, 1.0),   ## fuchsia
        (0.0, 0.0, 0.5),   ## navy blue
        (0.0, 0.0, 1.0),   ## blue
        (0.0, 0.5, 0.5),   ## teal
        (0.0, 1.0, 1.0),   ## aqua
        (0.0, 0.5, 0.0),   ## dark green
        (0.0, 1.0, 0.0),   ## lime green
        (0.5, 0.5, 0.0),   ## olive green
        (1.0, 1.0, 0.0),   ## yellow
        (1.0, 0.5, 0.0),   ## orange
        (1.0, 0.0, 0.0),   ## red
        (0.5, 0.0, 0.0),   ## maroon
        (0.5, 0.0, 0.5),   ## purple
        (0.7, 0.7, 0.7),   ## bright grey
        (0.4, 0.4, 0.4)]   ## dark grey
    ## Every palette entry is fully opaque (alpha = 1.0).
    return([[r, g, b, 1.0] for (r, g, b) in rgb_palette])
- ## =================================================================================================
def get_poly_coeffs(x, g, order):
    '''
    Perform an nth order polynomial fit to a curve and return the coefficients.

    The returned coefficient vector "c" fits the functional form \
    g = c[0] + c[1]*x + c[2]*x^2 + c[3]*x^3 + ...

    Parameters
    ----------
    x : ndarray
        The data abscissa.
    g : ndarray
        The data ordinate.
    order: int
        The integer order (>0) of polynomial to fit.

    Returns
    -------
    coeffs : ndarray
        The order+1 length vector of polynomial coefficients.
    '''
    ## Assemble the Vandermonde-style design matrix, one column per power of x
    ## (the zeroth column is all ones).
    design = column_stack([x**power for power in arange(order+1)])
    ## Least-squares solution via the Moore-Penrose pseudoinverse.
    return(dot(linalg.pinv(design), g))
- ## =================================================================================================
def polyeval_Horner(x, poly_coeffs):
    '''
    Use Horner's rule for polynomial evaluation.

    Assume a polynomial of the form \
    p = c[0] + (c[1] * x) + (c[2] * x**2) + (c[3] * x**3) + ... + (c[N] * x**N).

    Parameters
    ----------
    x : ndarray
        The abscissa at which to evaluate the polynomial.
    poly_coeffs : ndarray or sequence of float
        The vector of polynomial coefficients.

    Returns
    -------
    p : ndarray
        The polynomial evaluated at the points given in x.
    '''
    ## FIX: numpy.alen() was removed from numpy (>= 1.23); use len()/shape instead.
    x = asarray(x)
    p = zeros(x.shape)
    ## Horner's rule: start from the highest-order coefficient and repeatedly
    ## multiply-and-add down to c[0].
    for n in arange(len(poly_coeffs)-1, -1, -1):
        p = poly_coeffs[n] + (x * p)
    return(p)
- ## =================================================================================================
def get_parallax_mask(dcb, camera_pair1, camera_pair2, thresh=2.5, abs_thresh=None, erosions=0, dilations=0,
                      saturation_mask=None, smooth=0, debug=False):
    '''
    Calculate the parallax mask from the datacube.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) multiplex datacube.
    camera_pair1 : tuple of two ints
        The two cameras to use for calculating the horizontal difference image (as input to \
        the parallax estimator).
    camera_pair2 : tuple of two ints
        The two cameras to use for calculating the vertical difference image (as input to \
        the parallax estimator).
    thresh : float
        The threshold value above which to say a given pixel belongs to the parallax mask. \
        This is calculated as distance from the mean in units of standard deviations.
    abs_thresh : float, optional
        The absolute threshold, given in distance from 0.0 and not the mean. When this value \
        is defined, it overrides the relative threshold.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    saturation_mask : ndarray, optional
        An optional (Nx,Ny) mask indicating which pixels of the camera pairs are saturated.
    smooth : int
        The smoothing kernel size (0 = no smoothing).
    debug : bool
        Whether to show intermediate images.

    Returns
    -------
    parallax_mask : ndarray
        The boolean array parallax mask (True where parallax is above the detection threshold).
    '''
    from scipy import ndimage
    ## FIX: the "scipy.ndimage.morphology" namespace is deprecated/removed; import from
    ## scipy.ndimage directly.
    from scipy.ndimage import binary_erosion, binary_dilation
    import numpy.ma as ma

    erosions = int(erosions)
    dilations = int(dilations)
    ## 3x3 structuring element for the morphology steps. (Renamed from "struct", which
    ## shadowed the file-level "struct" module import.)
    morph_struct = ones((3,3), 'bool')

    diff1 = dcb[:,:,camera_pair1[0]] - dcb[:,:,camera_pair1[1]]
    diff2 = dcb[:,:,camera_pair2[0]] - dcb[:,:,camera_pair2[1]]
    if (smooth > 0):
        diff1 = ndimage.uniform_filter(diff1, smooth)
        diff2 = ndimage.uniform_filter(diff2, smooth)

    (Nx,Ny) = diff1.shape
    supermask = zeros((Nx,Ny), 'bool')
    ## FIX: "saturation_mask != None" is an elementwise comparison on an ndarray and raises
    ## an ambiguous-truth-value error; use "is not None".
    if saturation_mask is not None:
        supermask[saturation_mask] = 1

    ## Mask out saturated pixels so they do not bias the statistics.
    diff1_ma = ma.array(diff1, mask=supermask)
    diff2_ma = ma.array(diff2, mask=supermask)

    ## FIX: the original computed the thresholds and the masks in two separate, partially
    ## duplicated if-blocks; they are merged here with identical results.
    if (abs_thresh is None):
        ## Relative threshold: distance from the mean in units of standard deviations.
        center1 = mean(diff1_ma)
        center2 = mean(diff2_ma)
        parallax1_mask = (abs(diff1_ma - center1) > (thresh * std(diff1_ma)))
        parallax2_mask = (abs(diff2_ma - center2) > (thresh * std(diff2_ma)))
    else:
        ## Absolute threshold: distance from zero.
        parallax1_mask = (abs(diff1_ma) > abs_thresh)
        parallax2_mask = (abs(diff2_ma) > abs_thresh)

    parallax_mask = (parallax1_mask | parallax2_mask)

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(diff1)
        plt.title('parallax diff image 1 (input to mask)')
        plt.figure()
        plt.imshow(diff2)
        plt.title('parallax diff image 2 (input to mask)')
        plt.figure()
        plt.imshow(parallax1_mask)
        plt.title('parallax mask 1')
        plt.figure()
        plt.imshow(parallax2_mask)
        plt.title('parallax mask 2')
        plt.figure()
        plt.imshow(parallax_mask)
        plt.title('combined parallax mask - before morphology')

    if (erosions > 0):
        parallax_mask = binary_erosion(parallax_mask, structure=morph_struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(parallax_mask)
            plt.title('parallax_mask - after erosion')
    if (dilations > 0):
        parallax_mask = binary_dilation(parallax_mask, structure=morph_struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(parallax_mask)
            plt.title('parallax_mask - after dilation')
    if debug:
        plt.show()

    ## Never report parallax at saturated pixels.
    parallax_mask[supermask] = 0
    return(parallax_mask)
- ## =================================================================================================
def get_vismotion_mask(meas_img, ref_img, thresh=2.5, dynamic_thresh=None, abs_thresh=None, saturation_mask=None, parallax_mask=None, erosions=0, dilations=0, smooth=0, debug=False):
    '''
    Calculate the visible-light motion mask from a measurement/reference image pair.

    The input images are assumed to be *already* cropped to the size of the infrared overlay
    region.

    Parameters
    ----------
    meas_img : ndarray
        The (vis_Nx,vis_Ny) greyscale or (vis_Nx,vis_Ny,3) RGB image from the measurement frame.
    ref_img : ndarray
        The (vis_Nx,vis_Ny) greyscale or (vis_Nx,vis_Ny,3) RGB image from the reference frame.
    thresh : float
        The threshold value above which to say a given pixel belongs to the motion mask, \
        calculated as distance from the mean in units of standard deviations.
    dynamic_thresh : float, optional
        A preset threshold (distance from the mean), e.g. derived from the standard deviation \
        averaged over 30 frames. Used when `abs_thresh` is not given.
    abs_thresh : float, optional
        An absolute threshold (distance from the mean). When defined, it overrides both the \
        relative and dynamic thresholds.
    saturation_mask : ndarray, optional
        A (Nx,Ny) boolean mask of saturated pixels to exclude.
    parallax_mask : ndarray, optional
        Unused here; accepted for interface compatibility with the IR variant.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    smooth : int
        The smoothing kernel size (0 = no smoothing).
    debug : bool
        Whether to show intermediate images.

    Returns
    -------
    motion_mask : ndarray
        The boolean array motion mask (True where motion is above the detection threshold).
    '''
    from scipy import ndimage
    ## FIX: the "scipy.ndimage.morphology" namespace is deprecated/removed.
    from scipy.ndimage import binary_erosion, binary_dilation
    import numpy.ma as ma
    ## FIX: scipy.stats.nanmean/nanstd were removed from scipy long ago; numpy provides
    ## equivalents.
    from numpy import nanmean, nanstd

    erosions = int(erosions)
    dilations = int(dilations)
    morph_struct = ones((3,3), 'bool')

    ## DAISI uses the following formula for converting an RGB image to greyscale.
    if (meas_img.ndim == 3):
        meas_grey = uint8(0.299*meas_img[:,:,0] + 0.587*meas_img[:,:,1] + 0.114*meas_img[:,:,2])
    else:
        meas_grey = meas_img
    if (ref_img.ndim == 3):
        ref_grey = uint8(0.299*ref_img[:,:,0] + 0.587*ref_img[:,:,1] + 0.114*ref_img[:,:,2])
    else:
        ref_grey = ref_img

    diff_img = float32(meas_grey) - float32(ref_grey)
    (Nx,Ny) = diff_img.shape
    supermask = zeros((Nx,Ny), 'bool')
    ## FIX: "!= None" on an ndarray is elementwise; use "is not None".
    if saturation_mask is not None:
        supermask[saturation_mask] = 1
    if (smooth > 0):
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    diff_img_ma = ma.array(diff_img, mask=supermask)
    ## Statistics ignore both masked (saturated) pixels and NaNs. (FIX: the original took the
    ## std of the *unmasked* difference image while the mean used the masked one.)
    centerval = nanmean(diff_img_ma.filled(nan))
    stdval = nanstd(diff_img_ma.filled(nan))

    ## Threshold priority: absolute > dynamic > relative (the original always computed the
    ## relative mask and then overwrote it).
    if abs_thresh is not None:
        motion_mask = (abs(diff_img_ma - centerval) > abs_thresh)
    elif dynamic_thresh is not None:
        motion_mask = (abs(diff_img_ma - centerval) > dynamic_thresh)
    else:
        motion_mask = (abs(diff_img_ma - centerval) > (thresh * stdval))

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(diff_img)
        plt.title('vis diff image (meas - ref)')
        plt.figure()
        plt.imshow(255*motion_mask)
        plt.title('motion mask - before morphology')

    if (erosions > 0):
        motion_mask = binary_erosion(motion_mask, structure=morph_struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*motion_mask)
            plt.title('motion_mask - after erosion')
    if (dilations > 0):
        motion_mask = binary_dilation(motion_mask, structure=morph_struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*motion_mask)
            plt.title('motion_mask - after dilation')
    if debug:
        plt.show()

    ## Never report motion at saturated pixels.
    motion_mask[supermask] = 0
    return(motion_mask)
- ## =================================================================================================
def get_irmotion_mask(dcb, ref_dcb, pos_channels, neg_channels, thresh=2.5, dynamic_thresh=None, erosions=0, dilations=0, abs_thresh=None,
                      threshmin=None, smooth=0, saturation_mask=None, parallax_mask=None, debug=False):
    '''
    Calculate the infrared motion mask from a measurement/reference datacube pair.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube from the measurement frame.
    ref_dcb : ndarray
        The (Nx,Ny,Nw) datacube from the reference frame.
    pos_channels : list of int
        Channels whose (meas - ref) difference is *added* to the difference image.
    neg_channels : list of int
        Channels whose (meas - ref) difference is *subtracted* from the difference image.
    thresh : float
        The threshold above which a given pixel belongs to the motion mask, calculated as \
        distance from the mean in units of standard deviations.
    dynamic_thresh : float, optional
        A preset threshold (distance from zero), e.g. derived from frame statistics. Used \
        when `abs_thresh` is not given.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    abs_thresh : float, optional
        An absolute threshold given as distance from 0.0; overrides the other thresholds.
    threshmin : float, optional
        A floor applied to the relative threshold.
    smooth : int
        The size of the smoothing kernel (0 = no smoothing).
    saturation_mask : ndarray, optional
        A (Nx,Ny) boolean mask of saturated pixels to exclude.
    parallax_mask : ndarray, optional
        A (Nx,Ny) boolean mask of parallax pixels to exclude.
    debug : bool
        Whether to show intermediate images.

    Returns
    -------
    motion_mask : ndarray
        The boolean array motion mask (True where motion is above the detection threshold).
    '''
    from scipy import ndimage
    ## FIX: the "scipy.ndimage.morphology" namespace is deprecated/removed.
    from scipy.ndimage import binary_erosion, binary_dilation
    import numpy.ma as ma
    ## FIX: scipy.stats.nanmean/nanstd were removed from scipy; numpy provides equivalents.
    from numpy import nanmean, nanstd

    erosions = int(erosions)
    dilations = int(dilations)
    morph_struct = ones((3,3), 'bool')

    (Nx,Ny,Nw) = dcb.shape
    diff_img = zeros((Nx,Ny))
    for c in pos_channels:
        diff_img += dcb[:,:,c] - ref_dcb[:,:,c]
    for c in neg_channels:
        diff_img -= dcb[:,:,c] - ref_dcb[:,:,c]
    if (smooth > 0):
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    supermask = zeros((Nx,Ny), 'bool')
    ## FIX: "!= None" on an ndarray is elementwise; use "is not None".
    if saturation_mask is not None:
        supermask[saturation_mask] = 1
    if parallax_mask is not None:
        supermask[parallax_mask] = 1
    diff_img_ma = ma.array(diff_img, mask=supermask)

    ## FIX: in the original if/elif chain the dynamic_thresh branch was unreachable
    ## ("abs_thresh == None" and "abs_thresh != None" covered every case). Priority is now:
    ## absolute > dynamic > relative.
    if abs_thresh is not None:
        motion_mask = (abs(diff_img_ma) > abs_thresh)
    elif dynamic_thresh is not None:
        motion_mask = (abs(diff_img_ma) > dynamic_thresh)
    else:
        ## Relative threshold from the masked statistics (NaNs and masked pixels excluded).
        centerval = nanmean(diff_img_ma.filled(nan))
        stdval = nanstd(diff_img_ma.filled(nan))
        imgthresh = thresh * stdval
        ## FIX: guard the comparison -- "imgthresh < None" raises a TypeError in Python 3.
        if (threshmin is not None) and (imgthresh < threshmin):
            imgthresh = threshmin
        motion_mask = (abs(diff_img_ma - centerval) > imgthresh)

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(diff_img)
        plt.title('IR diff image (meas - ref)')
        plt.figure()
        plt.imshow(255*motion_mask)
        plt.title('IR motion mask - before morphology')

    if (erosions > 0):
        motion_mask = binary_erosion(motion_mask, structure=morph_struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*motion_mask)
            plt.title('IR motion_mask - after erosion')
    if (dilations > 0):
        motion_mask = binary_dilation(motion_mask, structure=morph_struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*motion_mask)
            plt.title('IR motion_mask - after dilation')

    ## Never report motion at saturated or parallax pixels.
    motion_mask[supermask] = 0
    return(motion_mask)
- ## =================================================================================================
def mask_overlay(img, mask1=None, mask2=None, mask3=None, alpha=1.0):
    '''
    Overlay up to three detection masks on top of a greyscale or color image.

    Parameters
    ----------
    img : ndarray
        The (img_Nx,img_Ny) greyscale or (img_Nx,img_Ny,3) color image to use as the \
        background against which to apply the overlay.
    mask1 : ndarray, optional
        A (Nx,Ny) detection mask --- a boolean array no larger than `img` --- displayed in RED.
    mask2 : ndarray, optional
        A (Nx,Ny) detection mask --- a boolean array no larger than `img` --- displayed in GREEN.
    mask3 : ndarray, optional
        A (Nx,Ny) detection mask --- a boolean array no larger than `img` --- displayed in BLUE.
    alpha : float
        A value for tuning how much the background is suppressed relative to the masks. If \
        alpha==1.0, then the masks are rendered invisible. If alpha==0.0, then the masks show \
        as fully saturated pixels and the background pixel value (underneath the mask) is \
        rendered invisible.

    Returns
    -------
    overlay : ndarray
        The (img_Nx,img_Ny,3) uint8 image with the detection masks overlaid.
    '''
    ## FIX: "mask == None" on an ndarray is an elementwise comparison (ambiguous truth
    ## value); use "is None" throughout.
    if (mask1 is None) and (mask2 is None) and (mask3 is None):
        return(img)
    assert (0.0 <= alpha <= 1.0), 'Input parameter "alpha" must be from 0.0 to 1.0.'

    ## If the input image is not 8-bit, rescale it to 8-bit first before applying the overlay.
    ## FIX: guard against a constant image, which previously caused a division by zero.
    img = array(img)
    span = amax(img) - amin(img)
    if (span == 0):
        img = zeros(img.shape, 'uint8')
    else:
        img = uint8(255.0 * (img - amin(img)) / span)

    ## Normalize each provided mask to boolean, and record the common mask shape.
    if (mask1 is not None):
        mask1 = (asarray(mask1) == 1)
        (Nx,Ny) = mask1.shape
    if (mask2 is not None):
        mask2 = (asarray(mask2) == 1)
        (Nx,Ny) = mask2.shape
    if (mask3 is not None):
        mask3 = (asarray(mask3) == 1)
        (Nx,Ny) = mask3.shape

    ## Fill in any missing masks and make a combined mask.
    if (mask1 is None): mask1 = zeros((Nx,Ny), 'bool')
    if (mask2 is None): mask2 = zeros((Nx,Ny), 'bool')
    if (mask3 is None): mask3 = zeros((Nx,Ny), 'bool')
    mask = (mask1 | mask2 | mask3)

    ## FIX: unpack only the first two dimensions -- a color image has a third (channel)
    ## dimension, and the original two-name unpack raised a ValueError there.
    (img_Nx, img_Ny) = img.shape[:2]
    ## Center the (possibly smaller) mask region within the image.
    dx = (img_Nx - Nx) // 2
    dy = (img_Ny - Ny) // 2
    xoff = 0 if ((img_Nx - Nx) % 2 == 0) else 1
    yoff = 0 if ((img_Ny - Ny) % 2 == 0) else 1

    if (img.ndim == 3):
        r = float32(array(img[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff, 0]))
        g = float32(array(img[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff, 1]))
        b = float32(array(img[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff, 2]))
        overlay = array(img)
    else:
        r = float32(array(img[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff]))
        g = float32(array(img[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff]))
        b = float32(array(img[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff]))
        overlay = zeros((img_Nx,img_Ny,3), 'uint8')
        overlay[:,:,0] = img
        overlay[:,:,1] = img
        overlay[:,:,2] = img

    ## Dim the background under any mask, then add each mask's color at (1-alpha) strength.
    r[mask] = r[mask] * alpha
    g[mask] = g[mask] * alpha
    b[mask] = b[mask] * alpha
    r[mask1] += 255.0 * (1.0 - alpha)
    g[mask2] += 255.0 * (1.0 - alpha)
    b[mask3] += 255.0 * (1.0 - alpha)

    overlay[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff, 0] = uint8(r)
    overlay[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff, 1] = uint8(g)
    overlay[dx:img_Nx-dx-xoff, dy:img_Ny-dy-yoff, 2] = uint8(b)
    return(overlay)
- ## =================================================================================================
def draw_box(box, ax=None, color='r', w=0, **kwargs):
    '''
    Draw a rectangular box in the current figure.

    Parameters
    ----------
    box : ndarray
        Either a 4-element (xlo,xhi,ylo,yhi) vector, or an (N,4) array of such rows.
    ax : matplotlib.pyplot axis handle, optional
        The axis to draw into; defaults to the current axis.
    color : str
        The color of the box to plot.
    w : int
        When `box` is 2D, the row index of the box to draw.
    **kwargs
        Extra keyword arguments passed through to `ax.plot()`.
    '''
    box = asarray(box)
    ## Silently skip undefined boxes.
    if any(isnan(box)):
        return
    ## Shift by half a pixel so the box outlines the pixel edges.
    if (box.size == 4):
        (xlo,xhi,ylo,yhi) = box - 0.5
    else:
        (xlo,xhi,ylo,yhi) = box[w,:] - 0.5
    if ax is None:
        import matplotlib.pyplot as plt
        ax = plt.gca()
    ## FIX: the original "if (w == 0) ... else ..." branches were byte-identical; collapsed.
    ## Note the (x,y) swap: plot() takes columns first, matching the imaging convention here.
    ax.plot([ylo,yhi,yhi,ylo,ylo], [xlo,xlo,xhi,xhi,xlo], color+'-', **kwargs)
    return
- ## =================================================================================================
def get_vis_steam_mask(vis, ref_vis, thresh=1.0, erosions=0, dilations=0, smooth=0, debug=False): #use thresh=4.0 with meanclip
    '''
    Calculate the steam mask from a visible-light image pair.

    The input "vis" image is assumed to be *already* cropped to the size of the infrared
    overlay region.

    Parameters
    ----------
    vis : ndarray
        The (vis_Nx,vis_Ny) greyscale or (vis_Nx,vis_Ny,3) RGB image from the measurement frame.
    ref_vis : ndarray
        The (vis_Nx,vis_Ny) greyscale or (vis_Nx,vis_Ny,3) RGB image from the reference frame.
    thresh : float
        The threshold value above which to say a given pixel belongs to the steam mask, \
        calculated as distance from the mean in units of standard deviations.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    smooth : int
        The size of the smoothing kernel (0 = no smoothing).
    debug : bool
        Whether to show intermediate images.

    Returns
    -------
    steam_mask : ndarray
        The boolean array steam mask (True where steam is above the detection threshold).
    '''
    from scipy import ndimage
    ## FIX: the "scipy.ndimage.morphology" namespace is deprecated/removed.
    from scipy.ndimage import binary_erosion, binary_dilation
    ## FIX: scipy.stats.nanmean/nanstd were removed from scipy; numpy provides equivalents.
    from numpy import nanmean, nanstd

    ## DAISI uses the following formula for converting an RGB image to greyscale.
    if (vis.ndim == 3):
        ## FIX: the red-channel index was written as the octal literal "05" -- a SyntaxError
        ## in Python 3, and in any case the red channel is index 0.
        meas_grey = uint8(0.299*vis[:,:,0] + 0.587*vis[:,:,1] + 0.114*vis[:,:,2])
    else:
        meas_grey = vis
    if (ref_vis.ndim == 3):
        ref_grey = uint8(0.299*ref_vis[:,:,0] + 0.587*ref_vis[:,:,1] + 0.114*ref_vis[:,:,2])
    else:
        ref_grey = ref_vis

    erosions = int(erosions)
    dilations = int(dilations)
    morph_struct = ones((3,3), 'bool')

    diff_img = float32(meas_grey) - float32(ref_grey)
    if (smooth > 0):
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    (centerval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
    ## One-sided threshold: steam shows up as pixels *brighter* than the reference.
    steam_mask = ((diff_img - centerval) > (thresh * stdval))

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(diff_img)
        plt.title('vis diff image (meas - ref)')
        plt.figure()
        plt.imshow(255*steam_mask)
        plt.title('motion mask - before morphology')

    if (erosions > 0):
        steam_mask = binary_erosion(steam_mask, structure=morph_struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*steam_mask)
            plt.title('steam_mask - after erosion')
    if (dilations > 0):
        steam_mask = binary_dilation(steam_mask, structure=morph_struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*steam_mask)
            plt.title('steam_mask - after dilation')
    if debug:
        plt.show()

    return(steam_mask)
- ## =================================================================================================
def get_ir_steam_mask(dcb, ref_dcb, thresh=1.0, erosions=0, dilations=0, smooth=0, debug=False): #use thresh=7.0 with meanclip
    '''
    Calculate the steam mask from a measurement/reference datacube pair.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube from the measurement frame.
    ref_dcb : ndarray
        The (Nx,Ny,Nw) datacube from the reference frame.
    thresh : float
        The threshold value above which to say a given pixel belongs to the steam mask, \
        calculated as distance from the mean in units of standard deviations.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    smooth : int
        The size of the smoothing kernel (0 = no smoothing).
    debug : bool
        Whether to show intermediate images.

    Returns
    -------
    steam_mask : ndarray
        The boolean array steam mask (True where steam is above the detection threshold).
    '''
    from scipy import ndimage
    ## FIX: the "scipy.ndimage.morphology" namespace is deprecated/removed.
    from scipy.ndimage import binary_erosion, binary_dilation
    ## FIX: scipy.stats.nanmean/nanstd were removed from scipy; numpy provides equivalents.
    from numpy import nanmean, nanstd

    erosions = int(erosions)
    dilations = int(dilations)
    morph_struct = ones((3,3), 'bool')

    ## Collapse each datacube along the spectral axis and difference the two frames.
    diff_img = dcb.sum(axis=2) - ref_dcb.sum(axis=2)
    if (smooth > 0):
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    (centerval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
    ## Two-sided threshold: steam can raise or lower the apparent IR signal.
    steam_mask = (abs(diff_img - centerval) > (thresh * stdval))

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(diff_img)
        plt.title('IR diff image (meas - ref)')
        plt.figure()
        plt.imshow(255*steam_mask)
        plt.title('IR motion mask - before morphology')

    if (erosions > 0):
        steam_mask = binary_erosion(steam_mask, structure=morph_struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*steam_mask)
            plt.title('IR steam_mask - after erosion')
    if (dilations > 0):
        steam_mask = binary_dilation(steam_mask, structure=morph_struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*steam_mask)
            plt.title('IR steam_mask - after dilation')

    return(steam_mask)
- ## =================================================================================================
def get_spectral_mask(dcb, ref_dcb, pos_channels, neg_channels, abs_thresh=None, dynamic_thresh=None, thresh=2.5,
                      threshmin=0.0, erosions=0, dilations=0, parallax_mask=None,
                      saturation_mask=None, comparison_type=2, smooth=0, debug=False):
    '''
    Calculate the gas detection mask from the multiplex datacube.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube from the measurement frame.
    ref_dcb : ndarray
        The (Nx,Ny,Nw) datacube from the reference frame.
    pos_channels : list
        Channel indices whose (meas - ref) difference is added to the difference image.
    neg_channels : list
        Channel indices whose (meas - ref) difference is subtracted from the difference image.
    abs_thresh : float, optional
        An absolute threshold on the difference image. Takes priority over "dynamic_thresh"
        and the statistics-based threshold; the "threshmin" floor is NOT applied to it.
    dynamic_thresh : float, optional
        The threshold value above which to say a given pixel belongs to the detection mask. \
        This is a set value, calculated from the average of 30 frames standard deviation, \
        calculated as distance from 0. Used only when "abs_thresh" is None.
    thresh : float
        The number of (masked) standard deviations used when both abs_thresh and
        dynamic_thresh are None.
    threshmin : float
        A floor applied to the statistics-based and dynamic thresholds.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    parallax_mask : ndarray, optional
        Boolean mask of pixels excluded from the statistics and zeroed in the output.
    saturation_mask : ndarray, optional
        Boolean mask of saturated pixels excluded from the statistics and zeroed in the output.
    comparison_type : int
        Whether the comparison is double ended (2), positive single ended (1), negative
        single ended (-1), or logical not double-ended (0). Other values fall back to 2.
    smooth : int
        The size of the smoothing kernel (0 = no smoothing).

    Returns
    -------
    spectral_mask : ndarray (masked)
        The boolean array gas detection mask (excluded pixels forced to 0).
    '''
    from scipy import ndimage
    from scipy.ndimage.morphology import binary_erosion, binary_dilation
    import numpy.ma as ma   ## bugfix: "ma" was used below but never imported here
    #from meanclip import meanclip

    #force comparison to default to 2.
    if comparison_type not in [-1, 0, 1, 2]: comparison_type = 2
    erosions = int(erosions)
    dilations = int(dilations)
    struct = ones((3,3), 'bool')
    (Nx,Ny,Nw) = dcb.shape

    ## Build the spectral difference image from the requested channels.
    diff_img = zeros((Nx,Ny))
    for c in pos_channels:
        diff_img += dcb[:,:,c].reshape(Nx, Ny)
        diff_img -= ref_dcb[:,:,c].reshape(Nx, Ny)
    for c in neg_channels:
        diff_img -= dcb[:,:,c].reshape(Nx, Ny)
        diff_img += ref_dcb[:,:,c].reshape(Nx, Ny)
    if (smooth > 0):
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    ## Combine the exclusion masks. Use "is not None": comparing an ndarray to None
    ## with "!=" is an elementwise comparison and cannot be used in a boolean context.
    supermask = zeros((Nx,Ny), 'bool')
    if saturation_mask is not None:
        supermask[saturation_mask] = 1
    if parallax_mask is not None:
        supermask[parallax_mask] = 1
    diff_img_ma = ma.array(diff_img, mask=supermask)

    (centerval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
    #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))

    ## Select the threshold: absolute beats dynamic beats statistics-based.
    if abs_thresh is not None:
        imgthresh = abs_thresh      ## note: no "threshmin" floor here (matches prior behavior)
    elif dynamic_thresh is not None:
        imgthresh = dynamic_thresh
        if (imgthresh < threshmin): imgthresh = threshmin
    else:
        imgthresh = thresh * stdval
        if (imgthresh < threshmin): imgthresh = threshmin

    ## The comparison itself is identical no matter how the threshold was chosen.
    if comparison_type == 2:
        spectral_mask = abs(diff_img_ma - centerval) > imgthresh
    elif comparison_type == 1:
        spectral_mask = (diff_img_ma - centerval) > imgthresh
    elif comparison_type == 0:
        spectral_mask = abs(diff_img_ma - centerval) < imgthresh
    else:
        spectral_mask = (diff_img_ma - centerval) < (-1 * imgthresh)

    if debug:
        import matplotlib.pyplot as plt
        if parallax_mask is not None:
            plt.figure()
            plt.imshow(255*parallax_mask)
            plt.title('Parallax mask')
        plt.figure()
        plt.imshow(diff_img)
        if abs_thresh is None:
            (meanval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
            maxval = nanmax(ravel(diff_img))
            plt.title('spectral diff image, mean=%f, std=%f, max=%f, thresh=%f' % (meanval, stdval, maxval, imgthresh), fontsize=10)
        plt.figure()
        plt.imshow(255*spectral_mask)   ## bugfix: was "detection_mask", an undefined name here
        plt.title('gas detection mask - before morphology')
        plt.figure()
        vec = array(diff_img.flatten())
        meanval = nanmean(vec)
        medianval = nanmedian(vec)
        stdval = nanstd(vec)
        minval = nanmin(vec)
        maxval = nanmax(vec)
        diff_img[isnan(diff_img)] = 0.0 ## put pixel values back now that stats are done
        vec = array(diff_img.flatten())
        (n, bin_edges) = histogram(log10(vec), 40)   ## NOTE(review): result unused; log10 warns on values <= 0
        ax = plt.subplot(111)
        (counts, edges, patches) = ax.hist(vec, 40, zorder=4, edgecolor='0.5', log=True)
        statstr = ('median=%f\nstd=%f' % (medianval, stdval))
        ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
        ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])

    if (erosions > 0):
        spectral_mask = binary_erosion(spectral_mask, structure=struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*spectral_mask)
            plt.title('IR detection_mask - after erosion')
    if (dilations > 0):
        spectral_mask = binary_dilation(spectral_mask, structure=struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*spectral_mask)
            plt.title('IR detection_mask - after dilation')

    spectral_mask[supermask] = 0   ## excluded pixels can never alarm
    return(spectral_mask)
- ## =================================================================================================
def get_camdiff_detection_mask(dcb, ref_dcb, pos_channels, neg_channels, thresh=2.5, erosions=0, dilations=0, smooth=0, \
                               saturation_mask=None, parallax_mask=None, ir_motion_mask=None, vis_motion_mask=None, debug=False):
    '''
    Calculate the gas detection mask from a camera-difference image of the multiplex datacube.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube from the measurement frame.
    ref_dcb : ndarray
        The (Nx,Ny,Nw) datacube from the reference frame.
    pos_channels : list
        Channel indices whose (meas - ref) difference is added to the difference image.
    neg_channels : list
        Channel indices whose (meas - ref) difference is subtracted from the difference image.
    thresh : float
        The number of standard deviations beyond which a pixel is flagged as a detection.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    smooth : int
        The size of the smoothing kernel (0 = no smoothing).
    saturation_mask, parallax_mask, ir_motion_mask, vis_motion_mask : ndarray, optional
        Accepted for interface compatibility; not used by this function.

    Returns
    -------
    detection_mask : ndarray
        The boolean array gas detection mask.
    '''
    from scipy import ndimage
    from scipy.ndimage.morphology import binary_erosion, binary_dilation
    import numpy.ma as ma
    #from meanclip import meanclip

    n_erode = int(erosions)
    n_dilate = int(dilations)
    kernel = ones((3, 3), dtype=bool)
    (nx, ny, nw) = dcb.shape

    ## Accumulate the spectral difference image channel by channel.
    diff_img = zeros((nx, ny))
    for chan in pos_channels:
        diff_img += dcb[:,:,chan].reshape(nx, ny)
        diff_img -= ref_dcb[:,:,chan].reshape(nx, ny)
    for chan in neg_channels:
        diff_img -= dcb[:,:,chan].reshape(nx, ny)
        diff_img += ref_dcb[:,:,chan].reshape(nx, ny)
    if (smooth > 0):
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    ## Threshold against the NaN-tolerant scene statistics.
    centerval = nanmean(ravel(diff_img))
    stdval = nanstd(ravel(diff_img))
    detection_mask = (abs(diff_img - centerval) > (thresh * stdval))

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(diff_img)
        plt.title('spectral diff image meas(cam0 - cam1) - diff(cam0 - cam1)')
        plt.figure()
        plt.imshow(255*detection_mask)
        plt.title('gas detection mask - before morphology')

    if (n_erode > 0):
        detection_mask = binary_erosion(detection_mask, structure=kernel, iterations=n_erode)
        if debug:
            plt.figure()
            plt.imshow(255*detection_mask)
            plt.title('IR detection_mask - after erosion')
    if (n_dilate > 0):
        detection_mask = binary_dilation(detection_mask, structure=kernel, iterations=n_dilate)
        if debug:
            plt.figure()
            plt.imshow(255*detection_mask)
            plt.title('IR detection_mask - after dilation')
    return(detection_mask)
- ## =================================================================================================
def get_spectral_detection_mask(dcb, ref_dcb, pos_channels, neg_channels, abs_thresh=None, dynamic_thresh=None, thresh=2.5,
                                threshmin=0.0, erosions=0, dilations=0, parallax_mask=None,
                                ir_motion_mask=None, ir_spectral_mask=None, saturation_mask=None, smooth=0, debug=False):
    '''
    Calculate the gas detection mask from the multiplex datacube.

    Parameters
    ----------
    dcb : ndarray
        The (Nx,Ny,Nw) datacube from the measurement frame.
    ref_dcb : ndarray
        The (Nx,Ny,Nw) datacube from the reference frame.
    pos_channels : list
        Channel indices whose (meas - ref) difference is added to the difference image.
    neg_channels : list
        Channel indices whose (meas - ref) difference is subtracted from the difference image.
    abs_thresh : float, optional
        An absolute threshold on |difference| (no centering, no "threshmin" floor).
    dynamic_thresh : float, optional
        The threshold value above which to say a given pixel belongs to the detection mask. \
        This is a set value, calculated from the average of 30 frames standard deviation, \
        calculated as distance from 0. Used when "abs_thresh" is None.
    thresh : float
        The number of (masked) standard deviations, used when both abs_thresh and
        dynamic_thresh are None.
    threshmin : float
        A floor applied to the statistics-based and dynamic thresholds.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.
    parallax_mask : ndarray, optional
        Boolean mask of pixels excluded from the statistics and zeroed in the output.
    ir_motion_mask : ndarray, optional
        Boolean mask of IR-motion pixels excluded from the statistics and zeroed in the output.
    ir_spectral_mask : ndarray, optional
        Boolean mask of IR-spectral pixels excluded from the statistics and zeroed in the output.
    saturation_mask : ndarray, optional
        Boolean mask of saturated pixels excluded from the statistics and zeroed in the output.
    smooth : int
        The size of the smoothing kernel (0 = no smoothing).

    Returns
    -------
    detection_mask : ndarray (masked)
        The boolean array gas detection mask (excluded pixels forced to 0).
    '''
    from scipy import ndimage
    from scipy.ndimage.morphology import binary_erosion, binary_dilation
    import numpy.ma as ma
    #from meanclip import meanclip

    erosions = int(erosions)
    dilations = int(dilations)
    struct = ones((3,3), 'bool')
    (Nx,Ny,Nw) = dcb.shape
    diff_img = zeros((Nx,Ny))
    supermask = zeros((Nx,Ny), 'bool')

    ## Build the spectral difference image from the requested channels.
    for c in pos_channels:
        diff_img += dcb[:,:,c].reshape(Nx, Ny)
        diff_img -= ref_dcb[:,:,c].reshape(Nx, Ny)
    for c in neg_channels:
        diff_img -= dcb[:,:,c].reshape(Nx, Ny)
        diff_img += ref_dcb[:,:,c].reshape(Nx, Ny)
    if smooth:
        diff_img = ndimage.uniform_filter(diff_img, smooth)

    ## Combine the exclusion masks. Use "is not None": comparing an ndarray to None
    ## with "!=" is an elementwise comparison and cannot be used in a boolean context.
    if ir_motion_mask is not None:
        supermask[ir_motion_mask] = 1
    if ir_spectral_mask is not None:
        supermask[ir_spectral_mask] = 1
    if saturation_mask is not None:
        supermask[saturation_mask] = 1
    if parallax_mask is not None:
        supermask[parallax_mask] = 1
    diff_img_ma = ma.array(diff_img, mask=supermask)

    ## bugfix: was "mean"; use nanmean so NaN pixels don't poison the statistic
    ## (matches the nanstd it is paired with and the sibling mask functions).
    (centerval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
    if (abs_thresh is None) and (dynamic_thresh is None):
        #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
        imgthresh = thresh * stdval
        if (imgthresh < threshmin): imgthresh = threshmin
        detection_mask = abs(diff_img_ma - centerval) > imgthresh
    elif (dynamic_thresh is not None):
        imgthresh = dynamic_thresh
        if (imgthresh < threshmin): imgthresh = threshmin
        detection_mask = abs(diff_img_ma) > imgthresh      ## note: no centering in this branch
    else:
        imgthresh = abs_thresh
        detection_mask = abs(diff_img_ma) > imgthresh      ## note: no centering in this branch

    if debug:
        import matplotlib.pyplot as plt
        if ir_motion_mask is not None:
            plt.figure()
            plt.imshow(255*ir_motion_mask)
            plt.title('IR motion mask')
        if parallax_mask is not None:
            plt.figure()
            plt.imshow(255*parallax_mask)
            plt.title('Parallax mask')
        plt.figure()
        plt.imshow(diff_img)
        if abs_thresh is None:
            (meanval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
            maxval = nanmax(ravel(diff_img))
            plt.title('spectral diff image, mean=%f, std=%f, max=%f, thresh=%f' % (meanval, stdval, maxval, imgthresh), fontsize=10)
        plt.figure()
        plt.imshow(255*detection_mask)
        plt.title('gas detection mask - before morphology')
        plt.figure()
        vec = array(diff_img.flatten())
        meanval = nanmean(vec)
        medianval = nanmedian(vec)
        stdval = nanstd(vec)
        minval = nanmin(vec)
        maxval = nanmax(vec)
        diff_img[isnan(diff_img)] = 0.0 ## put pixel values back now that stats are done
        vec = array(diff_img.flatten())
        (n, bin_edges) = histogram(log10(vec), 40)   ## NOTE(review): result unused; log10 warns on values <= 0
        ax = plt.subplot(111)
        (counts, edges, patches) = ax.hist(vec, 40, zorder=4, edgecolor='0.5', log=True)
        statstr = ('median=%f\nstd=%f' % (medianval, stdval))
        ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
        ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])

    if (erosions > 0):
        detection_mask = binary_erosion(detection_mask, structure=struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*detection_mask)
            plt.title('IR detection_mask - after erosion')
    if (dilations > 0):
        detection_mask = binary_dilation(detection_mask, structure=struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*detection_mask)
            plt.title('IR detection_mask - after dilation')

    detection_mask[supermask] = 0   ## excluded pixels can never alarm
    return(detection_mask)
- ## =================================================================================================
def get_alarm_mask(detection_mask, ir_motion_mask=None, vis_motion_mask=None, parallax_mask=None, ir_steam_mask=None,
                   vis_steam_mask=None, erosions=0, dilations=0, debug=False):
    '''
    Calculate the alarm mask.

    Parameters
    ----------
    detection_mask : (bool ndarray)
        An (Nx,Ny) binary image 0=gas absent, 1=gas present.
    ir_motion_mask : (bool ndarray)
        An (Nx,Ny) binary image 0=motion absent, 1=motion present.
    vis_motion_mask : (bool ndarray)
        An (Nx,Ny) binary image 0=motion absent, 1=motion present.
    parallax_mask : (bool ndarray)
        An (Nx,Ny) binary image 0=parallax absent, 1=parallax present.
    ir_steam_mask : (bool ndarray)
        An (Nx,Ny) binary image 0=steam absent, 1=steam present.
    vis_steam_mask : (bool ndarray)
        An (Nx,Ny) binary image 0=steam absent, 1=steam present.
    erosions : int
        The number of morphological erosion steps to use.
    dilations : int
        The number of morphological dilation steps to use.

    Returns
    -------
    alarm_mask : ndarray
        The boolean alarm mask: detection pixels not vetoed by any nuisance mask.

    Notes
    -----
    Do we want to change the definitions of the input masks so that they correspond to the DAISI definitions?
    (That is, should we use their logical inverse from the definitions given above?)
    '''
    from scipy.ndimage.morphology import binary_erosion, binary_dilation
    struct = ones((3,3), 'bool')

    ## Veto any pixel flagged by one of the nuisance masks. Test with "is not None":
    ## comparing an ndarray to None with "!=" is elementwise and breaks the "if".
    mask = ones_like(detection_mask)
    for veto_mask in (ir_motion_mask, vis_motion_mask, parallax_mask, ir_steam_mask, vis_steam_mask):
        if veto_mask is not None:
            mask = mask * logical_not(veto_mask)
    alarm_mask = detection_mask * mask

    if debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(255*alarm_mask)
        plt.title('Alarm mask - before morphology')
    if (erosions > 0):
        alarm_mask = binary_erosion(alarm_mask, structure=struct, iterations=erosions)
        if debug:
            plt.figure()
            plt.imshow(255*alarm_mask)
            plt.title('Alarm mask - after erosion')
    if (dilations > 0):
        alarm_mask = binary_dilation(alarm_mask, structure=struct, iterations=dilations)
        if debug:
            plt.figure()
            plt.imshow(255*alarm_mask)
            plt.title('Alarm mask - after dilation')
    return(alarm_mask)
- ## =================================================================================================
def set_saturated_to_nan(dcb_input, ceiling_value, cameras=None):
    '''
    Set any saturated pixels (as determined by the ceiling value) to NaN.

    Parameters
    ----------
    dcb_input : ndarray
        The datacube for which we want to detect saturation and perform pixel replacement.
        Should be float-typed: assigning NaN into an integer-typed cube raises ValueError.
    ceiling_value : float
        The value above which we want to say the pixels are saturated.
    cameras : list of int, optional
        If given, only these camera planes (third-axis indices) are thresholded;
        otherwise the whole cube is thresholded at once.

    Returns
    -------
    dcb : ndarray
        A copy of the datacube with saturated pixels replaced with NaNs (the input
        array is left unmodified).
    '''
    dcb = array(dcb_input)   ## copy, so the caller's cube is untouched
    ceiling_value = float(ceiling_value)
    ## bugfix idiom: "is not None" rather than "!= None" — the latter is an
    ## elementwise comparison when "cameras" is an ndarray and breaks the "if".
    if cameras is not None:  ## apply to only a list of the camera numbers
        for w in cameras:
            img = dcb[:,:,w]
            img[img > ceiling_value] = nan
            dcb[:,:,w] = img
    else:
        dcb[dcb > ceiling_value] = nan
    return(dcb)
- ## =================================================================================================
def get_downsampled_gas_spectrum(gasname, waves_um=None, serial_number=2, normtype='channel_width', debug=False):
    '''
    Get a gas spectrum sampled onto the GCI's spectral channels.

    Parameters
    ----------
    gasname : str
        The name of the gas whose spectrum you want. This name informs what filename in the repository \
        to look for: "cal/gas_spectra/gasname.jdx".
    waves_um : ndarray, optional
        The wavelengths at which to sample the spectrum prior to calculating the downsampling integral.
    serial_number : int
        The serial number of the GCI system to query.
    normtype : string ['L2','max=1','channel_width']
        What type of normalization to apply to the spectrum. With normtype='L2', the resulting vector will \
        have unit norm (i.e. ideal for cross-correlation measurement). With normtype='max=1', it might \
        be ideal for display. With normtype='channel_width', the downsampled spectrum values should be \
        scaled to the values given by the input continuous spectrum (likely ideal for display).

    Returns
    -------
    downspectrum : ndarray
        The gas spectrum integrated onto each spectral channel (length = nchannels).
    channel_boundaries : ndarray
        The channel boundary wavelengths (length = nchannels + 1).
    gas_fullspectrum : ndarray
        The high-resolution gas spectrum sampled at "waves".
    waves : ndarray
        The wavelength samples used for the integration.

    Example
    -------
    (methane_spectrum, channel_boundaries, fullspectrum, waves) = get_downsampled_gas_spectrum('methane', serial_number=2, debug=False)
    '''
    from numpy.linalg import norm
    filterlist = get_filterlist(serial_number=serial_number)
    filter_data = get_filter_spectra(waves_um, filterlist=filterlist)
    waves = filter_data['waves']
    dw = mean(gradient(waves))
    ## The next step is to transform the measurement cube to a spectral cube.
    (H, channel_basisfcns, edgewaves, channel_boundaries) = calculate_hmatrix(filterlist, filter_data, return_data_structure=True)
    ## bugfix: numpy.alen() was deprecated and removed (numpy >= 1.23); len() is equivalent for 1D arrays.
    nchannels = len(channel_boundaries) - 1
    gas_data = get_gas_spectra(waves=waves, gaslist=[gasname], skip_nonquant=False, debug=debug)
    gas_fullspectrum = gas_data[gasname]
    downspectrum = zeros(nchannels)
    for n in arange(nchannels):
        ## Integrate the continuous spectrum against channel n's basis function.
        integral = sum(gas_fullspectrum * channel_basisfcns[:,n] * dw)
        if (normtype == 'channel_width'):
            normalizer = sum(channel_basisfcns[:,n] * dw)
        else:
            normalizer = 1.0
        downspectrum[n] = integral / normalizer
    if (normtype == 'max=1'):
        downspectrum = downspectrum / amax(downspectrum)
    elif (normtype == 'L2'):
        downspectrum = downspectrum / norm(downspectrum)
    return(downspectrum, channel_boundaries, gas_fullspectrum, waves)
- ## =================================================================================================
def gci_file_ismotioncalib(filename, debug=False):
    '''
    Read a `*.gci` datafile and report whether its cube was captured while the system
    was in-motion or in-calibration (via `gci_msg_ismotioncalib()` on the parsed message).

    Parameters
    ----------
    filename : str
        The name of the `*.gci` file to read.

    Returns
    -------
    isbusy : (boolean or list of boolean plus 2-tuple)
        "isbusy" = Whether the current cube is in-motion or in-calib. \
        "pantilt_setpoint" = 2-tuple of the pan-tilt setpoint coordinates.

    Example
    -------
    ## Search for a reference frame by seeing the switch not_busy -> isbusy.
    dir = '/home/user/DATA/'
    files = sorted(glob(dir+'*.gci'))
    wasbusy = False
    for f in files:
        isbusy = gci_file_ismotioncalib(f, debug=False)
        if wasbusy and not isbusy:
            ref_file = f
            break
        wasbusy = isbusy
    '''
    import struct
    assert isinstance(filename, str), 'The input "filename" must be a string type.'
    assert os.path.exists(filename), 'The input filename ("' + filename + '") does not exist.'
    ## Slurp the whole file; the on-disk format mirrors a ZMQ multipart message dump.
    f = open(filename, 'rb')
    raw = f.read()
    f.close()
    ## NOTE(review): sys.getsizeof() includes Python object overhead, so this is a few
    ## bytes larger than len(raw); it is only used for the debug printout below.
    total_nbytes = sys.getsizeof(raw)
    if debug: print('total_nbytes=', total_nbytes)
    n = 0
    msg = []
    ## All length fields are big-endian uint32 ('>I'). First: the number of message parts.
    nparts = uint32(struct.unpack('>I', raw[n:n+4]))[0]
    if debug: print('nparts=', nparts)
    n += 4
    ## The first message part is the string label giving the ZMQ subscription topic:
    ## a uint32 length followed by that many raw characters.
    strlen = uint32(struct.unpack('>I', raw[n:n+4]))[0]
    if debug: print('strlen=', strlen)
    n += 4
    topic = ''.join(struct.unpack('>'+str(strlen)+'c', raw[n:n+strlen]))
    msg.append(topic)
    if debug: print('subscription topic=', topic)
    n += strlen
    ## Read the next part (length-prefixed payload). Only this one part is consumed here —
    ## the JSON metadata payload — even if "nparts" indicates more parts follow.
    msg_nbytes = uint32(struct.unpack('>I', raw[n:n+4]))[0]
    if debug: print('msg_nbytes=', msg_nbytes)
    n += 4
    msg.append(raw[n:n+msg_nbytes])
    ## Delegate the actual metadata inspection to the message-level helper.
    isbusy = gci_msg_ismotioncalib(msg, debug=debug)
    return(isbusy)
- ## =================================================================================================
def gci_msg_ismotioncalib(msg, give_pantilt_setpoint=False, debug=False):
    '''
    Return True/False about whether the current msg indicates the system is in-motion or in-calib.

    If one cube is in-motion or in-calib while the next cube is not, the second cube is a
    reference cube.

    Parameters
    ----------
    msg : list of str
        The message delivered by the GCI's zmq network interface, or through the contents of a \
        `*.gci` file. Element 0 is the topic; element 1 is the JSON payload.
    give_pantilt_setpoint : bool, optional
        Whether or not to include the pan-tilt setpoint values in the return.

    Returns
    -------
    isbusy : (boolean or list of boolean plus 2-tuple)
        "isbusy" = Whether the current cube is in-motion or in-calib. \
        "pantilt_setpoint" = 2-tuple of the pan-tilt setpoint coordinates.
    '''
    assert isinstance(msg, (list, tuple, ndarray))
    topic = msg[0]
    if (topic != '') and debug: print('topic = ' + topic)
    payload = json.loads(msg[1])
    metadata = payload['gci-meta']['metadata']
    inmotion = metadata['pan-tilt-in-motion']
    incalib = (metadata['calibration-state'].lower() != 'calibration-idle')
    isbusy = inmotion or incalib
    if not give_pantilt_setpoint:
        return(isbusy)
    pantilt_setpoint = float32(metadata['pan-tilt-setpoint'])
    return(isbusy, pantilt_setpoint)
- ## =================================================================================================
def to_json(python_object):
    '''
    Allows Numpy arrays to be JSON-seralizable.

    Parameters
    ----------
    python_object : any
        The object to be serialized. Only ndarray inputs are supported; anything
        else raises TypeError.

    Returns
    -------
    d : dict
        A Python dictionary describing the object structure and contents.

    Example
    -------
    print(json.dumps(array(data), default=gci.to_json))
    '''
    if not isinstance(python_object, ndarray):
        raise TypeError(repr(python_object) + ' is not JSON serializable')
    return({'__class__':'ndarray', '__value__':python_object.tolist()})
- ## =================================================================================================
def serial_number_from_filterlist(filterlist, debug=False):
    '''
    Calculate the serial number of the system given knowledge of the filterlist.

    Parameters
    ----------
    filterlist : list of str
        The list of filter present on the system, given in order of the cameras present.

    Returns
    -------
    sn : int
        The serial number of the system.
    '''
    ## Marker filters that uniquely identify a system, checked in priority order.
    signatures = (('SG_LP8110_06Aug12_0', 3),
                  ('NOC_SP11578-000278_02Jan13_0', 2))
    for (marker, sn) in signatures:
        if marker in filterlist:
            if debug: print('GCI serial number = ' + str(sn))
            return(sn)
    raise ValueError('The filterlist does not contain a known library of filters.')
- ## =================================================================================================
def send_keepalive_command(topic, debug=False):
    '''
    Send a command to the detection algorithm process to emit detection result messages on a given topic.

    Parameters
    ----------
    topic : str
        The topic you want to listen for.
    '''
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.connect('tcp://10.1.10.11:9550')
    ## Assemble the four-part multipart message: topic, marker, payload, return info.
    msg = [topic,
           'JSON_ASYNC_REQ',
           json.dumps({'cmd-name':'keep-alive'}),            ## payload
           json.dumps({'return_addr':'script-keep-alive'})]  ## return info
    if debug: print('Sending keep alive command:', msg)
    pub.send_multipart(msg)
    pub.close()
    return
- ## =================================================================================================
def send_trigger_cal_command(use_1pt_or_2pt, verbose=False):
    '''
    Send an ASYNC command to the local network to perform a 2pt calibration.

    This is a temporary thing until the "send_command" function is back up.

    Parameters
    ----------
    use_1pt_or_2pt : int, (1,2)
        Whether to request a 1pt calibration or a 2pt calibration.

    Raises
    ------
    ValueError
        If `use_1pt_or_2pt` is neither 1 nor 2.
    '''
    use_1pt_or_2pt = int(use_1pt_or_2pt)
    if (use_1pt_or_2pt == 1):
        sub_topic = 'jar-trigger-1pt'
    elif (use_1pt_or_2pt == 2):
        sub_topic = 'jar-trigger-2pt'
    else:
        ## bugfix: previously an invalid value left "sub_topic" unbound, producing a
        ## confusing NameError further down instead of a clear error here.
        raise ValueError('use_1pt_or_2pt must be 1 or 2, but got ' + str(use_1pt_or_2pt))
    ctx = zmq.Context()
    pub = ctx.socket( zmq.PUB )
    pub.connect('tcp://10.1.10.11:9550')
    topic = 'calibration-control'
    marker = 'JSON_ASYNC_REQ'
    ret_info = dict()
    ret_info['return_addr'] = sub_topic
    payload = dict()
    ## NOTE(review): the command name is 'trigger_2pt_calibration' even for the 1pt
    ## case; only the return topic differs. Presumably the receiver keys off the
    ## return topic — confirm against the calibration-control service.
    payload['cmd-name'] = 'trigger_2pt_calibration'
    request = [topic, marker, json.dumps(payload), json.dumps(ret_info)]
    if verbose:
        print('Sending ' + str(use_1pt_or_2pt) + 'pt calibration command:')
        print(request)
    pub.send_multipart(request)
    pub.close()
    return
- ## =================================================================================================
- ## =================================================================================================
if __name__ == "__main__":
    ## When run as a script, this module has no standalone behavior; it only
    ## reports where the GCI library is installed.
    print('GCIDIR = ' + GCIDIR)
    print('NOTHING TO DO!')