PageRenderTime 47ms CodeModel.GetById 26ms RepoModel.GetById 0ms app.codeStats 0ms

/gci.py

https://bitbucket.org/rmallery/gci_algorithms
Python | 5993 lines | 5821 code | 47 blank | 125 comment | 58 complexity | 235216d8a21c72726dfe7d1fde242c86 MD5 | raw file
  1. #! /usr/bin/env python
  2. # -*- coding: UTF-8 -*-
  3. # pylint: disable-msg=C0301,C0321,R0912,R0913,R0914,R0915
  4. '''
  5. This module provides a library of functions for use with the GCI system.
  6. GLOBAL CONSTANTS
  7. ================
  8. GCIDIR
  9. GCI_SERIAL_NUMBER
  10. TODO
  11. ====
  12. 1. Make "serial_number" a required argument for a bunch of these functions in order to ensure
  13. consistency.
  14. FUNCTION DESCRIPTIONS
  15. =====================
  16. '''
  17. from __future__ import print_function, division
  18. from numpy import *
  19. import zmq
  20. import platform
  21. import os
  22. import sys
  23. import glob
  24. import struct
  25. import simplejson as json
  26. import pdb #zzz
  27. from scipy.stats import nanmean, nanmedian, nanstd
  28. #global GCI_SERIAL_NUMBER
  29. #global GCIDIR
  30. GCI_SERIAL_NUMBER = 2
  31. ## Note that os.path.realpath() here converts a relative path into an absolute one.
  32. thisfile = os.path.realpath(sys._current_frames().values()[0].f_globals['__file__'])
  33. GCIDIR = os.path.dirname(thisfile) + '/'
  34. __all__ = ['decode_gci_msg_meta','acquire_raw','acquire_data','acquire_dcb','acquire_vis','crop_dcb','crop_hypercube',
  35. 'show_dcb','get_offsets_and_gains','receive_command_replies','get_control_data','move_shutter',
  36. 'set_heater_setpoint','set_heater_state','replace_bad_pixels','degc_to_atsensor_radiance',
  37. 'gci_sequence_to_hcb','get_filter_spectra','get_filter_edge_wavelength','send_command',
  38. 'show_image_with_projections','dynamically_correct_dcb','read_gcifile_meta','read_gcifile',
  39. 'dcb_objtemp_to_atsensor_radiance','get_filterlist','acquire_manually_calibrated_hypercube',
  40. 'get_gas_spectra','calculate_hmatrix','get_filter_type',
  41. 'planck_blackbody','get_degc_to_atsensor_radiance_lut','dcb_atsensor_radiance_to_objtemp',
  42. 'read_rectanglesfile','update_rectangles_dict','write_rectanglesfile','draw_block_spectrum',
  43. 'get_mean_spect','dcb_region_stats','daisi_rect_to_xycoords','xycoords_to_daisi_rect','print_rects',
  44. 'xcorr','xcorr3d','convert_radiancecube_to_spectralcube','send_asynch_command','set_gci_serial_number',
  45. 'get_gci_serial_number','get_calib_dictionary','jcamp_reader','jcamp_spectrum','format_coord',
  46. 'find_nearest_brackets','is_float','getcolors','get_poly_coeffs','polyeval_Horner','get_parallax_mask',
  47. 'get_vismotion_mask','get_irmotion_mask','mask_overlay','draw_box','get_vis_steam_mask',
  48. 'get_ir_steam_mask','get_spectral_mask','get_detection_mask','get_spectral_detection_mask','set_saturated_to_nan','pullcube',
  49. 'gci_file_ismotioncalib','gci_msg_ismotioncalib','to_json','serial_number_from_filterlist',
  50. 'send_keepalive_command','send_trigger_cal_command']
  51. __docformat__ = 'restructuredtext en'
  52. ## =================================================================================================
def decode_gci_msg(msg, use_ir=True, use_vis=True, use_masks=True, save_header=False, debug=False):
    '''
    Convert a GCI message to a dictionary of numpy objects, decoding the metadata plus
    (optionally) any datacubes, visible images, and masks contained in the message.

    Parameters
    ----------
    msg : list of str
        The message delivered by the GCI's zmq network interface, or through the contents of \
        a `*.gci` file
    use_ir : bool
        Whether to decode any datacubes. (Setting to False improves decoding speed.)
    use_vis : bool
        Whether to decode any visible images. (Setting to False improves decoding speed.)
    use_masks : bool
        Whether to decode any masks. (Setting to False improves decoding speed.)
    save_header : bool
        Whether to save the text-version of the metadata as "header".
    debug : bool
        Whether to print diagnostic information while decoding.

    Returns
    -------
    data : dict
        A dictionary containing all data elements from the decoded gci msg. If present in the gci \
        file, the list of dictionary keys can include

        * "cool_shutter_temperatures"
        * "heated_shutter_temperatures"
        * "timestamp"
        * "accumulated_frames_list"
        * "gain_cube_filename"
        * "offset_cube_filename"
        * "cropping_xy"
        * "filter_to_camera_trans"
        * "serial_number"
        * ...
    '''
    ## The "algorithm output" image uses different subscription topics, shown in msg[0],
    ## to tell which one is being sent at a given time.
    assert isinstance(msg, (list, tuple, ndarray))
    topic = msg[0]
    if (topic != '') and debug: print('topic = ' + topic)
    d = json.loads(msg[1]) ## the "payload" dictionary
    ## Set up to grab the system data.
    meta = {}
    meta['topic'] = topic
    if save_header:
        ## yaml is imported lazily so the module does not require it unless requested.
        import yaml
        meta['header'] = yaml.dump(d, indent=3, width=130)
    if debug:
        import yaml
        print(yaml.dump(d, indent=3, width=130))
    if ('gci-meta' in d) and ('metadata' in d['gci-meta']):
        ## The first three items are always present.
        meta['cool_shutter_temperatures'] = float32(d['gci-meta']['metadata']['unheated-temps-list'])
        meta['warm_shutter_temperatures'] = float32(d['gci-meta']['metadata']['heated-temps-list'])
        meta['accumulated_frames_list'] = d['gci-meta']['metadata']['frames-accum-list']
        meta['timestamp'] = d['gci-meta']['metadata']['ts-ms-since-epoch']
        ## Nw = the number of waveband channels, taken from the accumulated-frames list length.
        Nw = alen(meta['accumulated_frames_list'])
        meta['Nw'] = Nw
        if ('cube' in d) and ('metadata' in d['cube']):
            meta['Nx'] = d['cube']['metadata']['height']
        else:
            meta['Nx'] = 240    ## fall back to the hard-coded detector height
        if ('cube-width' in d['gci-meta']['metadata']):
            meta['Ny'] = d['gci-meta']['metadata']['cube-width']
        if ('unheated-radiances-list' in d['gci-meta']['metadata']):
            meta['cool_shutter_radiances'] = float32(d['gci-meta']['metadata']['unheated-radiances-list'])
        if ('heated-radiances-list' in d['gci-meta']['metadata']):
            meta['warm_shutter_radiances'] = float32(d['gci-meta']['metadata']['heated-radiances-list'])
        if ('gain-cube-filename' in d['gci-meta']['metadata']):
            meta['gain_cube_filename'] = d['gci-meta']['metadata']['gain-cube-filename']
        if ('offset-cube-filename' in d['gci-meta']['metadata']):
            meta['offset_cube_filename'] = d['gci-meta']['metadata']['offset-cube-filename']
        if ('static-offset-type' in d['gci-meta']['metadata']):
            ## This can be either "cool" or "warm".
            meta['static_offset_type'] = d['gci-meta']['metadata']['static-offset-type'].lower()
        if ('reg-data-str' in d['gci-meta']['metadata']):
            ## Had to hard-code the Nx value here. Need a better structure for data to make it
            ## flexible for *any* detector array dimensions.
            rect_dict = d['gci-meta']['metadata']['reg-data-str']['IR']
            meta['rd'] = {}
            meta['rd']['cropping_xy'] = daisi_rect_to_xycoords(rect_dict, 240, meta['Nw'])
            #if ('scene-reference-rect' in d['gci-meta']['metadata']):
            scene_xy = daisi_rect_to_xycoords(d['gci-meta']['metadata']['scene-reference-rect'], meta['Nx'], 0)
            meta['rd']['scene_xy'] = scene_xy
        if ('aper-mean-temps' in d['gci-meta']['metadata']):
            ## Since not all w's are represented in the aperture values, you need to initialize the
            ## array and fill in only those values which are defined. (The None entries become NaN.)
            meta['aper_mean_temps'] = float32([None]*Nw)
            for key in d['gci-meta']['metadata']['aper-mean-temps']:
                w = int(key)    ## the JSON keys are channel indices encoded as strings
                meta['aper_mean_temps'][w] = float32(d['gci-meta']['metadata']['aper-mean-temps'][key])
        if ('scene-mean-temps' in d['gci-meta']['metadata']):
            meta['sceneref_mean_temps'] = zeros(Nw, 'float32')
            for key in d['gci-meta']['metadata']['scene-mean-temps']:
                w = int(key)
                meta['sceneref_mean_temps'][w] = float32(d['gci-meta']['metadata']['scene-mean-temps'][key])
        if ('aper-offset-deltas' in d['gci-meta']['metadata']):
            meta['aper_offset_deltas'] = float32([None]*Nw)
            for key in d['gci-meta']['metadata']['aper-offset-deltas']:
                w = int(key)
                meta['aper_offset_deltas'][w] = float32(d['gci-meta']['metadata']['aper-offset-deltas'][key])
        if ('cropped-page-mean-temps' in d['gci-meta']['metadata']):
            meta['cropped_page_mean_temps'] = float32(d['gci-meta']['metadata']['cropped-page-mean-temps'])
        if ('spec-radiance-means' in d['gci-meta']['metadata']):
            meta['spec_radiance_means'] = float32(d['gci-meta']['metadata']['spec-radiance-means'])
            meta['Nc'] = alen(meta['spec_radiance_means'])    ## Nc = number of spectral channels
        if ('version-info' in d['gci-meta']['metadata']):
            meta['version_info'] = d['gci-meta']['metadata']['version-info']
        if ('calibration-state' in d['gci-meta']['metadata']):
            ## This string can be {'calibration-2-pt-heating','calibration-2-pt-running',
            ## 'calibration-1-pt-running','calibration-idle'}.
            meta['calibration_state'] = d['gci-meta']['metadata']['calibration-state'].lower()
        if ('pan-tilt-in-motion' in d['gci-meta']['metadata']):
            meta['pan_tilt_in_motion'] = d['gci-meta']['metadata']['pan-tilt-in-motion']
        if ('pan-tilt-position' in d['gci-meta']['metadata']):
            meta['pan_tilt_position'] = float32(d['gci-meta']['metadata']['pan-tilt-position'])
        if ('pan-tilt-setpoint' in d['gci-meta']['metadata']):
            meta['pan_tilt_setpoint'] = float32(d['gci-meta']['metadata']['pan-tilt-setpoint'])
        if ('gain-cube-timestamp' in d['gci-meta']['metadata']):
            meta['gain_cube_timestamp'] = d['gci-meta']['metadata']['gain-cube-timestamp']
        if ('offset-cube-timestamp' in d['gci-meta']['metadata']):
            meta['offset_cube_timestamp'] = d['gci-meta']['metadata']['offset-cube-timestamp']
        if ('heated-temp-ts-ms' in d['gci-meta']['metadata']):
            meta['heated_temp_timestamp'] = d['gci-meta']['metadata']['heated-temp-ts-ms']
        if ('scene-ref-timestamp' in d['gci-meta']['metadata']):
            meta['scene_ref_timestamp'] = d['gci-meta']['metadata']['scene-ref-timestamp']
        if ('is-scene-ref' in d['gci-meta']['metadata']):
            meta['is_scene_ref'] = d['gci-meta']['metadata']['is-scene-ref']
        if ('num-pixels-masked-by-ir-mc' in d['gci-meta']['metadata']):
            meta['npix_masked_ir_mc'] = d['gci-meta']['metadata']['num-pixels-masked-by-ir-mc']
        if ('num-pixels-masked-by-ir-steam' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_ir_steam'] = d['gci-meta']['metadata']['num-pixels-masked-by-ir-steam']
        if ('num-pixels-masked-by-parallax' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_parallax'] = d['gci-meta']['metadata']['num-pixels-masked-by-parallax']
        if ('num-pixels-masked-by-vis-mc' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_vis_mc'] = d['gci-meta']['metadata']['num-pixels-masked-by-vis-mc']
        if ('num-pixels-masked-by-vis-steam' in d['gci-meta']['metadata']):
            meta['num_pixels_masked_by_vis_steam'] = d['gci-meta']['metadata']['num-pixels-masked-by-vis-steam']
        ## Look inside the filter map to determine which GCI you're talking to.
        if ('filt-chan-map' in d['gci-meta']['metadata']):
            if ('propylene0' in d['gci-meta']['metadata']['filt-chan-map']):
                filterlist = get_filterlist(d['gci-meta']['metadata']['filt-chan-map'])
                meta['filterlist'] = filterlist
            else:
                meta['filterlist'] = []
                for key in d['gci-meta']['metadata']['filt-chan-map']:
                    w = int(key)
                    meta['filterlist'].append(d['gci-meta']['metadata']['filt-chan-map'][key]['uniqueID'])
            meta['serial_number'] = serial_number_from_filterlist(meta['filterlist'], debug=debug)
        if ('shutter-state-info' in d['gci-meta']['metadata']):
            sh = d['gci-meta']['metadata']['shutter-state-info']
            if 'heater_enabled' in sh: meta['heater_enabled'] = sh['heater_enabled'] ## bool
            if 'heater_on_cond' in sh: meta['heater_on_cond'] = sh['heater_on_cond'] ## bool
            if 'heater_set_point' in sh: meta['heater_set_point'] = float32(sh['heater_set_point'])
            if 'cool_set_open' in sh: meta['cool_shutter_set_open'] = sh['cool_set_open'] ## bool
            if 'cool_pos' in sh: meta['cool_shutter_position'] = sh['cool_pos'].lower() ## can be 'open', 'closed', 'transit', 'invalid'
            if 'warm_set_open' in sh: meta['warm_shutter_set_open'] = sh['warm_set_open'] ## bool
            if 'warm_pos' in sh: meta['warm_shutter_position'] = sh['warm_pos'].lower() ## can be 'open', 'closed', 'transit', 'invalid'
        if ('force-temporal-ref-reset' in d['gci-meta']['metadata']):
            meta['force_temporal_ref_reset'] = d['gci-meta']['metadata']['force-temporal-ref-reset']
        ## Some heuristics to guess which port was the source for the frame.
        if ('version-info' in d['gci-meta']['metadata']):
            #if ('cube_chan_viewer' in d['gci-meta']['metadata']['version-info']):
            if ('detection-mask' in d):
                meta['source'] = '9501'
            elif ('spectral_cube_gen' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9005'
            elif ('mask_gen' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9004'
            elif ('degC_to_rad' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9003'
            elif ('dynamic_cal' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9002'
            elif ('rotate_vis_cam' in d['gci-meta']['metadata']['version-info']):
                meta['source'] = '9001'
            else:
                meta['source'] = '9000'
        elif('gain-cube-filename' in d['gci-meta']['metadata']):
            meta['source'] = '9000'
        else:
            meta['source'] = '6009'
        if debug: print('Input GCI message contains a "' + meta['source'] + '" type frame.')
    ## Grab the datacube metadata.
    if use_ir and ('cube' in d) and ('buffer' in d['cube']) and ('metadata' in d['cube']):
        #meta['Nx'] = d['cube']['metadata']['height']
        meta['Ny'] = d['cube']['metadata']['width']
        meta['Nz'] = d['cube']['metadata']['depth']
        meta['dcb'] = decode_datacube(d['cube'], msg)
        ## Also store the cube under a source-tagged key (e.g. "dcb9002").
        ## NOTE(review): this assumes meta['source'] was set above, i.e. the message
        ## carried a 'gci-meta' section --- confirm for messages without one.
        meta['dcb'+meta['source']] = meta['dcb']
        if (topic in ('spectral-cube','absorption-cube','normalized-absorption-cube','snr-cube','stdev-cube')):
            meta['Nc'] = meta['Nz']
    ## Grab the absorption cube metadata.
    if use_ir and ('absorption-cube' in d) and ('buffer' in d['absorption-cube']) and ('metadata' in d['absorption-cube']):
        meta['Ny'] = d['absorption-cube']['metadata']['width']
        meta['Nz'] = d['absorption-cube']['metadata']['depth']
        meta['dcb_absorption'] = decode_datacube(d['absorption-cube'], msg)
        #zzz
        #pdb.set_trace()
    ## Grab the standard-deviation cube metadata.
    if use_ir and ('std-dev-cube' in d) and ('buffer' in d['std-dev-cube']) and ('metadata' in d['std-dev-cube']):
        meta['Ny'] = d['std-dev-cube']['metadata']['width']
        meta['Nz'] = d['std-dev-cube']['metadata']['depth']
        meta['dcb_stdev'] = decode_datacube(d['std-dev-cube'], msg)
    ## Grab the SNR cube metadata.
    if use_ir and ('snr-cube' in d) and ('buffer' in d['snr-cube']) and ('metadata' in d['snr-cube']):
        meta['Ny'] = d['snr-cube']['metadata']['width']
        meta['Nz'] = d['snr-cube']['metadata']['depth']
        meta['dcb_snr'] = decode_datacube(d['snr-cube'], msg)
    ## Grab the visible image metadata.
    if use_vis and ('image' in d) and ('buffer' in d['image']) and ('metadata' in d['image']):
        meta['vis_Nx'] = d['image']['metadata']['height']
        meta['vis_Ny'] = d['image']['metadata']['width']
        meta['vis'] = decode_image(d['image'], msg)
    ## The masks below arrive as 0/255 images; the integer division by 255 converts them to 0/1.
    ## Grab the saturation mask.
    if use_masks and ('saturation-mask' in d) and ('buffer' in d['saturation-mask']) and ('metadata' in d['saturation-mask']):
        meta['saturation_mask'] = decode_image(d['saturation-mask'], msg) // 255
    ## Grab the IR motion mask.
    if use_masks and ('ir-motion-canc-mask' in d) and ('buffer' in d['ir-motion-canc-mask']) and ('metadata' in d['ir-motion-canc-mask']):
        meta['irmotion_mask'] = decode_image(d['ir-motion-canc-mask'], msg) // 255
    ## Grab the parallax mask.
    if use_masks and ('parallax-mask' in d) and ('buffer' in d['parallax-mask']) and ('metadata' in d['parallax-mask']):
        meta['parallax_mask'] = decode_image(d['parallax-mask'], msg) // 255
    ## Grab the VIS motion mask.
    if use_masks and ('vis-motion-canc-mask' in d) and ('buffer' in d['vis-motion-canc-mask']) and ('metadata' in d['vis-motion-canc-mask']):
        meta['vismotion_mask'] = decode_image(d['vis-motion-canc-mask'], msg) // 255
    ## Grab the detection mask.
    if use_masks and ('detection-mask' in d) and ('buffer' in d['detection-mask']) and ('metadata' in d['detection-mask']):
        meta['detmask'] = decode_image(d['detection-mask'], msg) // 255
    ## Grab the difference image. Note! When the detection algorithm is a Xcorr type, then the "difference image"
    ## is actually a xcorr image.
    if ('difference-image' in d) and ('buffer' in d['difference-image']) and ('metadata' in d['difference-image']):
        meta['diffimg'] = decode_image(d['difference-image'], msg)
    ## Grab the visible *overlay* image.
    if use_vis and ('vis-image' in d) and ('buffer' in d['vis-image']) and ('metadata' in d['vis-image']):
        meta['overlay'] = decode_image(d['vis-image'], msg)
    if debug:
        ## The line below cannot be implemented as-is, because the JSON module does not know how
        ## to serialize numpy objects.
        #print(json.dumps(meta, indent=4, sort_keys=True))
        #print(meta)
        pass
    #dynamic_gains = (meta['sceneref_mean_temps'][9] - meta['aper_mean_temps'][9]) / (meta['sceneref_mean_temps'] - meta['aper_mean_temps'])
    #dynamic_offsets = meta['aper_mean_temps'][9] + meta['aper_offset_deltas']
    ## Can't use cropped_mean_temps for calculating dynamic offsets of the referenced cameras
    ## because these values are *after* the dynamic cal was applied. And if we can't get the
    ## offsets, then we might as well skip the gains, because they're just 1.0.
    #rd = read_rectanglesfile(meta['serial_number'])
    #for key in rd['refcam']:
    #    dynamic_gains[int(key)] = 1.0
    #    #dynamic_offsets[int(key)] = meta['cropped_page_mean_temps'][9] - meta['cropped_page_mean_temps'][int(key)]
    #print('dynamic offsets = ', dynamic_offsets)
    #print('dynamic gains = ', dynamic_gains)
    return(meta)
  304. ## =================================================================================================
  305. def decode_datacube(cubedata, msg, debug=False):
  306. datatype = cubedata['type'].lower()
  307. raw_str = msg[cubedata['buffer']['index']]
  308. Nx = cubedata['metadata']['height']
  309. Ny = cubedata['metadata']['width']
  310. Nz = cubedata['metadata']['depth']
  311. if debug: print('%s: Nx, Ny, Nz = %3i %3i %3i' % (cubedata['topic'], Nx, Ny, Nz))
  312. if (datatype == 'ipp16u-cube'):
  313. bytesize = 2
  314. dcb = zeros((Nx,Ny,Nz), 'uint16')
  315. q = 0
  316. for z in arange(Nz):
  317. xvalues = Nx - 1 - arange(Nx)
  318. for x in xvalues:
  319. row = uint16(struct.unpack('<'+str(Ny)+'H',raw_str[q:q+(Ny*bytesize)]))
  320. dcb[x,:,z] = row
  321. q += Ny * bytesize
  322. elif (datatype == 'ipp32f-cube'):
  323. bytesize = 4
  324. dcb = zeros((Nx,Ny,Nz), 'float32')
  325. q = 0
  326. for z in arange(Nz):
  327. xvalues = Nx - 1 - arange(Nx)
  328. for x in xvalues:
  329. row = struct.unpack('<'+str(Ny)+'f',raw_str[q:q+(Ny*bytesize)])
  330. dcb[x,:,z] = row
  331. q += Ny * bytesize
  332. else:
  333. raise NotImplementedError('The data type "' + datatype + '" is not implemented.')
  334. ## If scaling is needed, then apply the gain and offset.
  335. if ('gain' in cubedata['metadata']):
  336. cubedata['dcb_dtype'] = 'float32'
  337. scalegain = float(cubedata['metadata']['gain'])
  338. scaleoffset = float(cubedata['metadata']['offset'])
  339. if (float(cubedata['metadata']['gain']) > 0.0):
  340. dcb = (float32(dcb) / scalegain) + scaleoffset
  341. return(dcb)
  342. ## =================================================================================================
  343. def decode_image(cubedata, msg, debug=False):
  344. raw_str = msg[cubedata['buffer']['index']]
  345. Nx = cubedata['metadata']['height']
  346. Ny = cubedata['metadata']['width']
  347. if (cubedata['metadata']['imageTypeString'] == 'RGB_UINT8'):
  348. bytesize = 1
  349. vis = zeros((Nx,Ny,3), 'uint8')
  350. q = 0
  351. xvalues = Nx - 1 - arange(Nx)
  352. for x in xvalues:
  353. row = struct.unpack('<'+str(Ny*3)+'B', raw_str[q:q+(Ny*3*bytesize)])
  354. vis[x,:,0] = row[0::3]
  355. vis[x,:,1] = row[1::3]
  356. vis[x,:,2] = row[2::3]
  357. q += Ny * 3 * bytesize
  358. elif (cubedata['metadata']['imageTypeString'] == 'MONO_UINT8'):
  359. bytesize = 1
  360. vis = zeros((Nx,Ny), 'uint8')
  361. q = 0
  362. xvalues = Nx - 1 - arange(Nx)
  363. for x in xvalues:
  364. row = struct.unpack('<'+str(Ny)+'B', raw_str[q:q+(Ny*bytesize)])
  365. vis[x,:] = row
  366. q += Ny * bytesize
  367. elif (cubedata['metadata']['imageTypeString'] == 'MONO_FLOAT32'):
  368. bytesize = 4
  369. vis = zeros((Nx,Ny), 'float32')
  370. q = 0
  371. xvalues = Nx - 1 - arange(Nx)
  372. for x in xvalues:
  373. row = struct.unpack('<'+str(Ny)+'f',raw_str[q:q+(Ny*bytesize)])
  374. vis[x,:] = row
  375. q += Ny * bytesize
  376. else:
  377. raise NotImplementedError('That visible image data type ("' + cubedata['metadata']['imageTypeString'] + \
  378. '") is not implemented.')
  379. return(vis)
  380. ## =================================================================================================
def acquire_raw(nframes, port=6009, gcinum=3, username=None, keyfile=None, topic='', verbose=False):
    '''
    Acquire a raw data sequence from the GCI. This is the lowest-level acquisition function, called
    by everything that wants live data from hardware.

    NOTE(review): this function uses Python 2-only syntax (`raise LookupError, msg`,
    `except (...), errmsg`, and `basestring`), so it cannot run under Python 3 as-is.

    Parameters
    ----------
    nframes : int
        The number of frames of data to acquire
    port : int
        The port you want to pull from (i.e. 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9006, or 9501).
    gcinum : int, optional
        The GCI serial number.
    username : str, optional
        The username to use for connecting to the remote host
    keyfile : str, optional
        The RSA keyfile file to use for connecting to the remote host.
    topic : str
        The GCI message topic to wait for. This is used primarily when listening to ports \
        9005 (which can have topics "spectral-cube","absorption-cube", or "normalized-absorption-cube") \
        and 9501 (which can have a variety of topics, such as "methane_binary", etc --- see the \
        detection algorithm config file).
    verbose : bool
        Whether to print progress information while acquiring.

    Returns
    -------
    raw : dict
        A dictionary whose keys are "0", "1", etc. labelling the time index of the frames.\
        raw['0'] contains the GCI message string that can be used as input to `decode_gci_msg()`.

    Example
    -------
    import gci
    ## To acquire data from hardware connected to the local network.
    raw = gci.acquire_raw(9002)
    ## To acquire data from a remote system.
    raw = gci.acquire_raw(9002, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa')
    ## Or even using an SSH tunnel to a locally-connected system.
    raw = gci.acquire_raw(9002, 3, 'nhagen', '/home/nhagen/.ssh/nhagen_lab_rsa')
    ## Or, for acquiring a detection mask from a remote system.
    raw = gci.acquire_raw(9501, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa', topic='methane_binary')
    '''
    assert (nframes == int(nframes)), '"nframes" must be an integer type.'
    assert isinstance(port, basestring) or isinstance(port, int), '"units" must be either a string or integer type.'
    gcinum = int(gcinum)
    assert gcinum in (2,3), 'Only GCI serial numbers 2 and 3 are mapped to known systems.'
    ctx = zmq.Context()
    subsock = ctx.socket(zmq.SUB)
    subsock.setsockopt_string(zmq.SUBSCRIBE, u'')    ## empty prefix = subscribe to every topic
    subsock.setsockopt(zmq.HWM, nframes)             ## cap the message queue at nframes
    ## If the input uses a units string rather than a port number, then translate to a number.
    trans = {'counts':6009, '6009':6009, 'degc':9000, '9000':9000, 'corrected_degc':9002, '9002':9002,
             'corrected_radiance':9003, '9003':9003, 'radiance':9003, 'mask':9004, '9004':9004,
             'spectrum':9005, '9005':9005, '9006':9006, 'detection':9501, '9501':9501}
    if isinstance(port, str):
        port = trans[port.lower()]
    if (port not in (6009,9000,9001,9002,9003,9004,9005,9006,9007,9501)):
        raise ValueError('That port number ("' + str(port) + '") is not recognized.')
    if (port == 9005):
        ## Default topic for the spectral-cube port.
        if (topic == ''): topic = 'spectral-cube'
    if (port == 9501):
        send_keepalive_command(topic, debug=verbose)
    if (username == None) or (keyfile == None):
        ## Use the local network to acquire data.
        if (port == 6009):
            ## Talk to the embedded system.
            address = 'tcp://10.1.10.10:'
        else:
            ## Talk to the server system.
            address = 'tcp://10.1.10.11:'
            #address = 'tcp://localhost:'
        subsock.connect(address + str(port))
        raw = {}
        for t in arange(nframes):
            #raw[str(t)] = subsock.recv_multipart()
            poller = zmq.Poller()
            poller.register(subsock, zmq.POLLIN)
            REQUEST_TIMEOUT = 2500    ## poll timeout in milliseconds
            q = 1 ## counter to see how many requests for messages are made
            while True:
                sockets = dict(poller.poll(REQUEST_TIMEOUT))
                q += 1
                if (subsock in sockets) and (sockets[subsock] == zmq.POLLIN):
                    msg = subsock.recv_multipart()
                    ## Keep waiting if this message's topic doesn't match the requested one.
                    if (topic != '') and (msg[0] != topic): continue
                    raw[str(t)] = msg
                    break
                if (q > 5):
                    ## Give up after too many empty polls.
                    subsock.close()
                    raise ImportError('Failed to receive a datacube message.')
            if verbose: print('Frame index = ' + str(t))
        subsock.close()
        return(raw)
    else:
        ## Here we try to build an SSH tunnel to a remotely-connected GCI system.
        import subprocess
        import signal
        import time
        username = str(username)
        keyfile = str(keyfile)
        if not os.path.exists(keyfile): raise LookupError, 'Cannot find file "' + keyfile + '"'
        try:
            if (gcinum == 2): ## Toledo system
                if (port == 6009):
                    connect_str = 'ssh -i %s -p 22001 %s@24.52.111.98 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:6009'
                else:
                    connect_str = 'ssh -i %s -p 22000 %s@24.52.111.98 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:' + str(port)
            elif (gcinum == 3): ## lab system
                if (port == 6009):
                    connect_str = 'ssh -i %s -p 22 %s@10.1.10.11 -L %i:10.1.20.10:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:6009'
                else:
                    connect_str = 'ssh -i %s -p 22 %s@10.1.10.11 -L %i:127.0.0.1:%i -N' % (keyfile, username, port, port)
                    address = 'tcp://localhost:' + str(port)
            ## Note: SSH creates a shell, and so any commands running in the shell are its children. When you try to
            ## kill the shell, its children are not terminated with it, leaving them orphan. One way around this is
            ## to create the shell and its children as a process group (via "os.setsid") and send a command to kill
            ## all processes in that group. This works on Linux, but not on MS Windows, which doesn't have os.setsid
            ## available.
            raw = {}
            ssh_process = subprocess.Popen(connect_str, shell=True, preexec_fn=os.setsid)
            time.sleep(2.0) ## give it some time to connect
            ctx = zmq.Context()
            subsock = ctx.socket(zmq.SUB)
            subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
            subsock.setsockopt(zmq.HWM, 1)
            subsock.connect(address)
            raw = {}
            for t in arange(nframes):
                poller = zmq.Poller()
                poller.register(subsock, zmq.POLLIN)
                REQUEST_TIMEOUT = 2500    ## poll timeout in milliseconds
                q = 1 ## counter to see how many requests for messages are made
                while True:
                    sockets = dict(poller.poll(REQUEST_TIMEOUT))
                    q += 1
                    if (subsock in sockets) and (sockets[subsock] == zmq.POLLIN):
                        msg = subsock.recv_multipart()
                        if (msg[0] != topic): continue
                        raw[str(t)] = msg
                        break
                    if (q > 5):
                        subsock.close()
                        raise ImportError('Failed to receive a datacube message with topic="' + topic + '".')
                if verbose: print('Frame index = ' + str(t))
            subsock.close()
            time.sleep(0.1) ## give it some time to close the connection before you terminate the SSH process
            ## Send the terminate signal to all the processes in the group. This apparently doesn't work on MS Windows.
            ## Note that ssh_process.kill() does *not* work, since that is a child process and not a parent.
            os.killpg(ssh_process.pid, signal.SIGTERM)
        except (OSError,ImportError), errmsg:
            print('Tunnel failed:', errmsg)
            time.sleep(0.1) ## give it some time to close the connection before you terminate the SSH process
            ## Send the terminate signal to all the processes in the group. This apparently doesn't work on MS Windows.
            ## Note that ssh_process.kill() does *not* work, since that is a child process and not a parent.
            os.killpg(ssh_process.pid, signal.SIGTERM)
            raise Exception(errmsg)
        return(raw)
  537. ## =================================================================================================
  538. def acquire_data(port=6009, gcinum=3, username=None, keyfile=None, use_ir=True, use_vis=True, use_masks=True,
  539. topic='', save_header=False, debug=False):
  540. '''
  541. Acquire a *single frame* of data containing the datacube, vis image, and all associated metadata.
  542. Parameters
  543. ----------
  544. port : int
  545. The port to acquire data from. (This can be 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9051).
  546. gcinum : int, optional
  547. The serial number of the GCI to talk to.
  548. username : str, optional
  549. The username to use for connecting to the remote host
  550. keyfile : str, optional
  551. The RSA keyfile file to use for connecting to the remote host
  552. use_ir : bool, optional
  553. Whether to load the IR datacube. (Set to false to improve loading speed.)
  554. use_vis : bool, optional
  555. Whether to load the visible image. (Set to false to improve loading speed.)
  556. use_masks : bool, optional
  557. Whether to load the full set of masks. (Set to false to improve loading speed.)
  558. topic : str
  559. The GCI message topic to wait for. This is used primarily when listening to ports \
  560. 9005 (which can have topics "spectral-cube","absorption-cube", or "normalized-absorption-cube") \
  561. and 9501 (which can have a variety of topics, such as "methane_binary", etc --- see the \
  562. detection algorithm config file).
  563. save_header : bool
  564. Whether to save the text version of the metadata as "header".
  565. Returns
  566. -------
  567. data : dict
  568. A dictionary containing keys 'dcb', 'vis'', and all the other elements returned by\
  569. `decode_gci_msg()`.
  570. Example
  571. -------
  572. import gci
  573. ## To acquire data from hardware connected to the local network.
  574. data = gci.acquire_data(9002)
  575. ## To acquire data from a remote system.
  576. data = gci.acquire_data(9002, 2, 'nhagen', '/home/nhagen/.ssh/nhagen_toledo_rsa')
  577. ## Or even using an SSH tunnel to a locally-connected system.
  578. data = gci.acquire_data(9002, 3, 'nhagen', '/home/nhagen/.ssh/nhagen_lab_rsa')
  579. '''
  580. raw = acquire_raw(1, port=port, gcinum=gcinum, username=username, keyfile=keyfile, topic=topic, verbose=debug)
  581. data = decode_gci_msg(raw['0'], use_ir=use_ir, use_vis=use_vis, use_masks=use_masks, save_header=save_header, debug=debug)
  582. return(data)
  583. ## =================================================================================================
  584. def acquire_dcb(ncubes, port=6009, debug=False):
  585. '''
  586. Acquire a sequence of infrared datacubes (i.e. hypercube if ncubes>1) from the GCI. This throws away
  587. all of the associated metadata and keeps only the final cubes.
  588. Parameters
  589. ----------
  590. ncubes : int
  591. the number of datacubes to acquire
  592. port : int
  593. The port to acquire data from. (This can be 6009, 9000, 9001, 9002, 9003, 9004, 9005, 9051).
  594. Returns
  595. -------
  596. hcb : (list of ndarrays)
  597. If ncubes==1, a single datacube is returned; if ncubes>1, a Nt-long list of them is
  598. returned.
  599. '''
  600. assert (ncubes == int(ncubes)), '"ncubes" must be an integer type.'
  601. raw = acquire_raw(ncubes, port)
  602. gcidata = decode_gci_msg(raw['0'], use_ir=True, use_vis=False, use_masks=False, debug=debug)
  603. if (ncubes == 1):
  604. hcb = gcidata['dcb']
  605. else:
  606. hcb = []
  607. hcb.append(gcidata['dcb'])
  608. for t in arange(1,ncubes):
  609. gcidata = decode_gci_msg(raw[str(t)], use_ir=True, use_vis=False, use_masks=False, debug=debug)
  610. hcb.append(gcidata['dcb'])
  611. return(hcb)
  612. ## =================================================================================================
  613. def acquire_vis(nframes, debug=False):
  614. '''
  615. Acquire a sequence of visible images (i.e. hypercube if nframes>1) from the GCI. This throws away
  616. all of the associated metadata and keeps only the final images.
  617. Parameters
  618. ----------
  619. nframes : int
  620. The number of images to acquire
  621. Returns
  622. -------
  623. hcb : ndarray
  624. * If `nframes==1`, and using a color camera: a (Nx,Ny,3) image is returned
  625. * If `nframes>1`, and using a color camera: a (Nx,Ny,3,Nt) image is returned
  626. * If `nframes==1`, and using a monochrome camera: a (Nx,Ny) image is returned
  627. * If `nframes>1`, and using a monochrome camera: a (Nx,Ny,Nt) image is returned
  628. '''
  629. assert (nframes == int(nframes)), '"nframes" must be an integer type.'
  630. raw = acquire_raw(nframes)
  631. gcidata = decode_gci_msg(raw['0'], use_ir=False, use_vis=True, use_masks=False, debug=debug)
  632. if (nframes == 1):
  633. vcb = gcidata['vis']
  634. else:
  635. if (gcidata['vis'].ndim == 2):
  636. (vis_Nx,vis_Ny) = gcidata['vis'].shape
  637. vcb = zeros((vis_Nx,vis_Ny,nframes), gcidata['vis'].dtype)
  638. for t in arange(1,nframes):
  639. gcidata = decode_gci_msg(raw[str(t)], use_ir=False, use_vis=True, use_masks=False, debug=debug)
  640. vcb[:,:,t] = gcidata['vis']
  641. elif (gcidata['vis'].ndim == 3):
  642. (vis_Nx,vis_Ny,_) = gcidata['vis'].shape
  643. vcb = zeros((vis_Nx,vis_Ny,3,nframes), gcidata['vis'].dtype)
  644. for t in arange(1,nframes):
  645. gcidata = decode_gci_msg(raw[str(t)], use_ir=False, use_vis=True, use_masks=False, debug=debug)
  646. vcb[:,:,:,t] = gcidata['vis']
  647. return(vcb)
  648. ## =================================================================================================
## Convert all of the GCI files in a folder to a hypercube (a list of Numpy datacubes).
  650. def gci_sequence_to_hcb(path, debug=True):
  651. '''
  652. Glob a directory for `*.gci` files, and decode each file into an infrared datacube and (if use_vis==True)
  653. a visible image.
  654. If there is more than one file in the directory, assemble the datacubes and images
  655. into an infrared hypercube and a visible hypercube. Throw away all of the associated metadata.
  656. Parameters
  657. ----------
  658. path : str
  659. The directory where to find all of the *.gci files.
  660. Returns
  661. -------
  662. hcb : (list of ndarrays)
  663. A list of datacubes.
  664. '''
  665. assert isinstance(path, str), 'The input "path" must be a string type.'
  666. if os.path.isfile(path):
  667. f = path
  668. nfiles = 1
  669. elif os.path.isdir(path):
  670. gci_files = sorted(glob.glob(path+'*.gci'))
  671. nfiles = len(gci_files)
  672. f = gci_files[0]
  673. if debug: print('Converting "' + os.path.basename(f) + '" (' + str(1) + ' of ' + str(nfiles) + ') ...')
  674. else:
  675. print('No files found in folder "' + path + '". Exiting ...')
  676. return
  677. ## Open the first file nd grab the dcb and vis image to figure out the data dimensions. This will allow you to create the
  678. ## hcb and vcb objects, which we can later fill in by looping over the remaining files.
  679. print('Decoding image #1 from "' + os.path.basename(f) + '"')
  680. gcidata = decode_gci_msg(f, use_vis=False, use_masks=False, debug=False)
  681. if (nfiles == 1):
  682. return(gcidata['dcb'])
  683. (Nx,Ny,Nw) = gcidata['dcb'].shape
  684. if debug: print('(Nx,Ny,Nw)=', (Nx,Ny,Nw))
  685. hcb = []
  686. hcb.append(gcidata['dcb'])
  687. for t in arange(1,nfiles):
  688. print('Decoding image #' + str(t+1) + ' from "' + os.path.basename(gci_files[t]) + '"')
  689. gcidata = decode_gci_msg(gci_files[t], use_vis=False, use_masks=False, debug=False)
  690. hcb.append(gcidata['dcb'])
  691. return(hcb)
  692. ## =================================================================================================
  693. def crop_dcb(dcb, xycoords):
  694. '''
  695. Crop a datacube using a matrix of cropping coordinates.
  696. Parameters
  697. ----------
  698. dcb : ndarray
  699. The (Nx,Ny,Nw) datacube to be cropped
  700. xycoords : ndarray
  701. The (Nw,4) array of (xlo,xhi,ylo,yhi) cropping coordinates for all Nw cameras
  702. Returns
  703. -------
  704. dcb_cropped : ndarray
  705. The cropped datacube, with x-dimension equal to xycoords[w,1]-xycoords[w,0],\
  706. y-dimension equal to xycoords[w,3]-xycoords[w,2], and w-dimension equal to Nw.
  707. '''
  708. dcb = asarray(dcb)
  709. assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
  710. xycoords = asarray(xycoords)
  711. (_,_,Nw) = dcb.shape
  712. Nx = xycoords[0,1] - xycoords[0,0]
  713. Ny = xycoords[0,3] - xycoords[0,2]
  714. dcb_cropped = zeros((Nx,Ny,Nw), dtype(dcb[0,0,0]))
  715. for w in arange(Nw):
  716. xlo = xycoords[w,0]
  717. xhi = xycoords[w,1]
  718. ylo = xycoords[w,2]
  719. yhi = xycoords[w,3]
  720. dcb_cropped[:,:,w] = dcb[xlo:xhi,ylo:yhi,w]
  721. return(dcb_cropped)
  722. ## =================================================================================================
  723. def crop_hypercube(hcb, xycoords):
  724. '''
  725. Crop a hypercube (datacube sequence) using a matrix of cropping coordinates.
  726. Parameters
  727. ----------
  728. hcb : ndarray
  729. The (Nx,Ny,Nw,Nt) hypercube to be cropped
  730. xycoords : ndarray
  731. The (Nw,4) array of (xlo,xhi,ylo,yhi) cropping coordinates for all Nw cameras
  732. Returns
  733. -------
  734. hcb_cropped : ndarray
  735. The cropped hypercube, with x-dimension equal to `xycoords[w,1]-xycoords[w,0]`, y-dimension \
  736. equal to `xycoords[w,3]-xycoords[w,2]`, w-dimension equal to Nw, and t-dimension equal to \
  737. Nt. Here `w` can be any index value 0 through Nw-1.
  738. '''
  739. hcb = asarray(hcb)
  740. assert (hcb.ndim == 4), 'Input "hcb" must have three dimensions.'
  741. xycoords = asarray(xycoords)
  742. (_,_,Nw,Nt) = hcb.shape
  743. Nx = xycoords[0,1] - xycoords[0,0]
  744. Ny = xycoords[0,3] - xycoords[0,2]
  745. hcb_cropped = zeros((Nx,Ny,Nw,Nt), dtype(hcb[0,0,0,0]))
  746. for t in arange(Nt):
  747. for w in arange(Nw):
  748. xlo = xycoords[w,0]
  749. xhi = xycoords[w,1]
  750. ylo = xycoords[w,2]
  751. yhi = xycoords[w,3]
  752. hcb_cropped[:,:,w] = hcb[xlo:xhi,ylo:yhi,w,t]
  753. return(hcb_cropped)
  754. ## =================================================================================================
  755. ## keyword "plottype" allows options ['image', 'histogram']
  756. ## The "box1", "box2", "box3", and "box4" optional arguments can be used to draw boxes in the datacube slices images.
  757. ## Each argument can be either a 4-element vector (i.e. same box coordinates in each wavelength image) or a Nw*4
  758. ## matrix (giving different box coordinates for each wavelength image).
  759. def show_dcb(dcb, waves=None, figsize=None, title='', plottype='image', showstats=False, \
  760. box1=None, box2=None, box3=None, box4=None, invert_display=False, show=False,
  761. use_colorbars=True, **kwargs):
  762. '''
  763. Display a datacube in a single window, showing each w-plane of the cube as an image.
  764. Parameters
  765. ----------
  766. dcb : ndarray
  767. A (Nx,Ny,Nw) datacube.
  768. waves : ndarray, optional
  769. A vector of Nw values with which to label the datacube images.
  770. figsize : tuple, optional
  771. The size of the plotting window.
  772. title : str, optional
  773. The figure title.
  774. plottype : str, optional ('image','histogram','difference1','difference2')
  775. What type of figure to draw --- the images themselves, their histograms, or their \
  776. difference cubes.
  777. showstats : bool
  778. Whether or now to show the image stats as text overlaid on the image.
  779. box1 : ndarray or list, optional
  780. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box1 is drawn in blue.
  781. box2 : ndarray or list, optional
  782. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box2 is drawn in red.
  783. box3 : ndarray or list, optional
  784. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box3 is drawn in green.
  785. box4 : ndarray or list, optional
  786. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) rectangle drawing coords. Box4 is drawn in yellow.
  787. invert_display : bool, optional
  788. Whether or not to draw the datacube planes with 0 at the bottom (invert_display==True), \
  789. or 0 at the top (invert_display==False).
  790. show : bool, optional
  791. Whether or not to call matplotlib's `show()` function.
  792. use_colorbars : bool, optional
  793. Whether to display colorbars with the datacube images.
  794. **kwargs: any, optional
  795. Ay keyword arguments that can be used by matplotlib's `imshow()` function.
  796. Notes
  797. -----
  798. Also, the inputs to the `box1`, `box2`, `box3`, and `box4` keywords can be 4-element vectors \
  799. rather than (Nw,4) size matrices. This forces the box coordinates to be the same for all \
  800. `w`.
  801. The dark gray shaded regions of the histograms extend out to the 1*sigma value, while the \
  802. light gray shaded regions of the histograms extend out to the 2*sigma value. The central \
  803. white line in the background shows the position of the mean.
  804. Setting `log=True` will allow logarithmic scaling of the histogram plots.
  805. '''
  806. import matplotlib.pyplot as plt
  807. import matplotlib.cm as cm
  808. from matplotlib.ticker import MaxNLocator
  809. import matplotlib.patheffects as PathEffects
  810. #print('PLATFORM = ', platform.system())
  811. #dcb = asarray(dcb)
  812. assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
  813. assert (plottype.lower() in ['image','histogram','difference1','difference2']), 'That plottype value ("' + plottype + '") is not recognized.'
  814. (Nx,Ny,Nw) = dcb.shape
  815. if (waves != None):
  816. waves = asarray(waves)
  817. assert (alen(waves) == Nw), 'The input "waves" should have Nw elements, not ' + str(alen(waves)) + '.'
  818. if (plottype == 'difference1'):
  819. dcb = array(dcb[:,:,1:] - dcb[:,:,:-1])
  820. Nw -= 1
  821. print('DIFFERENCE1')
  822. elif (plottype == 'difference2'):
  823. dcb = array(dcb - dcb[:,:,::-1])
  824. print('DIFFERENCE2')
  825. aspect_ratio = Ny / float(Nx)
  826. if (Nw == 1):
  827. if (figsize == None): figsize=(6*1.65*aspect_ratio,6)
  828. plt.figure(figsize=figsize)
  829. if (plottype.lower() == 'image'):
  830. plt.imshow(dcb[:,:,0], zorder=1, **kwargs)
  831. plt.axis('tight')
  832. if use_colorbars: plt.colorbar()
  833. elif (plottype.lower() == 'histogram'):
  834. ## Note: there is a matplotlib bug for logarithmic plotting when NaNs are present.
  835. (n, bin_edges) = histogram(log10(dcb[:,:,0].flatten()), 40)
  836. meanval = mean(dcb[:,:,0].flatten())
  837. stdval = std(dcb[:,:,0].flatten())
  838. ax = plt.subplot(111)
  839. ax.fill_between([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [0, 0, amax(n)*1.1, amax(n)*1.1, 0], 0.5, color='0.75', zorder=1)
  840. ax.fill_between([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [0, 0, amax(n)*1.1, amax(n)*1.1, 0], 0.5, color='0.5', zorder=2)
  841. ax.plot([meanval, meanval], [0, amax(n)*1.1], 'w-', linewidth=1.5, zorder=3)
  842. (counts, edges, patches) = ax.hist(dcb[:,:,0].flatten(), 40, zorder=4, edgecolor='0.5', log=True, **kwargs)
  843. if (meanval > 100):
  844. statstr = 'mean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
  845. else:
  846. statstr = 'mean='+('%.2f' % meanval) + '\nstd='+('%.2f' % stdval)
  847. ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
  848. ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])
  849. else:
  850. if (figsize == None): figsize=(8*1.65*aspect_ratio,8)
  851. ncols = (ceil(sqrt(Nw)))
  852. nrows = (ceil(Nw / ncols))
  853. plt.figure(figsize=figsize)
  854. dimstr = '(Nx,Ny,Nw)=(' + str(Nx) + ',' + str(Ny) + ',' + str(Nw) + ')'
  855. plt.suptitle(dimstr + ' ' + title)
  856. for w in arange(Nw):
  857. if invert_display:
  858. c = int(w % ncols)
  859. r = int((Nw - w - 1) // ncols)
  860. else:
  861. c = int(w % ncols)
  862. r = int(w // ncols)
  863. if (plottype == 'image') or (plottype == 'histogram'):
  864. titlestr = 'w='+str(w) if (waves == None) else 'w='+str(waves[w])
  865. elif (plottype == 'difference1'):
  866. titlestr = '(w=' + str(w+1) + ') - (w=' + str(w) + ') difference'
  867. elif (plottype == 'difference2'):
  868. titlestr = '(w=' + str(Nw-1-w) + ') - (w=' + str(w) + ') difference'
  869. ## The "sharex=ax0" and "sharey=ax0" business is needed in order for zooming of one of the
  870. ## subplots is to propagate into all of the subplots. Don't turn this on for the histogram
  871. ## plot type, since we want to allow the axes to be different for each plot in that case.
  872. if (w == 0):
  873. ax0 = plt.subplot2grid((int(nrows), int(ncols)), (r,c))
  874. elif (w != 0) and (plottype.lower() == 'histogram'):
  875. ax = plt.subplot2grid((int(nrows), int(ncols)), (r,c))
  876. else:
  877. ax = plt.subplot2grid((int(nrows), int(ncols)), (r,c), sharex=ax0, sharey=ax0)
  878. if (plottype.lower() in ('image','difference1','difference2')):
  879. if (w == 0):
  880. plt.imshow(dcb[:,:,w], zorder=1, **kwargs)
  881. else:
  882. plt.imshow(dcb[:,:,w], zorder=1, **kwargs)
  883. plt.axis('tight')
  884. if use_colorbars:
  885. cb = plt.colorbar()
  886. cb.formatter.set_useOffset(False) ## turn off any offset that it may apply
  887. cb.update_ticks() ## update the new ticks for no-offset
  888. if (w == 0):
  889. ax0.set_yticklabels([])
  890. ax0.set_xticklabels([])
  891. else:
  892. ax.set_yticklabels([])
  893. ax.set_xticklabels([])
  894. plt.gca().axes.get_xaxis().set_visible(False)
  895. plt.gca().axes.get_yaxis().set_visible(False)
  896. if use_colorbars:
  897. cl = plt.getp(cb.ax, 'ymajorticklabels')
  898. plt.setp(cl, fontsize=10)
  899. plt.title(titlestr)
  900. if showstats:
  901. minval = min(dcb[:,:,w].flatten())
  902. meanval = mean(dcb[:,:,w].flatten())
  903. maxval = max(dcb[:,:,w].flatten())
  904. stdval = std(dcb[:,:,w].flatten())
  905. if isnan(meanval):
  906. sigdig = 0
  907. else:
  908. logmean = log10(abs(meanval))
  909. sigdig = 2 if (logmean >= -1) else int(-floor(logmean))
  910. fmt = '%.0f' if (meanval > 100) else '%.' + str(sigdig) + 'f'
  911. statstr = (('min=' + fmt + '\nmean=' + fmt + '\nmax=' + fmt + '\nstd=' + fmt) % (minval, meanval, maxval, stdval))
  912. if (w == 0):
  913. stattext = ax0.text(0.4, 0.4, statstr, horizontalalignment='center', transform=ax0.transAxes, zorder=2, color='k', fontsize=10)
  914. stattext.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='w')])
  915. else:
  916. stattext = ax.text(0.4, 0.4, statstr, horizontalalignment='center', transform=ax.transAxes, zorder=2, color='k', fontsize=10)
  917. stattext.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='w')])
  918. elif (plottype.lower() == 'histogram'):
  919. (counts, edges) = histogram(dcb[:,:,w].flatten(), 40)
  920. meanval = mean(dcb[:,:,w].flatten())
  921. stdval = std(dcb[:,:,w].flatten())
  922. if isnan(meanval):
  923. sigdig = 0
  924. else:
  925. logmean = log10(abs(meanval))
  926. sigdig = 2 if (logmean >= -1) else int(-floor(logmean))+1
  927. ymin = 1 if ('log' in kwargs) else 0
  928. if (w == 0):
  929. ax0.fill([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.75', zorder=1)
  930. ax0.fill([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.5', zorder=2)
  931. plt.plot([meanval, meanval], [0, amax(counts)*1.1], 'w-', linewidth=1.5, zorder=3)
  932. ax0.hist(dcb[:,:,w].flatten(), edges, zorder=4, edgecolor='0.5', **kwargs)
  933. ax0.tick_params(axis='both', which='major', labelsize=8)
  934. ax0.xaxis.set_major_locator(MaxNLocator(6))
  935. if (meanval > 100):
  936. titlestr += ':\nmean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
  937. else:
  938. titlestr += ':\nmean='+(('%.' + str(sigdig) + 'f') % meanval) + '\nstd='+(('%.' + str(sigdig) + 'f') % stdval)
  939. ax0.axis([amin(edges), amax(edges), ymin, amax(counts)*1.1])
  940. ax0.text(0.75, 0.6, titlestr, horizontalalignment='center', transform=ax0.transAxes, bbox=dict(facecolor='white', alpha=0.5), fontsize='small', zorder=5)
  941. else:
  942. ax.fill([meanval-2.0*stdval, meanval+2.0*stdval, meanval+2.0*stdval, meanval-2.0*stdval, meanval-2.0*stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.75', zorder=1)
  943. ax.fill([meanval-stdval, meanval+stdval, meanval+stdval, meanval-stdval, meanval-stdval], [ymin, ymin, amax(counts)*1.1, amax(counts)*1.1, ymin], color='0.5', zorder=2)
  944. plt.plot([meanval, meanval], [0, amax(counts)*1.1], 'w-', linewidth=1.5, zorder=3)
  945. ax.hist(dcb[:,:,w].flatten(), edges, zorder=4, edgecolor='0.5', **kwargs)
  946. ax.tick_params(axis='both', which='major', labelsize=8)
  947. ax.xaxis.set_major_locator(MaxNLocator(6))
  948. if (meanval > 100):
  949. titlestr += ':\nmean='+('%.0f' % meanval) + '\nstd='+('%.0f' % stdval)
  950. else:
  951. titlestr += ':\nmean='+(('%.' + str(sigdig) + 'f') % meanval) + '\nstd='+(('%.' + str(sigdig) + 'f') % stdval)
  952. ax.axis([amin(edges), amax(edges), ymin, amax(counts)*1.1])
  953. ax.text(0.75, 0.6, titlestr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), fontsize='small', zorder=5)
  954. if (plottype.lower() == 'image') and (box1 != None):
  955. draw_box(box1, color='b', w=w)
  956. if (plottype.lower() == 'image') and (box2 != None):
  957. draw_box(box2, color='r', w=w)
  958. if (plottype.lower() == 'image') and (box3 != None):
  959. draw_box(box3, color='g', w=w)
  960. if (plottype.lower() == 'image') and (box4 != None):
  961. draw_box(box4, color='y', w=w)
  962. if show: plt.show()
  963. return
  964. ## =================================================================================================
  965. def get_offsets_and_gains(cool_dcb, warm_dcb, cool_temps_degC, warm_temps_degC, units='radiance',
  966. poly_approx=False, debug=False):
  967. '''
  968. Find out the offset and gain values to convert raw detector counts into degC or radiance units.
  969. Parameters
  970. ----------
  971. cool_dcb : ndarray
  972. The (Nx,Ny,Nw) datacube captured when viewing the cool shutter.
  973. warm_dcb : ndarray
  974. The (Nx,Ny,Nw) datacube captured when viewing the warm shutter.
  975. cool_temps_degC : ndarray
  976. Either a scalar value or a Nw-length vector of temperatures of the cool shutter during capture.
  977. warm_temps_degC : ndarray
  978. Either a scalar value or a Nw-length vector of temperatures of the warm shutter during capture.
  979. poly_approx : bool
  980. Whether to use the polynomial approximation to the degC->radiance conversion.
  981. units : str
  982. Whether the input cube is in units of 'radiance' or 'degC'.
  983. Returns
  984. -------
  985. offset_dcb : ndarray
  986. the (Nx,Ny,Nw) datacube of offset values.
  987. gain_dcb : ndarray
  988. the (Nx,Ny,Nw) datacube of gain values.
  989. '''
  990. cool_dcb = asarray(cool_dcb)
  991. assert (cool_dcb.ndim == 3), 'Input "cool_dcb" must have three dimensions.'
  992. warm_dcb = asarray(warm_dcb)
  993. assert (warm_dcb.ndim == 3), 'Input "warm_dcb" must have three dimensions.'
  994. assert (units.lower() in ['radiance','degc']), '"units" input must be either "radiance" or "degC".'
  995. assert all(cool_temps_degC < 150.0) and all(cool_temps_degC > -150.0), 'Cool shutter temperatures fall outside the valid range!'
  996. assert all(warm_temps_degC < 150.0) and all(warm_temps_degC > -150.0), 'Warm shutter temperatures fall outside the valid range!'
  997. (Nx,Ny,Nw) = cool_dcb.shape
  998. assert ((alen(cool_temps_degC) == Nw) or (alen(cool_temps_degC) == 1)), 'The number of cool shutter temperatures must be either 1 or Nw.'
  999. assert ((alen(warm_temps_degC) == Nw) or (alen(warm_temps_degC) == 1)), 'The number of warm shutter temperatures must be either 1 or Nw.'
  1000. ## Verify that the inputs are reasonable.
  1001. if any((cool_temps_degC - warm_temps_degC) > 0.0):
  1002. print("deltaT's = ", cool_temps_degC - warm_temps_degC)
  1003. raise ValueError('get_offsets_and_gains(): There is no positive temperature delta between cool and warm shutters!')
  1004. if all(isnan(cool_dcb)) or all(isnan(warm_dcb)):
  1005. return(cool_dcb*0.0, cool_dcb*0.0)
  1006. if (alen(cool_temps_degC) == 1):
  1007. cool_temps_degC = ones(Nw) * cool_temps_degC
  1008. if (alen(warm_temps_degC) == 1):
  1009. warm_temps_degC = ones(Nw) * warm_temps_degC
  1010. if debug:
  1011. print('cool_temps=', cool_temps_degC)
  1012. print('warm_temps=', warm_temps_degC)
  1013. if (units == 'degC'):
  1014. deltaT = warm_temps_degC - cool_temps_degC
  1015. scale_offsets = cool_temps_degC
  1016. offset_dcb = cool_dcb
  1017. gain_dcb = zeros_like(cool_dcb)
  1018. for w in arange(Nw):
  1019. gain_dcb[:,:,w] = (warm_dcb[:,:,w] - cool_dcb[:,:,w]) / deltaT[w]
  1020. elif (units == 'radiance'):
  1021. filterlist = get_filterlist()
  1022. filter_data = get_filter_spectra(filterlist=filterlist)
  1023. Ecool = zeros(Nw)
  1024. Ewarm = zeros(Nw)
  1025. if poly_approx:
  1026. (LUT, temps) = get_degc_to_atsensor_radiance_lut(filterlist=filterlist, filter_data=filter_data)
  1027. new_temps = temps[1:-1]
  1028. for w in arange(Nw):
  1029. coeffs = get_poly_coeffs(new_temps, LUT[1:-1,w], 5)
  1030. if debug:
  1031. if (w == 0): print('camera coeffs[0] coeffs[1] coeffs[2] coeffs[3] coeffs[4] coeffs[5]')
  1032. print('%2i %15.6e %15.6e %15.6e %15.6e %15.6e %15.6e' % (w, coeffs[0], coeffs[1], coeffs[2], coeffs[3], coeffs[4], coeffs[5]))
  1033. Tcool = cool_temps_degC[w]
  1034. Twarm = warm_temps_degC[w]
  1035. Ecool[w] = coeffs[0] + (coeffs[1] * Tcool) + (coeffs[2] * Tcool**2) + (coeffs[3] * Tcool**3) + (coeffs[4] * Tcool**4) + (coeffs[5] * Tcool**5)
  1036. Ewarm[w] = coeffs[0] + (coeffs[1] * Twarm) + (coeffs[2] * Twarm**2) + (coeffs[3] * Twarm**3) + (coeffs[4] * Twarm**4) + (coeffs[5] * Twarm**5)
  1037. #Ecool[w] = polyeval_Horner(cool_temps_degC[w], coeffs)
  1038. #Ewarm[w] = polyeval_Horner(warm_temps_degC[w], coeffs)
  1039. else:
  1040. sensitivity = filter_data['tamarisk_responsivity'] * filter_data['tamarisk_lens_transmission'] * filter_data['germanium_window_transmission']
  1041. for w in arange(Nw):
  1042. Ecool[w] = degc_to_atsensor_radiance(cool_temps_degC[w], filter_data['waves'], filter_data[filterlist[w]], sensitivity)
  1043. Ewarm[w] = degc_to_atsensor_radiance(warm_temps_degC[w], filter_data['waves'], filter_data[filterlist[w]], sensitivity)
  1044. if debug:
  1045. print('cool_radiances=', Ecool)
  1046. print('warm_radiances=', Ewarm)
  1047. deltaE = Ewarm - Ecool
  1048. scale_offsets = Ecool
  1049. offset_dcb = cool_dcb
  1050. gain_dcb = zeros_like(cool_dcb)
  1051. for w in arange(Nw):
  1052. gain_dcb[:,:,w] = (warm_dcb[:,:,w] - cool_dcb[:,:,w]) / deltaE[w]
  1053. ## Since the gains are used in the divisor when converting to temperatures, make sure that
  1054. ## there are no zeros in the gain estimate.
  1055. if any(logical_not(isfinite(gain_dcb))):
  1056. raise ValueError('get_offsets_and_gains(): Elements in the gain cube are undefined. How did this happen?')
  1057. if any(gain_dcb == 0.0):
  1058. print('SETTING ZERO-VALUED ELEMENTS OF THE GAIN CUBE TO 1 ...')
  1059. gain_dcb[gain_dcb == 0.0] = 1.0
  1060. #if any(gain_dcb < 0.0):
  1061. # print('SETTING NEGATIVE ELEMENTS OF THE GAIN CUBE TO 1 ...')
  1062. # gain_dcb[gain_dcb < 0.0] = 1.0
  1063. return(offset_dcb, gain_dcb, scale_offsets)
  1064. ## =================================================================================================
  1065. def receive_command_replies(ctx, ncameras=12, silent=False):
  1066. '''
  1067. Given a ZMQ context, set up a subscription socket and wait for `ncameras` number of replies.
  1068. Assemble these replies into a list of messages.
  1069. Parameters
  1070. ----------
  1071. ctx : ZMQ context object
  1072. ncameras : int, optional
  1073. The number of replies to listen for
  1074. silent : bool, optional
  1075. Whether or not to print the replies as they come
  1076. Returns
  1077. -------
  1078. allreplies : list
  1079. The list of reply messages
  1080. '''
  1081. ## Socket for receiving command replies.
  1082. subsock = ctx.socket(zmq.SUB)
  1083. subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
  1084. #subsock.connect('tcp://10.1.10.10:6002')
  1085. subsock.connect('tcp://10.1.10.11:9550')
  1086. allreplies = []
  1087. #nreplies = 0
  1088. for n in arange(ncameras):
  1089. reply = subsock.recv_multipart()
  1090. if not silent: print(reply)
  1091. nreplies += 1
  1092. allreplies.append(reply)
  1093. subsock.close()
  1094. return(allreplies)
  1095. ## =================================================================================================
def get_control_data(debug=False):
    '''
    Open a ZMQ subscription socket and grab data from all available channels connected
    to the heater/shutter system. Place the (translated) results into a single dictionary.

    Parameters
    ----------
    debug : bool, optional
        If True, pretty-print the raw reply dictionary (uses PyYAML).

    Returns
    -------
    newdict : dict
        A dictionary of data labels containing the various shutter/heater values. `newdict` has the
        form {`heater_setpoint`:_, `heater_levels`:_, `heater_control_enabled`:_, `warm_shutter_position`:_,\
        `cool_shutter_position`:_, `warm_shutter_temperatures`:_, `warm_shutter_board_temperatures`:_,\
        `warm_shutter_chassis_temperatures`:_, `cool_shutter_temperatures`:_,\
        `cool_shutter_board_temperatures`:_, `cool_shutter_chassis_temperatures`:_}
        (plus the associated `*_changed` and `*_command` entries).

    Notes
    -----
    TODO: add an option to the function to call only one of the channels and not all of them,
    to get quicker responses?
    '''
    ctx = zmq.Context()
    reply_dict = {}
    ## Socket for receiving shutter/heater/thermistor data. Set a receive high-water mark
    ## because sometimes one of the messages is VERY slow in coming, and before it finally
    ## arrives ZMQ keeps sending the other messages over and over. This mainly happens when
    ## one or more cameras is misbehaving, slowing down the whole network.
    subsock = ctx.socket(zmq.SUB)
    subsock.setsockopt(zmq.HWM, 10)
    subsock.setsockopt_string(zmq.SUBSCRIBE, u'') ## subscribe to all five channels, and wait for replies from each one.
    subsock.connect('tcp://10.1.10.10:6255')
    ## Read up to 20 messages, skipping duplicate topics, until all five expected topics
    ## ("heated_heater", "heated_shutter", "unheated_shutter", "heated_temperatures",
    ## "unheated_temperatures") have been collected.
    for i in arange(20):
        reply = subsock.recv_multipart()
        if (reply[0] in reply_dict): continue
        reply_dict[reply[0]] = json.loads(reply[1])
        if ('heated_heater' in reply_dict) and ('heated_shutter' in reply_dict) and \
            ('unheated_shutter' in reply_dict) and ('heated_temperatures' in reply_dict) and \
            ('unheated_temperatures' in reply_dict):
            break
    subsock.close()
    if debug:
        #print(reply_dict)
        import yaml
        #d = json.dumps(reply_dict) ## the "payload" dictionary
        print(yaml.dump(reply_dict, indent=3, width=140))
    ## Translate the raw per-topic replies into flat, descriptively-named entries.
    ## The entries commented out here are ones I doubt we will have use for. We can turn them
    ## on any time they may be useful...
    newdict = {}
    newdict['heater_setpoint'] = reply_dict['heated_heater']['heater_set_point']['value']
    newdict['heater_setpoint_changed'] = reply_dict['heated_heater']['heater_set_point']['changed']
    newdict['heater_levels'] = reply_dict['heated_heater']['heater_levels']
    newdict['heater_control_enabled'] = reply_dict['heated_heater']['heater_control']['enabled']
    newdict['heater_control_changed'] = reply_dict['heated_heater']['heater_control']['changed']
    newdict['warm_shutter_position'] = reply_dict['heated_shutter']['shutter_position']
    newdict['warm_shutter_command'] = 'open' if reply_dict['heated_shutter']['shutter_command']['open'] else 'closed'
    newdict['warm_shutter_command_changed'] = reply_dict['heated_shutter']['shutter_command']['changed']
    newdict['cool_shutter_position'] = reply_dict['unheated_shutter']['shutter_position']
    newdict['cool_shutter_command'] = 'open' if reply_dict['unheated_shutter']['shutter_command']['open'] else 'closed'
    newdict['cool_shutter_command_changed'] = reply_dict['unheated_shutter']['shutter_command']['changed']
    newdict['warm_shutter_temperatures'] = array(reply_dict['heated_temperatures']['shutter_temperatures'])
    newdict['warm_shutter_chassis_temperatures'] = array(reply_dict['heated_temperatures']['chassis_temperatures'])
    newdict['warm_shutter_board_temperatures'] = array(reply_dict['heated_temperatures']['board_temperatures'])
    newdict['cool_shutter_temperatures'] = array(reply_dict['unheated_temperatures']['shutter_temperatures'])
    newdict['cool_shutter_chassis_temperatures'] = array(reply_dict['unheated_temperatures']['chassis_temperatures'])
    newdict['cool_shutter_board_temperatures'] = array(reply_dict['unheated_temperatures']['board_temperatures'])
    return(newdict)
  1163. ## =================================================================================================
  1164. ## shutter_name: can be ['heated_shutter', 'heated', 'warm'] or ['unheated_shutter', 'unheated', 'cool']
  1165. ## position: can be ['in', 'closed', 'down'] or ['out', 'open', 'up']
def move_shutter(shutter_name, position, debug=False):
    '''
    Move one of the GCI shutters in/out.

    Parameters
    ----------
    shutter_name : str
        {'heated_shutter', 'heated', 'warm'} if the warm shutter,
        {'unheated_shutter', 'unheated', 'cool'} if the cool shutter,
        or 'both' to move both shutters (warm first, then cool).
    position : str
        {'in', 'closed', 'close', 'down'} if moving the shutter in, {'out', 'open', 'up'}
        if moving the shutter out.
    debug : bool, optional
        Currently unused inside this function (the get_control_data() call that used
        it is commented out below).

    Notes
    -----
    For each shutter moved, the function sends the move command on the REQ socket and
    then listens on the SUB socket for up to 5 published state messages, returning
    early once the shutter reports the requested position. If the shutter never
    reports the position, the function still returns after 5 messages (no error).
    '''
    assert isinstance(shutter_name, str), 'Input "shutter_name" must be a string.'
    assert isinstance(position, str), 'Input "position" must be a string.'
    ## Normalize the shutter name to one of the internal keys: 'heated', 'unheated', or 'both'.
    if (shutter_name.lower() in ['heated_shutter','warm','heated']):
        shutter = 'heated'
    elif (shutter_name.lower() in ['unheated_shutter','cool','unheated']):
        shutter = 'unheated'
    elif (shutter_name.lower() == 'both'):
        shutter = 'both'
    else:
        raise ValueError('move_shutter(): The shutter name "' + shutter_name + '" is not recognized.')
    ## "pos" is used to send a command to move the shutter, while "query_pos" is used to
    ## ask the system where the shutter is now. Note that pos != query_pos for the "CLOSE"
    ## command!
    if (position.lower() in ['in', 'closed', 'close', 'down']):
        pos = 'CLOSE'
        query_pos = 'CLOSED'
    elif (position.lower() in ['out', 'open', 'up']):
        pos = 'OPEN'
        query_pos = pos
    else:
        raise ValueError('move_shutter(): The input position "' + position + '" is not recognized.')
    print('COMMAND: shutter=', shutter, ', pos=', pos)
    ctx = zmq.Context()
    reqrepsock = ctx.socket(zmq.REQ)
    reqrepsock.connect('tcp://10.1.10.10:6256')
    ## First check if the other shutter is already in. If so, move it out before moving this shutter in.
    #data = get_control_data(debug=debug)
    if (shutter == 'heated') or (shutter == 'both'):
        ## It's tempting to move this "reqrepsock" definition above the if-block, but doing so produces
        ## an error when shutter=="both".
        cmd = [b'heated_shutter', b'shutter='+pos]
        reqrepsock.send_multipart(cmd)
        reqrepsock.close()
        ## Next watch the shutter position state and wait until it reaches the desired position.
        subsock = ctx.socket(zmq.SUB)
        subsock.setsockopt(zmq.HWM, 5)
        subsock.setsockopt_string(zmq.SUBSCRIBE, u'heated_shutter')
        subsock.connect('tcp://10.1.10.10:6255')
        ## Poll at most 5 published state messages before giving up the wait.
        for i in arange(5):
            reply = subsock.recv_multipart()
            reply_dict = json.loads(reply[1])
            print(i, reply_dict)
            if (reply_dict['shutter_position'].upper() == query_pos):
                break
        subsock.close()
    if (shutter == 'unheated') or (shutter == 'both'):
        ## A fresh REQ socket is needed here: when shutter=='both' the first one has
        ## already been used for the heated-shutter command and closed above.
        reqrepsock = ctx.socket(zmq.REQ)
        reqrepsock.connect('tcp://10.1.10.10:6256')
        cmd = [b'unheated_shutter', b'shutter='+pos]
        reqrepsock.send_multipart(cmd)
        #reply = reqrepsock.recv_multipart()
        reqrepsock.close()
        ## Next watch the shutter position state and wait until it reaches the desired position.
        subsock = ctx.socket(zmq.SUB)
        subsock.setsockopt(zmq.HWM, 5)
        subsock.setsockopt_string(zmq.SUBSCRIBE, u'unheated_shutter')
        subsock.connect('tcp://10.1.10.10:6255')
        for i in arange(5):
            reply = subsock.recv_multipart()
            reply_dict = json.loads(reply[1])
            print(i, reply_dict)
            if (reply_dict['shutter_position'].upper() == query_pos):
                break
        subsock.close()
    return
  1243. ## =================================================================================================
  1244. def set_heater_setpoint(setpoint):
  1245. '''
  1246. Change the heater setpoint value.
  1247. Parameters
  1248. ----------
  1249. setpoint : float
  1250. The temperature (in degC) to set the heater setpoint to.
  1251. '''
  1252. ctx = zmq.Context()
  1253. reqrepsock = ctx.socket(zmq.REQ)
  1254. reqrepsock.connect('tcp://10.1.10.10:6256')
  1255. cmd = [b'heated_heater', b'setpoint='+str(setpoint)]
  1256. reqrepsock.send_multipart(cmd)
  1257. reply = reqrepsock.recv_multipart()
  1258. #print('reply=', reply)
  1259. reqrepsock.close()
  1260. return
  1261. ## =================================================================================================
  1262. def set_heater_state(on_or_off):
  1263. '''
  1264. Change the heater state on/off.
  1265. Parameters
  1266. ----------
  1267. on_or_off : str
  1268. {'on','off'} turn the heater on or off.
  1269. '''
  1270. state = on_or_off.upper()
  1271. assert (state in ['ON','OFF']), 'The input "on_or_off" value ("' + on_or_off + '") is not recognized.'
  1272. ctx = zmq.Context()
  1273. reqrepsock = ctx.socket(zmq.REQ)
  1274. reqrepsock.connect('tcp://10.1.10.10:6256')
  1275. cmd = [b'heated_heater', b'heater='+state]
  1276. reqrepsock.send_multipart(cmd)
  1277. reply = reqrepsock.recv_multipart()
  1278. print('set_heater_state() reply=', reply)
  1279. reqrepsock.close()
  1280. return
  1281. ## =================================================================================================
  1282. def replace_bad_pixels(badimg, xbad, ybad):
  1283. '''
  1284. Replace any bad pixels in an image with interpolated values from their neighbors.
  1285. Parameters
  1286. ----------
  1287. badimg : ndarray
  1288. The 2D input image (containing bad pixels).
  1289. xbad : ndarray
  1290. The vector of x-locations of bad pixels.
  1291. ybad : ndarray
  1292. The vector of y-locations of bad pixels.
  1293. Returns
  1294. -------
  1295. img : ndarray
  1296. A copy of the input image with bad pixels replaced.
  1297. Notes
  1298. -----
  1299. The tricky part here is all in the geometry. For any given bad pixel, one must search the
  1300. surrounding 8 pixels to make sure that these pixels (which are used to interpolate)
  1301. are not themselves bad pixels. And, if at the edge of an array or a corner, then the
  1302. search space is smaller than 8 pixels. Thus all of the if-elif statements below.
  1303. '''
  1304. badimg = asarray(badimg)
  1305. xbad = asarray(xbad)
  1306. ybad = asarray(ybad)
  1307. img = badimg.copy()
  1308. dims = img.shape
  1309. nbad = alen(xbad)
  1310. badlist = zip(ybad,xbad)
  1311. ## First figure out whether the pixel we are examing is at an edge (top,left,bottom, or right)
  1312. ## or at a corner. The pixels to interpolate across are different in each case.
  1313. for i in arange(nbad):
  1314. xlo = (xbad[i] == 0)
  1315. xhi = (xbad[i] == dims[1]-1)
  1316. ylo = (ybad[i] == 0)
  1317. yhi = (ybad[i] == dims[0]-1)
  1318. xok = (not xlo) and (not xhi)
  1319. yok = (not ylo) and (not yhi)
  1320. if xok and yok:
  1321. xj = [-1,0,1,-1,1,-1,0,1]
  1322. yj = [-1,-1,-1,0,0,1,1,1]
  1323. elif xlo and yok:
  1324. xj = [0,1,1,0,1]
  1325. yj = [-1,-1,0,1,1]
  1326. elif xok and ylo:
  1327. xj = [-1,1,-1,0,1]
  1328. yj = [0,0,1,1,1]
  1329. elif xhi and yok:
  1330. xj = [-1,0,-1,-1,0]
  1331. yj = [-1,-1,0,1,1]
  1332. elif xok and yhi:
  1333. xj = [-1,0,1,-1,1]
  1334. yj = [-1,-1,-1,0,0]
  1335. elif xlo and ylo:
  1336. xj = [1,0,1]
  1337. yj = [0,1,1]
  1338. elif xhi and yhi:
  1339. xj = [-1,0,-1]
  1340. yj = [-1,-1,0]
  1341. elif xlo and yhi:
  1342. xj = [0,1,1]
  1343. yj = [-1,-1,0]
  1344. elif xhi and ylo:
  1345. xj = [-1,-1,0]
  1346. yj = [0,1,1]
  1347. else:
  1348. print('ERROR! How did you wind up here? Maybe a bad definition of the dead pixels?')
  1349. xj = asarray(xj)
  1350. yj = asarray(yj)
  1351. z = zip(ybad[i]+yj,xbad[i]+xj)
  1352. w = 1.0 / sqrt(asarray(xj)**2 + asarray(yj)**2)
  1353. ## Determine whether any of the surrounding pixels (the ones we wish to use for interpolating
  1354. ## to replace the bad pixel) are themselves bad. If bad, then skip them when calculating the
  1355. ## average. Also, give direct neighbors a weight of 1 while diagonal pixels get a weight of
  1356. ## 1/sqrt(2). The final interpolated average is a weighted average between them.
  1357. bad = [item in badlist for item in z]
  1358. newval = 0.0
  1359. weight = 0.0
  1360. for j in range(alen(xj)):
  1361. if not bad[j]:
  1362. newval += (img[z[j]]) * w[j]
  1363. weight += w[j]
  1364. newval = round(newval / weight)
  1365. #print('(ybad,xbad)=(' + str(badlist[i]) + '): bad = ' + str(bad) + ', weight=' + str(weight) + ', newval=' + str(newval))
  1366. img[badlist[i]] = newval
  1367. return(img)
  1368. ## =================================================================================================
  1369. def degc_to_atsensor_radiance(T_celsius, waves_um, filter_spectrum, sensitivity, emissivity=1.0):
  1370. '''
  1371. Convert from degC temperature units to at-sensor radiance units, using the Planck blackbody
  1372. curve, the emissivity of the surface, the sensitivity spectrum (responsivity tmies optical
  1373. transmission) of the detector, and the filter transmission spectrum.
  1374. Parameters
  1375. ----------
  1376. T_celsius : float
  1377. The surface temperature in degC.
  1378. waves_um : ndarray
  1379. The vector of wavelengths over which to analyze the spectrum.
  1380. filter_spectrum : ndarray
  1381. The transmission spectrum of the filter. The spectrum must be sampled at the wavelength \
  1382. points given by `waves_um`.
  1383. sensitivity : ndarray
  1384. The sensitivity spectrum of the detector. The spectrum must be sampled at the wavelength \
  1385. points given by `waves_um`.
  1386. emissivity : ndarray
  1387. The emissivity spectrum of the surface. The spectrum must be sampled at the wavelength \
  1388. points given by `waves_um`.
  1389. Returns
  1390. -------
  1391. watts_per_cm2_per_sr : float
  1392. The at-sensor radiance integrated across the full spectral range given by `waves_um`.
  1393. '''
  1394. T_celsius = asarray(T_celsius)
  1395. waves_um = asarray(waves_um)
  1396. assert (alen(filter_spectrum) == alen(waves_um)), 'The input "filter_spectrum" must be the same length as "waves_um".'
  1397. assert (amin(filter_spectrum) >= 0.0) and (amax(filter_spectrum) <= 1.0), 'The input "filter_spectrum" must everywhere be in the range 0.0-1.0.'
  1398. assert (alen(sensitivity) == alen(waves_um)), 'The input "sensitivity" must be the same length as "waves_um".'
  1399. assert (amin(sensitivity) >= 0.0) and (amax(sensitivity) <= 1.0), 'The input "sensitivity" spectrum must everywhere be in the range 0.0-1.0.'
  1400. assert (alen(emissivity) in (1, alen(waves_um))), 'The input "emissivity" must be the either length 1 or the same length as "waves_um".'
  1401. assert (amin(emissivity) >= 0.0) and (amax(emissivity) <= 1.0), 'The input emissivity must be in the range 0.0-1.0.'
  1402. planck_spectral_radiance = planck_blackbody(waves_um, T_celsius)
  1403. watts_per_cm2_per_sr = sum(planck_spectral_radiance * filter_spectrum * sensitivity * emissivity * gradient(waves_um))
  1404. return(watts_per_cm2_per_sr)
  1405. ## =================================================================================================
  1406. def get_filter_spectra(waves=None, filterlist=None, path=None, debug=False):
  1407. '''
  1408. Get the transmission spectra corresponding to a list of filters. If the list is not given, then just grab
  1409. all transmission spectra located in the data archive directory.
  1410. The function returns a dictionary whose keys are the generic filter names (i.e. "LP-11450", etc),
  1411. and whose values are the transmission spectra associated with the filter. Also included in the dictionary is
  1412. the "waves" key which gives the wavelengths of all of the associated spectra (equal to the "waves" input
  1413. argument of the function). All of the spectra have been interpolated onto the "waves" sampling grid. Also
  1414. included in the dictionary are the non-filters "transmission" (of the atmosphere), "emission" (of the
  1415. atmosphere), the "optical_transmission", and "responsivity" (of the Tamarisk sensor), since these spectra are also included as `*.dat`
  1416. files in the spectral data directory. If "filterlist" is included as an input argument, then any filter
  1417. uniqueIDs that are *not* the filter list will be ignored and not included in the dictionary returned.
  1418. Parameters
  1419. ----------
  1420. waves : ndarray, optional
  1421. A vector of wavelengths (in microns) at which to sample the spectra.
  1422. filterlist : list, optional
  1423. A list of strings giving the uniqueIDs of the filters of interest. Ignore any \
  1424. filters not in this list.
  1425. path : str
  1426. A directory giving a nonstandard location for getting the spectral data.
  1427. Returns
  1428. -------
  1429. datadict : dict
  1430. A dictionary whose keys are the filter uniqueIDs, and whose values contain the corresponding \
  1431. transmission spectra. Added to the dictionary is an extra `waves` key giving the wavelength \
  1432. sampling of all spectra included in the dictionary.
  1433. Notes
  1434. -----
  1435. In general, the filter spectra located in the given path are assumed to be encoded in two-column text
  1436. format, with wavelengths in the first column and transmission in the second. (This can be either %
  1437. transmission or absolute transmission --- the code below will convert the former to absolute
  1438. transmission.) The transmission spectra are not assumed to be sampled on a uniform grid, and so the
  1439. code below interpolates the input from the file onto a uniformly-sampled grid.
  1440. In addition, any unphysical values (trans < 0.0 or trans > 1.0) are truncated to the physical limit.
  1441. '''
  1442. from scipy.interpolate import interp1d
  1443. fdir = GCIDIR + 'cal/filter_spectra/' if (path == None) else path
  1444. if debug: print('Loading files from ' + fdir)
  1445. files = glob.glob(os.path.normpath(fdir + '*.dat'))
  1446. nfiles = alen(files)
  1447. if debug: print('Found ' + str(nfiles) + ' files')
  1448. datadict = {}
  1449. if (waves == None):
  1450. #waves = linspace(6.0, 20.0, 500)
  1451. waves = linspace(7.0, 18.0, 500)
  1452. else:
  1453. waves = asarray(waves)
  1454. nwaves = alen(waves)
  1455. datadict['waves'] = waves
  1456. wavemin = amin(waves)
  1457. wavemax = amax(waves)
  1458. datadict['none'] = ones(nwaves)
  1459. ids = []
  1460. labels = []
  1461. filelist = []
  1462. extras = ('tamarisk_responsivity', 'tamarisk_lens_transmission', 'germanium_window_transmission')
  1463. ## Process the filename to extract a string that represents the filter's designed "center wavelength". Use that string
  1464. ## label as a key for each dictionary entry (i.e. for each wavelength/intensity spectrum).
  1465. for i,f in enumerate(files):
  1466. (head,tail) = os.path.split(f)
  1467. uniqueID = tail[:-4]
  1468. if (filterlist != None) and (uniqueID not in filterlist) and (uniqueID not in extras): continue
  1469. uniqueID_elements = uniqueID.split('_')
  1470. filtername = uniqueID_elements[1]
  1471. if (filtername.count('-') > 1):
  1472. idx = filtername.rindex('-')
  1473. filtername = filtername[:idx]
  1474. ## Make a map of uniqueID to generic name.
  1475. if debug: print(i, 'filtername=', filtername, uniqueID)
  1476. ids = append(ids, uniqueID)
  1477. labels = append(labels, filtername)
  1478. filelist = append(filelist, f)
  1479. ## Here (wav,spect) are from the data read from the file, and (wave,spectrum) are the *interpolated*
  1480. ## results we want as output.
  1481. for i,f in enumerate(filelist):
  1482. data = genfromtxt(f)
  1483. (wav,spect) = transpose(data)
  1484. ## Transpose the data if given in descending order.
  1485. if (wav[0] > wav[-1]):
  1486. wav = wav[::-1]
  1487. spect = spect[::-1]
  1488. if (amin(wav) > 1000.0): wav = wav / 1000.0 ## convert nm to um
  1489. if (amin(spect) < 0.0): spect[spect < 0.0] = 0.0 ## fix unphysical negative values
  1490. if (amax(spect) > 2.0): spect = spect / 100.0 ## convert % to fraction transmission
  1491. if (amax(spect) > 1.0): spect[spect > 1.0] = 1.0 ## fix unphysical values
  1492. ## If the raw data does not support the full desired spectral range, then extrapolate the
  1493. ## ends of the spectral to the full spectral range.
  1494. if (amin(wav) > wavemin):
  1495. wav = append(wavemin, wav)
  1496. spect = append(spect[0], spect)
  1497. if (amax(wav) < wavemax):
  1498. wav = append(wav, wavemax)
  1499. spect = append(spect, spect[-1])
  1500. if debug:
  1501. print(i, f)
  1502. print('wavemin=%f, wavemax=%f, amin(wav)=%f, amax(wav)=%f' % (wavemin, wavemax, amin(wav), amax(wav)))
  1503. #zzz
  1504. ## If you want to test changing the amplitude of any given filter, then the easiest way is probably to
  1505. ## use something like the commented lines below:
  1506. #if ('LP11000' in f): spect = 0.5 * spect
  1507. #if ('SP11578' in f): spect = 0.5 * spect
  1508. #if ('LP8305' in f): spect = 0.5 * spect
  1509. func = interp1d(wav, spect)
  1510. spectrum = func(waves)
  1511. datadict[ids[i]] = spectrum
  1512. #datadict[labels[i]] = spectrum
  1513. if debug:
  1514. for key in datadict:
  1515. if (key == 'waves'): continue
  1516. if (alen(datadict[key]) != nwaves): continue
  1517. import matplotlib.pyplot as plt
  1518. plt.figure()
  1519. plt.plot(datadict['waves'], datadict[key])
  1520. plt.title(key)
  1521. plt.show()
  1522. if ('tamarisk_responsivity') in datadict:
  1523. datadict['responsivity'] = datadict['tamarisk_responsivity']
  1524. return(datadict)
  1525. ## =================================================================================================
  1526. def get_filter_edge_wavelength(waves, trans, filtertype='LP'):
  1527. '''
  1528. For a given input pair of wavelengths and transmission spectrum, locate the (interpolated) wavelength
  1529. which we can call the "edge wavelength" --- where the transmission passes through the "halfway point".
  1530. For a perfect filter, the halfway point is at exactly transmission==0.5. For a real filter, we take the
  1531. the halfway point to be 0.5 times the filter's mean transmission along its passband region.
  1532. Parameters
  1533. ----------
  1534. waves : ndarray
  1535. A vector of wavelengths at which the filter transmission spectrum is sampled.
  1536. trans : ndarray
  1537. The filter transmission spectrum.
  1538. filtertype : str
  1539. {'longpass','shortpass','broadband'} the type of filter
  1540. Returns
  1541. -------
  1542. waveval : float
  1543. The wavelength of the filter halfway point.
  1544. spectval : float
  1545. The transmission value of the filter at the halfway point.
  1546. '''
  1547. waves = asarray(waves)
  1548. assert (alen(trans) == alen(waves)), 'The input "trans" must be the same length as "waves".'
  1549. ## Make a first guess for the edge wavelength, then get the mean transmission of the filter
  1550. ## above/below the edge wavelength.
  1551. if (filtertype == 'LP'):
  1552. edge = (trans > 0.5*amax(trans))
  1553. edgewave = waves[edge][0]
  1554. top = (waves > (edgewave+0.5)) * (waves < 15.0)
  1555. mean_trans = mean(trans[top])
  1556. idx = find_nearest_brackets(trans[waves < 15.0], 0.5*mean_trans)
  1557. elif (filtertype == 'SP'):
  1558. edge = (trans > 0.5*amax(trans))
  1559. edgewave = waves[edge][-1]
  1560. bot = (waves < (edgewave-0.5))
  1561. mean_trans = mean(trans[bot])
  1562. idx = find_nearest_brackets(trans, 0.5*mean_trans)
  1563. else:
  1564. return(nan, nan)
  1565. slope = (trans[idx[1]] - trans[idx[0]]) / (waves[idx[1]] - waves[idx[0]])
  1566. spectval = 0.5 * mean_trans
  1567. if (slope == 0.0):
  1568. waveval = waves[idx[1]]
  1569. else:
  1570. waveval = waves[idx[0]] + (spectval - trans[idx[0]]) / slope
  1571. return(waveval, spectval)
  1572. ## =================================================================================================
  1573. def send_command(cmd, arg=None, read_replies=0, delay=0, debug=False):
  1574. '''
  1575. Issue one of the "miscellaneous" command messages to the GCI system. This function only uses
  1576. "publication" sockets, which is why the heater commands do not appear here.
  1577. Parameters
  1578. ----------
  1579. cmd : str
  1580. any one of the following strings:
  1581. * "enable_auto_calibration": turn off the CPU-based autocalibration (not the on-camera autocal).
  1582. * "disable_auto_calibration": turn on the CPU-based autocalibration (not the on-camera autocal).
  1583. * "enable_preheat": turn on the pre-heating leading to an autocal.
  1584. * "disable_preheat": turn off the pre-heating leading to an autocal.
  1585. * "trigger_1pt_calibration": tell the system to perform a 1pt calibration (not on-camera but with the CPU).
  1586. * "trigger_2pt_calibration": tell the system to perform a 1pt calibration (not on-camera but with the CPU).
  1587. * "enable_camera_autocal": tell all cameras to enable their regular (on-camera) 1pt autocal.
  1588. * "disable_camera_autocal": tell all cameras to disable their regular (on-camera) 1pt autocal.
  1589. * "enable_autocal_activity_control": turn on all cameras' control for performing range-change-induced on-camera autocals.
  1590. * "disable_autocal_activity_control": turn off all cameras' control for performing range-change-induced on-camera autocals.
  1591. * "get_system_status'": [?]
  1592. * "query_calibration_pending'": query all cameras to see if they are requesting an autocal.
  1593. * "get_exposure_mode": ask for the VIS camera's exposure mode.
  1594. * "set_exposure_mode": set the VIS camera's exposure mode. The possible values are {'Off', 'Timed',\
  1595. 'TriggerWidth', and 'IOExposureControl'}. If you want to allow manual changes to the exposure time,\
  1596. the mode needs to be 'Timed'.
  1597. * "set_vis_exposure_time": set the visible camera's exposure time in microseconds.
  1598. * "get_vis_exposure_time": get the visible camera's exposure time in microseconds.
  1599. * "get_vis_camera_setup_file": get the XML-formatted file which lists all of the VIS camera setup\
  1600. parameters, their data types, and what options are available for their settings. If "arg" is\
  1601. set to a filename, then the XML data will also be saved to that file.
  1602. * "get_vis_whitebalance_mode": get the VIS camera's white balance mode.
  1603. * "set_vis_whitebalance_mode": set the VIS camera's white balance mode. The possible values are 'Off',\
  1604. 'Once', 'Auto', and 'Manual'.
  1605. arg : any
  1606. Some commands require an input argument (can be any data type).
  1607. read_replies : int
  1608. 0 if you don't want to check or wait for replies from the cameras; set to some number
  1609. 0 < # <= Nw to say how many cameras' replies you wish to read (i.e. if one camera is having
  1610. communication problems, you may want to set "read_replies=(Nw-1)").
  1611. delay : float
  1612. The amount of time in seconds to delay after sending the message before returning from the function.
  1613. '''
  1614. import time
  1615. assert isinstance(cmd, str), 'The input "cmd" must be a string type.'
  1616. ctx = zmq.Context()
  1617. pubsock = ctx.socket(zmq.PUB)
  1618. if (cmd in ['enable_auto_calibration','disable_auto_calibration','enable_preheat','disable_preheat',\
  1619. 'trigger_1pt_calibration','trigger_2pt_calibration']):
  1620. command_type = 'system'
  1621. elif (cmd in ['enable_camera_autocal', 'disable_camera_autocal', 'enable_autocal_activity_control', \
  1622. 'disable_autocal_activity_control', 'field_calibrate', 'get_system_status', 'query_calibration_pending']):
  1623. command_type = 'IR_cameras'
  1624. elif (cmd in ['get_exposure_mode', 'set_exposure_mode', 'set_camera_param', 'get_genicam_xml', 'get_camera_image_settings']):
  1625. command_type = 'VIS_camera'
  1626. else:
  1627. raise ValueError('The command "' + cmd + '" is not recognized.')
  1628. ## Assemble the ZMQ message.
  1629. if (command_type == 'system'):
  1630. pubsock.connect('tcp://10.1.10.10:5001')
  1631. ## The six "calibration" commands do not issue replies, so turn "read_replies" off.
  1632. read_replies = 0
  1633. zmq_msg = [b'calibration-control', b'GENERIC_CMD']
  1634. zmq_msg.append(bytearray(cmd))
  1635. elif (command_type == 'IR_cameras'):
  1636. #pubsock.connect('tcp://10.1.10.10:6001')
  1637. pubsock.connect('tcp://10.1.10.11:9550')
  1638. zmq_msg = [b'IR', b'SERIAL_CMD']
  1639. if (cmd == 'enable_camera_autocal'):
  1640. zmq_msg.append(b'auto_calibration_toggle')
  1641. zmq_msg.append(b'Enable')
  1642. elif (cmd == 'disable_camera_autocal'):
  1643. zmq_msg.append(b'auto_calibration_toggle')
  1644. zmq_msg.append(b'Disable' )
  1645. elif (cmd == 'enable_autocal_activity_control'):
  1646. zmq_msg.append(b'auto_cal_activity_control')
  1647. zmq_msg.append(b'Enable' )
  1648. elif (cmd == 'disable_autocal_activity_control'):
  1649. zmq_msg.append(b'auto_cal_activity_control')
  1650. zmq_msg.append(b'Disable' )
  1651. elif (cmd == 'field_calibrate'):
  1652. zmq_msg.append(b'field_calibrate')
  1653. zmq_msg.append(b'1point_cal_shutter_disabled')
  1654. elif (cmd == 'get_system_status'):
  1655. read_replies = 12
  1656. zmq_msg.append(b'system_status_get')
  1657. elif (cmd == 'query_calibration_pending'):
  1658. read_replies = 12
  1659. zmq_msg.append(b'query_calibration_pending')
  1660. elif (command_type == 'VIS_camera'):
  1661. #pubsock.connect('tcp://10.1.10.10:6001')
  1662. pubsock.connect('tcp://10.1.10.11:9550')
  1663. zmq_msg = [b'VIS_0', b'GENERIC_CMD']
  1664. if (cmd == 'get_exposure_mode'):
  1665. read_replies = 1
  1666. zmq_msg.append(b'get_camera_param')
  1667. zmq_msg.append(b'ExposureMode:enum')
  1668. elif (cmd == 'set_exposure_mode'):
  1669. mode = 'Timed' if (arg == None) else arg
  1670. zmq_msg.append(b'set_camera_param')
  1671. zmq_msg.append(b'ExposureMode:enum:'+mode)
  1672. if (delay < 0.2): delay = 0.2
  1673. elif (cmd == 'get_exposure_time'):
  1674. read_replies = 1
  1675. zmq_msg.append(b'get_camera_param')
  1676. zmq_msg.append(b'ExposureTimeRaw:int')
  1677. elif (cmd == 'set_exposure_time'):
  1678. exposure_time_microsec = 10000 if (arg == None) else int(arg)
  1679. zmq_msg.append(b'set_camera_param')
  1680. zmq_msg.append(b'ExposureTimeRaw:int:'+str(exposure_time_microsec))
  1681. if (delay < 0.2): delay = 0.2
  1682. elif (cmd =='get_vis_camera_setup_file'):
  1683. zmq_msg.append(b'get_genicam_xml')
  1684. elif (cmd == 'get_vis_whitebalance_mode'):
  1685. read_replies = 1
  1686. zmq_msg.append(b'get_camera_param')
  1687. zmq_msg.append(b'WhiteBalanceMode:enum')
  1688. elif (cmd == 'set_vis_whitebalance_mode'):
  1689. mode = 'Off' if (arg == None) else arg
  1690. zmq_msg.append(b'set_camera_param')
  1691. zmq_msg.append(b'WhiteBalanceMode:enum:'+mode)
  1692. if (delay < 0.2): delay = 0.2
  1693. elif (cmd == 'get_camera_image_settings'):
  1694. zmq_msg.append(b'get_camera_param')
  1695. zmq_msg.append(b'ImageSizeControl:enum')
  1696. ## Now go ahead and send the message.
  1697. pubsock.send_multipart(zmq_msg)
  1698. pubsock.close()
  1699. ## After sending the command, now listen for replies.
  1700. if (read_replies > 0):
  1701. if (cmd == 'get_system_status'):
  1702. allreplies = receive_command_replies(ctx, ncameras=read_replies, silent=True)
  1703. all_status = {}
  1704. for reply in allreplies:
  1705. d = {}
  1706. if ('ERROR' in reply) or ('EXTVID' not in reply):
  1707. d['EXTVID'] = 'ERROR'
  1708. d['CAL'] = 'ERROR'
  1709. d['AGC'] = 'ERROR'
  1710. d['SHUTTER'] = 'ERROR'
  1711. d['POL'] = 'ERROR'
  1712. d['Manual Gain'] = 'ERROR'
  1713. d['Manual Level'] = 'ERROR'
  1714. d['Gain Bias'] = 'ERROR'
  1715. d['Level Bias'] = 'ERROR'
  1716. else:
  1717. idx = reply.index('EXTVID')
  1718. d[reply[idx]] = reply[idx+1]
  1719. idx = reply.index('CAL')
  1720. d[reply[idx]] = reply[idx+1]
  1721. idx = reply.index('AGC')
  1722. d[reply[idx]] = reply[idx+1]
  1723. idx = reply.index('SHUTTER')
  1724. d[reply[idx]] = reply[idx+1]
  1725. idx = reply.index('POL')
  1726. d[reply[idx]] = reply[idx+1]
  1727. idx = reply.index('Manual Gain')
  1728. d[reply[idx]] = reply[idx+1]
  1729. idx = reply.index('Manual Level')
  1730. d[reply[idx]] = reply[idx+1]
  1731. idx = reply.index('Gain Bias')
  1732. d[reply[idx]] = reply[idx+1]
  1733. idx = reply.index('Level Bias')
  1734. d[reply[idx]] = reply[idx+1]
  1735. if reply[0].startswith('IR_'):
  1736. cameraname = reply[0]
  1737. elif reply[0].startswith('IR') and reply[3].startswith('IR_'):
  1738. cameraname = reply[3]
  1739. else:
  1740. cameraname = 'UNKNOWN'
  1741. all_status[cameraname] = d
  1742. if debug:
  1743. for k in all_status: print(k + ': ' + str(all_status[k]))
  1744. time.sleep(delay)
  1745. return(all_status)
  1746. elif (cmd == 'query_calibration_pending'):
  1747. allreplies = receive_command_replies(ctx, ncameras=read_replies, silent=True)
  1748. all_status = {}
  1749. for reply in allreplies:
  1750. if reply[0].startswith('IR_'):
  1751. cameraname = reply[0]
  1752. elif reply[0].startswith('IR') and reply[3].startswith('IR_'):
  1753. cameraname = reply[3]
  1754. else:
  1755. cameraname = 'UNKNOWN'
  1756. if ('query_calibration_pending' in reply):
  1757. all_status[cameraname] = reply[4]
  1758. else:
  1759. all_status[cameraname] = 'ERROR'
  1760. time.sleep(delay)
  1761. return(all_status)
  1762. elif (cmd in ['get_exposure_mode', 'get_exposure_time', 'get_vis_whitebalance_mode', 'get_camera_image_settings']):
  1763. reply = receive_command_replies(ctx, ncameras=read_replies, silent=False)
  1764. print(reply)
  1765. if debug:
  1766. for k in all_status: print(k + ': ' + str(all_status[k]))
  1767. time.sleep(delay)
  1768. return(reply[5])
  1769. elif (cmd == 'get_vis_camera_setup_file'):
  1770. subsock = ctx.socket(zmq.SUB)
  1771. subsock.setsockopt_string(zmq.SUBSCRIBE, u'')
  1772. #subsock.connect('tcp://10.1.10.10:6002')
  1773. subsock.connect('tcp://10.1.10.11:9551')
  1774. reply = subsock.recv_multipart()
  1775. xmlmsg = reply[4]
  1776. if (arg != None):
  1777. f = open(arg,'w', encoding='utf-8')
  1778. f.write(xmlmsg)
  1779. f.close()
  1780. subsock.close()
  1781. time.sleep(delay)
  1782. return(xmlmsg)
  1783. else:
  1784. receive_command_replies(ctx, ncameras=read_replies, silent=False)
  1785. time.sleep(delay)
  1786. return
  1787. else:
  1788. ## This section is typically reached when using the "system" commands. After setting one
  1789. ## of these, we should add a delay of at least 0.1 sec.
  1790. if (delay < 0.1): delay = 0.1
  1791. time.sleep(delay)
  1792. return
  1793. ## =======================================================================================
  1794. def show_image_with_projections(img, title='', windowsize='normal', **kwargs):
  1795. '''
  1796. Display the image together with its x- and y-sums (or "marginals") displayed on the side.
  1797. This is very useful for manually detecting the field reference aperture edges.
  1798. Parameters
  1799. ----------
  1800. img : ndarray
  1801. The 2D image to display.
  1802. title : str
  1803. Any title to put above the image
  1804. windowsize : str
  1805. {'small','normal','large','xlarge'}
  1806. **kwargs : any
  1807. Any keyword arguments that can be used by matplotlib's `imshow()` function.
  1808. '''
  1809. import matplotlib.pyplot as plt
  1810. import matplotlib.gridspec as gridspec
  1811. img = asarray(img)
  1812. (Nx,Ny) = img.shape
  1813. if windowsize == 'small':
  1814. plt.figure(figsize=(9,6))
  1815. elif windowsize == 'normal':
  1816. plt.figure(figsize=(12,8))
  1817. elif (windowsize == 'large'):
  1818. plt.figure(figsize=(15,10))
  1819. elif (windowsize == 'xlarge'):
  1820. plt.figure(figsize=(17,12))
  1821. gs = gridspec.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1,3])
  1822. gs.update(hspace=0.05, wspace=0.05)
  1823. ax_xproj = plt.subplot(gs[0])
  1824. ## Note: for the weird "100" and "50" in the aspect ratio, these are values which approximate the
  1825. ## extra space needed to produce the tick labels and axis labels. These are apparently not taken
  1826. ## into account by the plt.subplot() routine when determining the aspect ratio of the plot.
  1827. aspect_ratio = float(Ny-100) / Nx
  1828. ax_image = plt.subplot(gs[2], aspect=aspect_ratio)
  1829. ## Draw the main image.
  1830. #ax_image.imshow(img, aspect=aspect_ratio, **kwargs)
  1831. plt.imshow(img, aspect=aspect_ratio, **kwargs)
  1832. plt.xlabel('y')
  1833. plt.ylabel('x')
  1834. ax_yproj = plt.subplot(gs[3])
  1835. ## Make ticklabels invisible for the x-projection's x-axis and y-projection's y-axis.
  1836. for tl in ax_xproj.get_xticklabels():
  1837. tl.set_visible(False)
  1838. #for tl in ax_image.get_xticklabels() + ax_image.get_yticklabels():
  1839. # tl.set_visible(False)
  1840. for tl in ax_yproj.get_yticklabels():
  1841. tl.set_visible(False)
  1842. ## Only allow up to four ticks in the y-projection. Otherwise the labels run together.
  1843. #ax_yproj.locator_params(nbins=5)
  1844. ## Set the image display limits to fit exactly.
  1845. ax_image.set_xlim((0,Ny-1))
  1846. ax_image.set_ylim((0,Nx-1))
  1847. xmarginal = sum(img, axis=0)
  1848. ymarginal = sum(img, axis=1)
  1849. ax_xproj.plot(arange(Ny), xmarginal)
  1850. ax_yproj.plot(ymarginal, arange(Nx))
  1851. ax_xproj.set_xlim(ax_image.get_xlim())
  1852. ax_yproj.set_ylim(ax_image.get_ylim())
  1853. if (title != ''): plt.title(title)
  1854. return
  1855. ## =================================================================================================
  1856. def dynamically_correct_dcb(dcb_meas, dcb_cal=None, rect_dict=None, refcam=9, serial_number=None, \
  1857. saturation_ceiling=90.0, delta_output=None, beta_output=None,
  1858. debug=False):
  1859. '''
  1860. Apply the dynamic correction algorithm to the datacube. The input datacube should be in at-sensor
  1861. radiance units.
  1862. Parameters
  1863. ----------
  1864. dcb_meas : ndarray
  1865. The (Nx,Ny,Nw) datacube of a valid scene (in at-sensor radiance units)
  1866. dcb_cal : ndarray, optional
  1867. The (Nx,Ny,Nw) datacube of the calibration scene (different than the measurement scene) \
  1868. in at-sensor radiance units.
  1869. rect_dict : str or dict, optional
  1870. The rectangles dictionary, or the filename of it. This contains the coordinates of the \
  1871. field reference aperture and the scene reference aperture.
  1872. refcam : int
  1873. Wich camera index to use for the reference camera
  1874. silent : bool
  1875. Whether to silently truncate data that extends beyond the interpolation table. If False, \
  1876. this case throws an exception.
  1877. saturation_ceiling : float
  1878. The value above which all the datacube pixels are truncated.
  1879. delta_output: ndarray
  1880. An output array to hold the dynamic cal's offset values.
  1881. beta_output: ndarray
  1882. An output array to hold the dynamic cal's gain values.
  1883. Returns
  1884. -------
  1885. corr_meas_dcb : ndarray
  1886. The corrected datacube (also in degC units).
  1887. Notes
  1888. -----
  1889. We use the lookup table to convert at-sensor radiance back to estimated object temperature by \
  1890. assuming the light is emitted by a blackbody with no path radiance or path absorption. Currently \
  1891. all cameras are assigned to match camera #9 by default; change the `refcam` input argument to \
  1892. change that. For abbreviation, we use `rd` = "rectangles dictionary".
  1893. '''
  1894. dcb_meas = asarray(dcb_meas)
  1895. assert (dcb_meas.ndim == 3), 'Input "dcb_meas" must have three dimensions.'
  1896. if (dcb_cal == None): dcb_cal = dcb_meas
  1897. dcb_cal = asarray(dcb_cal)
  1898. assert (dcb_cal.ndim == 3), 'Input "dcb_cal" must have three dimensions.'
  1899. assert (dcb_meas.shape == dcb_cal.shape), 'Input "dcb_meas" and "dcb_cal" must have the same shape.'
  1900. gci_serial_number = GCI_SERIAL_NUMBER if (serial_number == None) else serial_number
  1901. (Nx,Ny,Nw) = dcb_meas.shape
  1902. if debug: print('(Nx,Ny,Nw)=', (Nx,Ny,Nw))
  1903. if (rect_dict == None):
  1904. rd = read_rectanglesfile(serial_number=serial_number)
  1905. elif isinstance(rect_dict, str):
  1906. rd = get_calib_dictionary(rect_dict, serial_number=gci_serial_number)
  1907. if ('aperture_rects' in rd): rd = daisi_rect_to_xycoords(rd, Nx)
  1908. elif isinstance(rect_dict, dict):
  1909. rd = rect_dict
  1910. if ('aperture_rects' in rd): rd = daisi_rect_to_xycoords(rd, Nx)
  1911. else:
  1912. raise TypeError('Input "rect_dict" must be either a filename or a dictionary.')
  1913. ## If any of the aperture coordinates lie outside the image, then raise an exception. Most
  1914. ## likely, the rectangles are wrong or a cropped datacube was used as input.
  1915. if any(rd['aperture_xy'] < 0):
  1916. raise ValueError('One or more of the "aperture_xy" coordinates is negative!')
  1917. elif any(rd['aperture_xy'][:,1] > Nx):
  1918. raise ValueError('One or more of the "aperture_xy" x-coordinates is > Nx!')
  1919. elif any(rd['aperture_xy'][:,3] > Ny):
  1920. raise ValueError('One or more of the "aperture_xy" y-coordinates is > Ny!')
  1921. if any(rd['unregistered_scene_xy'] < 0):
  1922. raise ValueError('One or more of the "unregistered_scene_xy" coordinates is negative!')
  1923. elif any(rd['unregistered_scene_xy'][:,1] > Nx):
  1924. raise ValueError('One or more of the "unregistered_scene_xy" x-coordinates is > Nx!')
  1925. elif any(rd['unregistered_scene_xy'][:,3] > Ny):
  1926. raise ValueError('One or more of the "unregistered_scene_xy" y-coordinates is > Ny!')
  1927. ## Apply the saturation_ceiling to all cameras that have reference cameras or are themselves reference
  1928. ## cameras. On gci2, this means cameras 5, 6, and 9.
  1929. refcam_list = rd['refcam'].keys()
  1930. for k in rd['refcam']:
  1931. if rd['refcam'][k] not in refcam_list:
  1932. refcam_list.append(rd['refcam'][k])
  1933. ## Create the saturation mask using numpy's masked arrays.
  1934. dcb_meas = ma.masked_greater_equal(dcb_meas, saturation_ceiling)
  1935. dcb_cal = ma.masked_greater_equal(dcb_cal, saturation_ceiling)
  1936. ## Grab the mean temperatures from each region.
  1937. meas_aper_temps = get_mean_spect(dcb_meas, rd['aperture_xy'])
  1938. ref_aper_temps = get_mean_spect(dcb_cal, rd['aperture_xy'])
  1939. ref_scene_temps = get_mean_spect(dcb_cal, rd['unregistered_scene_xy'])
  1940. meas_whole_temps = get_mean_spect(dcb_meas, rd['cropping_xy'])
  1941. ref_whole_temps = get_mean_spect(dcb_cal, rd['cropping_xy'])
  1942. aper_offset_deltas = ref_aper_temps - ref_aper_temps[9]
  1943. if debug:
  1944. import matplotlib.pyplot as plt
  1945. import matplotlib.cm as cm
  1946. show_dcb(array(dcb_cal), box1=rd['aperture_xy'], box2=rd['cropping_xy'],
  1947. box3=rd['unregistered_scene_xy'], title='1: original calib. dcb (est. obj temp)',
  1948. cmap=cm.jet)
  1949. #plt.show()
  1950. ## Correct the aperture-occluded cameras to have their temperature estimates agree with refcam.
  1951. ## The non-occluded center cameras have to be corrected in a different way --- I've chosen to average the
  1952. ## whole image as a pretend reference aperture temperature. Then they can be matched to a reference
  1953. ## camera that shares their same spectral bandpass.
  1954. refcam_meas_aper_temps = meas_aper_temps[refcam] * ones(Nw)
  1955. meas_aper_temps[isnan(meas_aper_temps)] = ref_whole_temps[isnan(meas_aper_temps)]
  1956. ## In "rd['refcam']", the keys are the center camera numbers, and the values are their
  1957. ## corresponding reference cameras.
  1958. for key in rd['refcam']:
  1959. aper_offset_deltas[key] = 0.0
  1960. refcam_meas_aper_temps[key] = ref_whole_temps[rd['refcam'][key]]
  1961. delta = refcam_meas_aper_temps + aper_offset_deltas
  1962. beta = (ref_scene_temps[refcam] - ref_aper_temps) / (ref_scene_temps - ref_aper_temps)
  1963. ## For the referenced cameras (5 and 6), force the beta to be 1.0.
  1964. for key in rd['refcam']:
  1965. beta[key] = 1.0
  1966. ## Apply the correction with the estimated delta and beta variables.
  1967. corr_meas_dcb = zeros_like(dcb_meas)
  1968. for w in arange(Nw):
  1969. if w in rd['refcam']:
  1970. corr_meas_dcb[:,:,w] = (dcb_meas[:,:,w] - meas_whole_temps[w]) * beta[w] + delta[w]
  1971. else:
  1972. corr_meas_dcb[:,:,w] = (dcb_meas[:,:,w] - meas_aper_temps[w]) * beta[w] + delta[w]
  1973. if debug:
  1974. apert_new_temps = get_mean_spect(corr_meas_dcb, rd['aperture_xy'])
  1975. for key in rd['refcam']:
  1976. meas_aper_temps[key] = meas_whole_temps[key]
  1977. whole_new_temps = get_mean_spect(corr_meas_dcb, rd['cropping_xy'])
  1978. scene_new_temps = (ref_scene_temps - meas_aper_temps) * beta + delta
  1979. print('aper_offset_deltas=', aper_offset_deltas)
  1980. print('aperture_xy=', rd['aperture_xy'])
  1981. print('cropping_xy=', rd['cropping_xy'])
  1982. print('scene_xy=', rd['scene_xy'])
  1983. print('unregistered_scene_xy=', rd['unregistered_scene_xy'])
  1984. print('')
  1985. print('Mean temperatures before/after dynamic correction:')
  1986. print(' ---------- BEFORE --------- ----- CALIB ----- ---------- AFTER ----------')
  1987. print(' w aper scene whole delta beta aper scene whole')
  1988. for w in arange(Nw):
  1989. if w in rd['refcam']:
  1990. print('%2i %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f' %
  1991. (w, nan, ref_scene_temps[w], ref_whole_temps[w], delta[w], beta[w], apert_new_temps[w], scene_new_temps[w], whole_new_temps[w]))
  1992. else:
  1993. print('%2i %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f' %
  1994. (w, meas_aper_temps[w], ref_scene_temps[w], ref_whole_temps[w], delta[w], beta[w], apert_new_temps[w], scene_new_temps[w], whole_new_temps[w]))
  1995. print('')
  1996. show_dcb(corr_meas_dcb, box3=rd['unregistered_scene_xy'], title='dynamically corrected dcb (degC)', showstats=True)
  1997. temp_dcb = crop_dcb(corr_meas_dcb, rd['cropping_xy'])
  1998. show_dcb(temp_dcb, box3=rd['scene_xy'], title='dynamically corrected dcb (degC) -- cropped', showstats=True)
  1999. show_dcb(temp_dcb, plottype='difference1', title='dynamically corrected dcb (degC)', showstats=True)
  2000. plt.show()
  2001. ## If the user called the function asking the delta and beta values, then set them here. These are assigned
  2002. ## in a loop rather than using vectorized assignment because the latter would actually move the pointer to the
  2003. ## object from the one passed in to the local 'delta' here.
  2004. if (delta_output != None):
  2005. for w in arange(Nw):
  2006. delta_output[w] = delta[w]
  2007. if (beta_output != None):
  2008. for w in arange(Nw):
  2009. beta_output[w] = beta[w]
  2010. ## Finally, apply the saturation ceiling by filling in the masked values.
  2011. corr_meas_dcb = array(ma.fix_invalid(corr_meas_dcb, fill_value=saturation_ceiling))
  2012. return(corr_meas_dcb)
  2013. ## =================================================================================================
  2014. def read_gcifile_meta(filename, debug=False, save_header = False):
  2015. '''
  2016. Read in a GCI datafile;s metadata contents.
  2017. Parameters
  2018. ----------
  2019. filename : str
  2020. The name of the `*.gci` file to read
  2021. Returns
  2022. -------
  2023. data : dict
  2024. A dictionary containing all data elements from the decoded gci msg. If present in the gci \
  2025. file, the list of dictionary keys can include "cool_shutter_temperatures", "heated_shutter_temperatures", \
  2026. "timestamp", "accumulated_frames_list", "gain_cube_filename", "offset_cube_filename", \
  2027. "cropping_xy", "filter_to_camera_trans", "serial_number", ....
  2028. '''
  2029. import struct
  2030. assert isinstance(filename, str), 'The input "filename" must be a string type.'
  2031. assert os.path.exists(filename), 'The input filename ("' + filename + '") does not exist.'
  2032. f = open(filename, 'rb')
  2033. raw = f.read()
  2034. f.close()
  2035. total_nbytes = sys.getsizeof(raw)
  2036. if debug: print('total_nbytes=', total_nbytes)
  2037. n = 0
  2038. msg = []
  2039. nparts = uint32(struct.unpack('>I', raw[n:n+4]))[0]
  2040. if debug: print('nparts=', nparts)
  2041. n += 4
  2042. ## The first message part is the string label giving the ZMQ subscription topic.
  2043. strlen = uint32(struct.unpack('>I', raw[n:n+4]))[0]
  2044. if debug: print('strlen=', strlen)
  2045. n += 4
  2046. topic = ''.join(struct.unpack('>'+str(strlen)+'c', raw[n:n+strlen]))
  2047. msg.append(topic)
  2048. if debug: print('subscription topic=', topic)
  2049. n += strlen
  2050. ## Loop through the subsequent parts, defining each part of the msg list.
  2051. msg_nbytes = uint32(struct.unpack('>I', raw[n:n+4]))[0]
  2052. if debug: print('msg_nbytes=', msg_nbytes)
  2053. n += 4
  2054. msg.append(raw[n:n+msg_nbytes])
  2055. meta = decode_gci_msg_meta(msg, debug=debug)
  2056. return(meta)
  2057. ## =================================================================================================
  2058. def read_gcifile(filename, use_ir=True, use_vis=True, use_masks=True, debug=False, return_msg = False, save_header = False):
  2059. '''
  2060. Read in a GCI datafile and convert the contents to Numpy arrays. Note that `*.cub` files
  2061. use the same format as the `*.gci` files, but are just missing some of the metadata.
  2062. Parameters
  2063. ----------
  2064. filename : str
  2065. The name of the `*.gci` file to read.
  2066. use_ir : bool
  2067. Whether to include the IR dcb in the returned dictionary.
  2068. use_vis : bool
  2069. Whether to include the VIS image in the returned dictionary.
  2070. use_masks : bool
  2071. Whether to include the masks in the returned dictionary.
  2072. Returns
  2073. -------
  2074. data : dict
  2075. A dictionary containing all data elements from the decoded gci msg. If present in the gci \
  2076. file, the list of dictionary keys can include "cool_shutter_temperatures", "heated_shutter_temperatures", \
  2077. "timestamp", "accumulated_frames_list", "gain_cube_filename", "offset_cube_filename", \
  2078. "cropping_xy", "filter_to_camera_trans", "serial_number", "dcb", "vis".
  2079. '''
  2080. import struct
  2081. assert isinstance(filename, str), 'The input "filename" must be a string type.'
  2082. assert os.path.exists(filename), 'The input filename ("' + filename + '") does not exist.'
  2083. f = open(filename, 'rb')
  2084. raw = f.read()
  2085. f.close()
  2086. total_nbytes = sys.getsizeof(raw)
  2087. if debug: print('total_nbytes=', total_nbytes)
  2088. n = 0
  2089. msg = []
  2090. nparts = uint32(struct.unpack('>I', raw[n:n+4]))[0]
  2091. if debug: print('nparts=', nparts)
  2092. n += 4
  2093. ## The first message part is the string label giving the ZMQ subscription topic.
  2094. strlen = uint32(struct.unpack('>I', raw[n:n+4]))[0]
  2095. if debug: print('strlen=', strlen)
  2096. n += 4
  2097. topic = ''.join(struct.unpack('>'+str(strlen)+'c', raw[n:n+strlen]))
  2098. msg.append(topic)
  2099. if debug: print('subscription topic=', topic)
  2100. n += strlen
  2101. ## Loop through the subsequent parts, defining each part of the msg list.
  2102. for i in arange(nparts-1):
  2103. msg_nbytes = uint32(struct.unpack('>I', raw[n:n+4]))[0]
  2104. if debug: print('msg_nbytes=', msg_nbytes)
  2105. n += 4
  2106. msg.append(raw[n:n+msg_nbytes])
  2107. n += msg_nbytes
  2108. if return_msg:
  2109. return(msg)
  2110. else:
  2111. gcidata = decode_gci_msg(msg, use_ir=use_ir, use_vis=use_vis, use_masks=use_masks, debug=debug, save_header=save_header)
  2112. return(gcidata)
  2113. ## =================================================================================================
  2114. def dcb_objtemp_to_atsensor_radiance(dcb_temp, LUT=None, LUTtemps=None, poly_approx=False, \
  2115. serial_number=None, debug=False):
  2116. '''
  2117. Convert object temperature to at-sensor radiance values, given the filter set, their
  2118. transmission spectra, and the detector responsivity spectrum.
  2119. Parameters
  2120. ----------
  2121. dcb_temp : ndarray
  2122. The datacube in degC units.
  2123. LUT : ndarray
  2124. A (ntemps,Nw) matrix of lookup table elements to use for interpolation.
  2125. LUTtemps : ndarray
  2126. A (ntemps) long vector of the degC values used to construct the lookup table.
  2127. poly_approx : bool
  2128. Whether to use the polynomial approximation to the LUT.
  2129. serial_number : int
  2130. The serial number of the GCI being analyzed. Not needed if LUT and LUTtemps are inputs.
  2131. Returns
  2132. -------
  2133. dcb_radiance : ndarray
  2134. The datacube converted to at-sensor radiance units.
  2135. '''
  2136. from scipy.interpolate import interp1d
  2137. gci_serial_number = GCI_SERIAL_NUMBER if (serial_number == None) else serial_number
  2138. if (LUT == None):
  2139. (LUT, LUTtemps) = get_degc_to_atsensor_radiance_lut(poly_approx=poly_approx, serial_number=gci_serial_number)
  2140. (Nx,Ny,Nw) = dcb_temp.shape
  2141. LUT_mintemp = int(amin(LUTtemps))
  2142. LUT_maxtemp = int(amax(LUTtemps))
  2143. dcb_radiance = zeros_like(dcb_temp)
  2144. for w in arange(Nw):
  2145. lut_radiance = LUT[:,w]
  2146. if any(isnan(dcb_temp[:,:,w])):
  2147. raise ValueError('NaN vales in the datacube!')
  2148. if debug:
  2149. print('[after] w=%i: amin(dcb)=%f, amax(dcb)=%f' % (w, amin(dcb_temp[:,:,w]), amax(dcb_temp[:,:,w])))
  2150. dcb_mintemp = amin(dcb_temp[:,:,w])
  2151. dcb_maxtemp = amax(dcb_temp[:,:,w])
  2152. if (dcb_mintemp < LUT_mintemp):
  2153. ## Need to make a 3D mask for indexing even though we're using 2D slices.
  2154. mask = zeros((Nx,Ny,Nw), 'bool')
  2155. mask[:,:,w] = (dcb_temp[:,:,w] < LUT_mintemp)
  2156. dcb_temp[mask] = LUT_mintemp
  2157. print('Truncating datacube values below %f in w=%i ...' % (LUT_mintemp, w))
  2158. #raise ValueError('One or more corrected datacube temperatures are below ' + str(LUT_mintemp) + ' degC!')
  2159. elif (dcb_maxtemp > LUT_maxtemp):
  2160. ## Need to make a 3D mask for indexing even though we're using 2D slices.
  2161. mask = zeros((Nx,Ny,Nw), 'bool')
  2162. mask[:,:,w] = (dcb_temp[:,:,w] > LUT_maxtemp)
  2163. dcb_temp[mask] = LUT_maxtemp
  2164. print('Truncating datacube values above %f in w=%i ...' % (LUT_maxtemp, w))
  2165. #raise ValueError('One or more corrected datacube temperatures are above ' + str(LUT_maxtemp) + ' degC!')
  2166. f = interp1d(LUTtemps, lut_radiance)
  2167. dcb_radiance[:,:,w] = f(dcb_temp[:,:,w])
  2168. if debug:
  2169. show_dcb(dcb_radiance, title='degC -> atsensor radiance converted dcb')
  2170. return(dcb_radiance)
  2171. ## =================================================================================================
  2172. def get_filterlist(d=None, serial_number=None, debug=False):
  2173. '''
  2174. Get the Nw-length list of filter uniqueIDs associated with each of the cameras.
  2175. Parameters
  2176. ----------
  2177. d : dict, optional
  2178. The "filter_to_channel_map" dictionary.
  2179. serial_number : int
  2180. The serial number of the GCI whose filterlist you want to look up.
  2181. Returns
  2182. -------
  2183. filterlist : list
  2184. A list of the filter uniqueIDs.
  2185. '''
  2186. gci_serial_number = GCI_SERIAL_NUMBER if (serial_number == None) else serial_number
  2187. print('gci_serial_number=', gci_serial_number)
  2188. if (d == None):
  2189. d = get_calib_dictionary('filter_to_channel_map.js', serial_number=gci_serial_number)
  2190. else:
  2191. assert isinstance(d, dict), 'Input "d" must be a dictionary.'
  2192. if (gci_serial_number == 2):
  2193. if ('system_serial_number' in d): del d['system_serial_number']
  2194. if debug:
  2195. print(json.dumps(d, sort_keys=True, indent=4))
  2196. ## First, find out how many cameras there are.
  2197. maxcam = 0
  2198. for key in d:
  2199. camval = int(d[key]['camera'])
  2200. ## Check if `camval` is an integer.
  2201. if (camval == int(camval)): maxcam = max((maxcam, int(camval)))
  2202. ## Now assemble the list of filters installed.
  2203. Nw = maxcam + 1
  2204. #nfilters = len(d)
  2205. filterlist = ['']*Nw
  2206. for key in d:
  2207. for w in arange(Nw):
  2208. if (d[key]['camera'] == str(w)):
  2209. filterlist[w] = d[key]['uniqueID']
  2210. else:
  2211. ## Assemble the list of filters installed on the system.
  2212. Nw = len(d.keys())
  2213. filterlist = ['']*Nw
  2214. for key in d:
  2215. filterlist[int(key)] = d[key]['uniqueID']
  2216. for w in arange(Nw):
  2217. if (d[key] == str(w)):
  2218. filterlist[w] = d[key]['uniqueID']
  2219. return(filterlist)
  2220. ## =================================================================================================
  2221. def acquire_manually_calibrated_hypercube(ncubes, do_registration=True, cropping_xy=None, units='radiance', debug=False):
  2222. '''
  2223. Skip the DAISI calibration stuff and perform all of the calibration steps manually.
  2224. Parameters
  2225. ----------
  2226. ncubes : int
  2227. The number of measurement cubes to acquire.
  2228. do_registration : bool
  2229. Whether or not to output a cropped registered hypercube or a fullsize hypercube.
  2230. cropping_xy : ndarray
  2231. A (Nw,4) size matrix of (xlo,xhi,ylo,yhi) coordinates for registration cropping.
  2232. units : str
  2233. {'degC','radiance'} the output data type. ('counts' is not present in this list because \
  2234. there is no sense in calling a calibration collection routine if you want uncalibrated data.)
  2235. Returns
  2236. -------
  2237. hcb : ndarray
  2238. A (Nx,Ny,Nw,Nt) hypercube.
  2239. Notes
  2240. -----
  2241. If there is something wrong with the DAISI calibration code, then you need to perform all of the calibration
  2242. steps manually. This code performs the pre-measurement calibration setup, acquires ncubes of raw data,
  2243. uses the acquired calibration data to calibrated the raw counts into degC, and then optionally crops the result
  2244. into a fully registered hypercube.
  2245. '''
  2246. import time
  2247. assert (ncubes == int(ncubes)), 'The input "ncubes" must be an integer type.'
  2248. assert (units.lower() in ['radiance','degc']), '"units" input ("' + units + '") must be either "radiance" or "degC".'
  2249. ## This block to set up and acquire the datacube of the unheated shutter.
  2250. if debug: print('Taking the cool shutter measurement ...')
  2251. set_heater_state('on')
  2252. #send_command('disable_camera_autocal')
  2253. #send_command('disable_autocal_activity_control')
  2254. #send_command('disable_auto_calibration')
  2255. move_shutter('warm', 'up')
  2256. move_shutter('cool', 'down')
  2257. send_command('trigger_1pt_calibration')
  2258. time.sleep(1)
  2259. hcb = acquire_dcb(5, 6009)
  2260. dcb_cool = mean(float32(hcb), axis=3)
  2261. cool_temps = get_control_data()['cool_shutter_temperatures']
  2262. if (alen(cool_temps) == 2): cool_temps = mean(cool_temps)
  2263. ## This block to acquire the datacube of the heated shutter.
  2264. if debug: print('Taking the warm shutter measurement ...')
  2265. move_shutter('cool', 'up')
  2266. move_shutter('warm', 'down')
  2267. hcb = acquire_dcb(5, 6009)
  2268. dcb_warm = mean(float32(hcb), axis=3)
  2269. warm_temps = get_control_data()['warm_shutter_temperatures']
  2270. move_shutter('warm', 'up')
  2271. if debug:
  2272. print('Cool shutter temperatures=')
  2273. print(cool_temps)
  2274. print('Warm shutter temperatures=')
  2275. print(warm_temps)
  2276. print('Calculating the offset and gain cubes ...')
  2277. (offset_dcb, gain_dcb) = get_offsets_and_gains(dcb_cool, dcb_warm, cool_temps, warm_temps, units=units)
  2278. ## This block to acquire the measurement datacube.
  2279. if debug: print('Collecting the measurement cube ...')
  2280. hcb = float32(acquire_dcb(ncubes, 6009))
  2281. ## Now that we're done with data acquisition, turn autocal back on.
  2282. send_command('enable_camera_autocal')
  2283. if debug:
  2284. show_dcb(dcb_cool, title='cool cal dcb')
  2285. show_dcb(dcb_warm, title='warm cal dcb')
  2286. show_dcb(offset_dcb, title='offset dcb')
  2287. show_dcb(gain_dcb, title='gain dcb')
  2288. show_dcb(mean(hcb, axis=3), title='meas dcb, counts')
  2289. ## Next use the offset and gain cubes to convert the raw counts to degC units.
  2290. if debug: print('Converting the dcb to ' + units + ' units ...')
  2291. gain_dcb[logical_not(isfinite(gain_dcb))] = mean(gain_dcb[isfinite(gain_dcb)])
  2292. for t in arange(ncubes):
  2293. hcb[:,:,:,t] = ((hcb[:,:,:,t] - offset_dcb) / gain_dcb)
  2294. if debug:
  2295. show_dcb(mean(hcb, axis=3), title='measurement dcb (' + units + ' units)')
  2296. if do_registration:
  2297. if (cropping_xy == None):
  2298. raise ValueError('The manual registration function has been disabled. '
  2299. 'Cannot change registration rectangles.')
  2300. #dcb = mean(hcb, axis=3)
  2301. ### Use the dataset to get registration info.
  2302. #cropping_xy = generate_aois_from_dcb(dcb)
  2303. #if debug: print('Saving the registration rectangles to "cropping_xy.npz" ...')
  2304. #savez('cropping_xy.npz', cropping_xy=cropping_xy)
  2305. dcb_cropped = crop_dcb(dcb, cropping_xy)
  2306. if debug:
  2307. show_dcb(dcb_cropped, title='co-registered dcb')
  2308. return(hcb)
  2309. ## =================================================================================================
  2310. def get_gas_spectra(waves=None, gaslist=None, path=None, skip_nonquant=True, debug=False):
  2311. '''
  2312. Get the dictionary of gas cross-section spectra from the library of JDX files located in given directory.
  2313. Parameters
  2314. ----------
  2315. waves : ndarray
  2316. A vector of wavelengths at which to interpolate the gas spectra.
  2317. gaslist : list
  2318. A list of gas names (matching the JDX filenames) of interest. Ignore any filenames that are \
  2319. not in this list.
  2320. skip_nonquant : bool
  2321. Whether or not to ignore gas spectra that are non-quantitative (i.e. not in absolute \
  2322. cross-section units).
  2323. path : str
  2324. A directory for where to find the gas spectra files (if you want to look in a place other \
  2325. than the standard calibration directory).
  2326. Returns
  2327. -------
  2328. gas_data : dict
  2329. A dictionary whose keys are the filenames of the gases (minus the filename suffix) and whose \
  2330. values are the cross-section spectra in units of cm^2 ppm. An extra key "waves" is also added, and gives \
  2331. the wavelengths at which the spectra have been interpolated.
  2332. '''
  2333. from scipy.interpolate import interp1d
  2334. if (waves == None):
  2335. #waves = linspace(6.0, 20.0, 500)
  2336. waves = linspace(7.0, 18.0, 500)
  2337. #nwaves = alen(waves)
  2338. wavemin = amin(waves)
  2339. wavemax = amax(waves)
  2340. path = GCIDIR+'cal/gas_spectra/' if (path == None) else path
  2341. jcamp_files = glob.glob(os.path.normpath(path+'*.jdx'))
  2342. #ngases = alen(jcamp_files)
  2343. gas_data = {}
  2344. gas_data['waves'] = waves
  2345. gas_data['none'] = ones_like(waves)
  2346. ## Read in the gas spectra
  2347. for i,f in enumerate(jcamp_files):
  2348. gasname = os.path.basename(f)[:-4]
  2349. if (gaslist != None) and (gasname not in gaslist): continue
  2350. if debug: print(gasname)
  2351. jcamp_dict = jcamp_spectrum(f, wavemin-0.25, wavemax+0.25, skip_nonquant=skip_nonquant, debug=debug)
  2352. if not jcamp_dict: continue
  2353. f = interp1d(jcamp_dict['x'], jcamp_dict['xsec'])
  2354. gas_data[jcamp_dict['title']] = f(waves)
  2355. return(gas_data)
  2356. ## =================================================================================================
  2357. def calculate_hmatrix(filterlist, filter_data, return_data_structure=False, debug=False):
  2358. '''
  2359. Calculate the GCI system matrix, for a set of M filters and N spectral channels. The spectral
  2360. channels are defined automatically by the filter edge wavelengths. (If any bandpass filter is
  2361. in the system, it ignores it for the purpose of calculating the spectral channel wavelengths.)
  2362. Parameters
  2363. ----------
  2364. filterlist : list
  2365. A list the filter uniqueIDs in the system, in the order of the cameras they are mounted to.
  2366. filter_data : dict
  2367. A dictionary containing the filter spectra.
  2368. return_data_structure : bool
  2369. Whether to return just the H-matrix itself or to also return some of the associated spectral \
  2370. channel information.
  2371. Returns
  2372. -------
  2373. H : ndarray
  2374. A MxN matrix.
  2375. channel_basisfcns : ndarray
  2376. The nwaves x nchannels matrix of basis functions for discretizing the spectrum.
  2377. edgewave_list : list
  2378. A list of the Nw "edge wavelengths" for each of the Nw filters.
  2379. channel_boundaries : ndarray
  2380. A Nw+1 list of the wavelengths associated with the spectral channel boundaries.
  2381. '''
  2382. assert isinstance(filterlist, (list, tuple)), 'The input "filterlist" must be a list type.'
  2383. assert isinstance(filter_data, dict), 'The input "filter_data" must be a dict type.'
  2384. nfilters = len(filterlist)
  2385. waves = filter_data['waves']
  2386. ## First go through and get a list of the simple filter names (stripped out of the uniqueIDs).
  2387. filternames = []
  2388. for f in filterlist:
  2389. if (f.lower() == 'none'): continue
  2390. pieces = f.split('_')
  2391. filternames.append(pieces[1])
  2392. ## Grab the rectangles dictionary, which contains information about the reference cameras. Make a list
  2393. ## of which cameras have reference cameras.
  2394. gci_serial_number = serial_number_from_filterlist(filterlist)
  2395. rd = read_rectanglesfile(serial_number=gci_serial_number)
  2396. refcam_list = rd['refcam'].keys()
  2397. edgewave_list = {}
  2398. if debug:
  2399. import matplotlib.pyplot as plt
  2400. plt.figure()
  2401. legendstr = []
  2402. for i,f in enumerate(filterlist):
  2403. if i in refcam_list: continue
  2404. if (f.lower() == 'none'): continue
  2405. filter_nameparts = f.split('_')
  2406. filtertype = filter_nameparts[1][0:2] ## gives 'BP', 'LP', or 'SP'
  2407. if (filtertype == 'BP'):
  2408. continue
  2409. elif (filtertype not in ('LP','SP')):
  2410. continue
  2411. (edgewave, edgeval) = get_filter_edge_wavelength(waves, filter_data[f], filtertype=filtertype)
  2412. edgewave_list[f] = edgewave
  2413. if debug:
  2414. print('edgewave_list=', edgewave_list)
  2415. plt.plot(waves, filter_data[f])
  2416. #legendstr.append(f)
  2417. legendstr.append(filter_nameparts[1])
  2418. if debug:
  2419. leg = plt.legend(legendstr, prop={'size':12})
  2420. leg.draggable()
  2421. plt.title('Filter transmission curves')
  2422. plt.xlabel('wavelength (um)')
  2423. dw = mean(gradient(waves))
  2424. nfilters = alen(filterlist)
  2425. ## Define the list of channel boundary wavelengths.
  2426. edgewave_values = sorted(float32(array(edgewave_list.items())[:,1]))
  2427. channel_boundaries = append(append(amin(waves), edgewave_values), amax(waves))
  2428. #zzz
  2429. #import pdb
  2430. #pdb.set_trace()
  2431. ## Sometimes the channel boundaries are too close to define a useful channel (i.e. less than three data
  2432. ## points). If so, delete the unwanted boundary. The "roll" is to add the too-narrow channel to the
  2433. ## next channel to the right and not the next channel to the left. The "append" command is needed
  2434. ## to make up for the fact that the dimensionality of the difference between channel boundary wavelengths
  2435. ## is one less than that of the original vector.
  2436. #okay = ((channel_boundaries[1:] - channel_boundaries[:-1]) > (3.0*dw))
  2437. okay = ((channel_boundaries[1:] - channel_boundaries[:-1]) > 0.2)
  2438. okay = append(okay, True)
  2439. okay = roll(okay, 1)
  2440. channel_boundaries = channel_boundaries[okay]
  2441. nchannels = alen(channel_boundaries) - 1
  2442. if debug: print('channel_boundaries=', channel_boundaries)
  2443. channel_basisfcns = zeros((alen(waves),nchannels))
  2444. ## From the list of channel boundary wavelengths, generate the channel basis functions.
  2445. for n in arange(nchannels):
  2446. channel_basisfcns[:,n] = (waves > channel_boundaries[n]) * (waves <= channel_boundaries[n+1])
  2447. channel_basisfcns[0,0] = 1.0
  2448. H = zeros((nfilters,nchannels))
  2449. sensitivity = filter_data['responsivity'] * filter_data['tamarisk_lens_transmission'] * filter_data['germanium_window_transmission']
  2450. ## Given the channel basis functions, we can now form our integral for building the system matrix.
  2451. for w,f in enumerate(filterlist):
  2452. indicator = 0.0 if (w in refcam_list) else 1.0
  2453. if debug: plt.figure()
  2454. for n in arange(nchannels):
  2455. integral = indicator * sum(filter_data[f] * sensitivity * channel_basisfcns[:,n] * dw)
  2456. #zzz
  2457. #normalizer = sum(channel_basisfcns[:,n] * dw)
  2458. normalizer = 1.0
  2459. if debug: print('w,f,n,integral,normalizer=', (w, f, n, integral, normalizer))
  2460. if debug: plt.plot(waves, channel_basisfcns[:,n])
  2461. H[w,n] = integral / normalizer
  2462. if debug:
  2463. plt.plot(waves, filter_data[f], 'r-', linewidth=2)
  2464. plt.plot(waves, sensitivity, 'k-', linewidth=2)
  2465. plt.title('w=' + str(w) + ': ' + f)
  2466. plt.axis([6.5,18.5,-0.075,1.075])
  2467. plt.xlabel('wavelength (um)')
  2468. plt.ylabel('normalized\n responsivity')
  2469. #plt.show()
  2470. if not return_data_structure:
  2471. return(H)
  2472. else:
  2473. return(H, channel_basisfcns, edgewave_list, channel_boundaries)
  2474. ## =================================================================================================
  2475. ## Return "none", "bandpass', "longpass" or "shortpass" depending on whether the input filter spectrum represents no filterat all,
  2476. ## a bandpass filter, a longpass filter, or a shortpass filter. The input wavelengths ("waves_um") must be
  2477. ## in microns for this to work.
  2478. def get_filter_type(waves_um, filter_spectrum):
  2479. '''
  2480. Determine what type of filter a given transmission spectrum represents.
  2481. Parameters
  2482. ----------
  2483. waves_um : ndarray
  2484. A vector of wavelengths (in microns)
  2485. filter_spectrum : ndarray
  2486. A vector giving a filter transmission spectrum
  2487. Returns
  2488. -------
  2489. type : str
  2490. {'none','bandpass','longpass','shortpass'}
  2491. '''
  2492. waves_um = asarray(waves_um)
  2493. filter_spectrum = asarray(filter_spectrum)
  2494. assert (alen(waves_um) == alen(filter_spectrum)), 'Inputs "waves_um" and "filter_spectrum" must have the same length.'
  2495. #nwaves = alen(waves_um)
  2496. if all(filter_spectrum == filter_spectrum[0]):
  2497. return('none')
  2498. peak_trans = amax(filter_spectrum)
  2499. idx = ravel(where(filter_spectrum > 0.5*peak_trans))
  2500. first_idx = idx[0]
  2501. last_idx = idx[-1]
  2502. #print('peak_trans=%5.2f, first_idx=%i, last_idx=%i' % (peak_trans, first_idx, last_idx))
  2503. if (first_idx == 0) and (waves_um[last_idx] < 12.0):
  2504. filtertype = 'shortpass'
  2505. elif (first_idx > 0) and (waves_um[last_idx] > 12.0):
  2506. filtertype = 'longpass'
  2507. else:
  2508. filtertype = 'broadband'
  2509. return(filtertype)
  2510. ## =================================================================================================
  2511. def planck_blackbody(waves_um, T_celsius=25.0):
  2512. '''
  2513. Calculate the Planck blackbody spectrum.
  2514. Parameters
  2515. ----------
  2516. waves_um : ndarray
  2517. A vector of wavelengths (in microns) over which to sample the spectrum.
  2518. T_celsius : float
  2519. A temperature (in degC) at which to evaluate the blackbody spectrum.
  2520. Returns
  2521. -------
  2522. spectrum : ndarray
  2523. A vector sampled at the input wavelengths.
  2524. '''
  2525. waves_um = asarray(waves_um)
  2526. assert (alen(T_celsius) == 1), 'The input "T_celsius" should be a scalar, but is a length ' + \
  2527. str(alen(T_celsius)) + ' object.'
  2528. waves_m = waves_um * 1.0E-6 ## convert from wavelength in um to meters
  2529. h = 6.63E-34 ## Planck's constant
  2530. c = 3.0E8 ## speed of light
  2531. k = 1.38E-23 ## Boltzmann's constant
  2532. T = T_celsius + 273.15 ## convert from degC to Kelvin
  2533. if (T > 0.1):
  2534. numer = 2.0E-10 * h * c**2 / waves_m**5
  2535. factor2 = h * c / (waves_m * k * T)
  2536. if any(factor2 > 127.0):
  2537. #print('Exponent overflow detected. Planck exponent term = exp(' + str(amax(factor2)) + ')!')
  2538. factor2[factor2 > 127.0] = 127.0
  2539. elif any(factor2 < -127.0):
  2540. #print('Exponent underflow detected. Planck exponent term = exp(' + str(amin(factor2)) + ')!')
  2541. factor2[factor2 < -127.0] = -127.0
  2542. return(numer / (exp(factor2) - 1.0)) ## Units are (W)/(cm^2.sr.um).
  2543. else:
  2544. return(waves_m * 0.0) ## Units are (W)/(cm^2.sr.um).
  2545. ## =================================================================================================
  2546. def get_degc_to_atsensor_radiance_lut(temps=None, filename=None, filterlist=None, filter_data=None, \
  2547. poly_approx=False, serial_number=None, debug=False):
  2548. '''
  2549. Create the lookup table for converting temperature in degC to at-sensor radiance for each of the Nw cameras.
  2550. Parameters
  2551. ----------
  2552. temps : ndarray, optional
  2553. A vector of temperatures at which to evaluate the table.
  2554. filename : str, optional
  2555. The filename at which to save a text version of the LUT.
  2556. filterlist : list, optional
  2557. A list of the filters attached to each of the Nw cameras.
  2558. filter_data : dict, optional
  2559. A dictionary containing the filter spectra.
  2560. poly_approx : bool, optional
  2561. Whether to use a polynomial approximation to the LUT, and not the LUT itself.
  2562. serial_number : int, optional
  2563. The serial number of the GCI being queried.
  2564. Returns
  2565. -------
  2566. LUT : ndarray
  2567. The (ntemps,Nw) matrix of lookup table elements. If `poly_approx == True`, then the \
  2568. lookup table elements returned are generated from the polynomial approximation and not \
  2569. from the LUT directly.
  2570. temps : ndarray
  2571. The (ntemps) vector of degC temperatures used to sample the lookup table.
  2572. '''
  2573. if (filterlist == None): filterlist = get_filterlist(serial_number=serial_number)
  2574. if (filter_data == None): filter_data = get_filter_spectra(filterlist=filterlist)
  2575. assert isinstance(filterlist, (list, tuple)), 'The input "filterlist" must be a list type.'
  2576. assert isinstance(filter_data, dict), 'The input "filter_data" must be a dict type.'
  2577. Nw = len(filterlist)
  2578. if (temps == None):
  2579. temps = linspace(-150.0, 250.0, 46)
  2580. ## Make sure that the temperatures are rounded to single-decimal place for an easy LUT.
  2581. for n in arange(alen(temps)):
  2582. temps[n] = around(temps[n] * 10.0) / 10.0
  2583. ## Add extreme temperatures at the ends of the table. We needs to add *duplicate* entries
  2584. ## on both ends of the table in preparation to enforcing truncation at the table ends
  2585. ## (this helps the Intell IPP library used by DAISI which has annoying behavior outside the
  2586. ## bounds of the table).
  2587. temps = append(temps, 500.0) ## add a really hot temp
  2588. temps = append(temps, 500.1) ## add a really hot temp plus a little
  2589. temps = append(-273.0, temps) ## add almost absolute zero
  2590. temps = append(-273.1, temps) ## add absolute zero
  2591. ntemps = alen(temps)
  2592. LUT = zeros((ntemps,Nw))
  2593. new_temps = temps[1:-1]
  2594. sensitivity = filter_data['responsivity'] * filter_data['tamarisk_lens_transmission'] * filter_data['germanium_window_transmission']
  2595. for w in arange(Nw):
  2596. for n in arange(1,ntemps-1):
  2597. LUT[n,w] = degc_to_atsensor_radiance(temps[n], filter_data['waves'], filter_data[filterlist[w]], sensitivity)
  2598. if poly_approx:
  2599. new_LUT = zeros_like(LUT)
  2600. for w in arange(Nw):
  2601. coeffs = get_poly_coeffs(new_temps, LUT[1:-1,w], 5)
  2602. if debug:
  2603. if (w == 0): print('camera coeffs[0] coeffs[1] coeffs[2] coeffs[3] coeffs[4] coeffs[5]')
  2604. print('%2i %15.6e %15.6e %15.6e %15.6e %15.6e %15.6e' % (w, coeffs[0], coeffs[1], coeffs[2], coeffs[3], coeffs[4], coeffs[5]))
  2605. new_LUT[1:-1,w] = polyeval_Horner(new_temps, coeffs)
  2606. if debug and (w == 0):
  2607. import matplotlib.pyplot as plt
  2608. plt.figure()
  2609. plt.plot(new_temps, LUT[1:-1,0])
  2610. plt.plot(new_temps, new_LUT[1:-1,w])
  2611. plt.legend(('LUT','poly_approx'), loc='best')
  2612. plt.show()
  2613. if debug:
  2614. ## Only show the polynomial -> LUT comparison for w=0. It's too long to show all Nw.
  2615. w = 0
  2616. for n,t in enumerate(temps):
  2617. print('n=%3i, t=%8.3f: LUT[n,w]=%f, polyval=%f' % (n, t, LUT[n,w], new_LUT[n,w]))
  2618. ## Last step in the polynomial approximation is to replace the LUT with the approximate form.
  2619. LUT = new_LUT
  2620. ## Now replace the end temperatures with absurdly high and low values. We need to allow a tiny
  2621. ## difference at the ends of the table to prevent scipy's interp1d from throwing an exception
  2622. ## due to seeing zero slope there.
  2623. LUT[0,:] = -1.0E6
  2624. LUT[-1,:] = 1.0E6
  2625. ## Write the LUT out to a file, if the filename is given.
  2626. if (filename != None):
  2627. f = open(filename, 'w', encoding='utf-8')
  2628. f.write(' '.join(('Temp','E0','E1','E2','E3','E4','E5','E6','E7','E8','E9','E10','E11'))+'\n')
  2629. for n in arange(ntemps):
  2630. f.write('%10.1f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f %16.7f\n' % \
  2631. (temps[n], LUT[n,0], LUT[n,1], LUT[n,2], LUT[n,3], LUT[n,4], LUT[n,5], LUT[n,6], LUT[n,7], LUT[n,8], LUT[n,9], LUT[n,10], LUT[n,11]))
  2632. f.close()
  2633. return(LUT, temps)
  2634. ## =================================================================================================
  2635. def dcb_atsensor_radiance_to_objtemp(dcb_radiance, LUT=None, LUTtemps=None, silent=True, debug=False):
  2636. '''
  2637. Convert a datacube in radiance units to temperature in degC.
  2638. Parameters
  2639. ----------
  2640. dcb_radiance : ndarray
  2641. A (Nx,Ny,Nw) datacube in radiance units.
  2642. LUT : ndarray
  2643. A (ntemps,Nw) matrix of lookup table elements to use for interpolation.
  2644. LUTtemps : ndarray
  2645. A (ntemps) long vector of the degC values used to construct the lookup table.
  2646. silent : bool
  2647. Allow an interpolation exception to be passed silently.
  2648. Returns
  2649. -------
  2650. dcb_objtemp : ndarray
  2651. A (Nx,Ny,Nw) datacube in units of object temperature (degC).
  2652. Notes
  2653. -----
  2654. For the degC->radiance mapping, DAISI uses a polynomial approximation to the actual function.
  2655. For the radiance->degC mapping used here, however, DAISI uses a lookup table --- the same one
  2656. we use here.
  2657. '''
  2658. from scipy.interpolate import interp1d
  2659. dcb_radiance = asarray(dcb_radiance)
  2660. assert (dcb_radiance.ndim == 3), 'Input "dcb_radiance" must have three dimensions.'
  2661. if (LUT == None):
  2662. (LUT, temps) = get_degc_to_atsensor_radiance_lut(debug=debug)
  2663. else:
  2664. temps = asarray(LUT)
  2665. temps = asarray(LUTtemps)
  2666. (Nx,Ny,Nw) = dcb_radiance.shape
  2667. dcb_objtemp = zeros_like(dcb_radiance)
  2668. #import matplotlib.pyplot as plt
  2669. #show_dcb(dcb_radiance)
  2670. #plt.show()
  2671. for w in arange(Nw):
  2672. lut_radiance_values = LUT[:,w]
  2673. dcb_min_radiance = amin(dcb_radiance[:,:,w])
  2674. dcb_max_radiance = amax(dcb_radiance[:,:,w])
  2675. lut_min_radiance = LUT[1,w] ## the ends of the LUT are special - don't truncate to those
  2676. lut_max_radiance = LUT[-1,w] ## the ends of the LUT are special - don't truncate to those
  2677. if (dcb_min_radiance < lut_min_radiance):
  2678. if silent:
  2679. mask = zeros((Nx,Ny,Nw), 'bool')
  2680. mask[:,:,w] = (dcb_radiance[:,:,w] < lut_min_radiance)
  2681. dcb_radiance[mask] = lut_min_radiance
  2682. print('Truncating datacube values below %f in w=%i ...' % (lut_min_radiance, w))
  2683. else:
  2684. raise ValueError('The min datacube value (' + str(dcb_min_radiance) + ') is below the lower limit of the LUT.')
  2685. if (dcb_max_radiance > lut_max_radiance):
  2686. if silent:
  2687. mask = zeros((Nx,Ny,Nw), 'bool')
  2688. mask[:,:,w] = (dcb_radiance[:,:,w] > lut_max_radiance)
  2689. dcb_radiance[mask] = lut_max_radiance
  2690. print('Truncating datacube values above %f in w=%i ...' % (lut_max_radiance, w))
  2691. else:
  2692. raise ValueError('The max datacube value (' + str(dcb_max_radiance) + ') is above the upper limit of the LUT.')
  2693. #f = interp1d(lut_radiance_values, temps, bounds_error=False, fill_value=0.0)
  2694. f = interp1d(lut_radiance_values, temps)
  2695. dcb_objtemp[:,:,w] = f(dcb_radiance[:,:,w])
  2696. return(dcb_objtemp)
  2697. ## =================================================================================================
  2698. def read_rectanglesfile(serial_number=None, debug=False):
  2699. '''
  2700. Read the rectangles file "dynamic_cal_config.js" (from the calibration data archive) into a Python dictionary.
  2701. Returns
  2702. -------
  2703. c : dict
  2704. a dictionary containing the various rectangles definitions. The set of keys are
  2705. * `aperture_xy`: defines the field reference aperture coordinates.
  2706. * `cropping_xy`: defines the cropping rectangles used to co-register each image for the datacube.
  2707. * `noaperture_xy`: defines the region of each image completely unobscured by the reference aperture edge.
  2708. * `scene_xy`: defines the scene rectangle in the final coregistered images (1x4 array).
  2709. * `unregistered_scene_xy`: defines the scene rectangle in coordinates of the *original* full image.
  2710. * `full_xy`: defines the coordinates of the original uncropped images (1x4 array).
  2711. serial_number : int
  2712. The serial number of the GCI system whose rectangles you want to look up.
  2713. Notes
  2714. -----
  2715. The DAISI software uses a JSON file format which defines the following keys: {`noaperture_rects`,\
  2716. `aperture_rects`,`cropper_rects`,`scene_rect`}.
  2717. The rectangles dictionary defined here adds a couple more keys, and changes the format of the entries.
  2718. Note that the `xycoords` array is structured with `w` as the row index, and `[xlo,xhi,ylo,yhi]` as the
  2719. four columns. The `xhi` and `yhi` values are not the highest pixel value but rather one more than the
  2720. highest pixel value, so that cropping can be performed as `img[xlo:xhi,ylo:yhi]` without adding of one on
  2721. the high indices.
  2722. '''
  2723. gci_serial_number = GCI_SERIAL_NUMBER if (serial_number == None) else serial_number
  2724. d = get_calib_dictionary('dynamic_cal_config.js', serial_number=gci_serial_number, debug=debug)
  2725. ## Find out the *full* (not cropped!) dcb dimensions. Don't use "aperture_xy", since the unobscured
  2726. ## cameras do not have their aperture rectangles defined, and so Nw will be too small.
  2727. Nx = d['full_rects']['IR']['height']
  2728. #Ny = d['full_rects']['IR']['width']
  2729. Nw = len(d['cropper_rects']['IR'].keys())
  2730. ## Make a new dictionary "c" and convert the aperture rectangles to xy-coordinate format.
  2731. c = {}
  2732. c['aperture_xy'] = int32(daisi_rect_to_xycoords(d['aperture_rects'], Nx, Nw))
  2733. c['cropping_xy'] = int32(daisi_rect_to_xycoords(d['cropper_rects']['IR'], Nx, Nw))
  2734. c['noaperture_xy'] = int32(daisi_rect_to_xycoords(d['noaperture_rects'], Nx, Nw))
  2735. c['full_xy'] = int32(daisi_rect_to_xycoords(d['full_rects']['IR'], Nx))
  2736. c['reference_camera'] = int32(d['reference_channel'])
  2737. ## The dictionary mapping center cameras to their reference cameras will have the form that the
  2738. ## center cameras will be the keys and the reference cameras the values.
  2739. if ('center_cam_config' in d):
  2740. c['refcam'] = {}
  2741. for config in d['center_cam_config']:
  2742. c['refcam'][config['center']] = int(config['reference'])
  2743. ## The registered scene coords are relative to the cropped coords and not the full coords.
  2744. Nx_cropped = d['cropper_rects']['IR']['0']['height']
  2745. c['scene_xy'] = int32(daisi_rect_to_xycoords(d['scene_rect'], Nx_cropped))
  2746. if ('scene_rect' in d):
  2747. c['unregistered_scene_xy'] = zeros((Nw,4), 'int32')
  2748. c['unregistered_scene_xy'][:,0] = c['scene_xy'][0] + c['cropping_xy'][:,0]
  2749. c['unregistered_scene_xy'][:,1] = c['scene_xy'][1] + c['cropping_xy'][:,0]
  2750. c['unregistered_scene_xy'][:,2] = c['scene_xy'][2] + c['cropping_xy'][:,2]
  2751. c['unregistered_scene_xy'][:,3] = c['scene_xy'][3] + c['cropping_xy'][:,2]
  2752. if debug: print('c=', c)
  2753. return(c)
  2754. ## =================================================================================================
  2755. def update_rectangles_dict(rect_dict, rect_name, xycoords, debug=False):
  2756. '''
  2757. Take an existing "rectangles" dictionary and update one of the entries with the input. Update the rectangles
  2758. file with the result, and return the updated rectangles dictionary as well. This requires a *function*
  2759. because some of the rectangle values are interconnected (i.e. if you change `cropping_xy`, then
  2760. `scene_xy` must also change to correspond).
  2761. Parameters
  2762. ----------
  2763. rect_dict : dict
  2764. The rectangles dictionary to update.
  2765. rect_name : str
  2766. {'aperture_xy','noaperture_xy','scene_xy','cropping_xy','unregistered_scene_xy'} \
  2767. name of the rectangle object to update.
  2768. xycoords : ndarray
  2769. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates for all Nw cameras. Or, in the \
  2770. case of the "scene_xy" rectangle, `xycoords` is only a length-4 vector.
  2771. Returns
  2772. -------
  2773. rd : dict
  2774. The updated rectangles dictionary.
  2775. '''
  2776. assert isinstance(rect_dict, dict), 'Input "rect_dict" must be a dict type.'
  2777. assert isinstance(rect_name, str), 'Input "rect_name" must be a string type.'
  2778. xycoords = asarray(xycoords)
  2779. if (rect_name not in ['aperture_xy','noaperture_xy','scene_xy','cropping_xy','unregistered_scene_xy']):
  2780. raise ValueError('The "rect_name" input (' + rect_name + ') does not correspond to one of the defined rectangle names!')
  2781. ## Define an abbreviation.
  2782. rd = rect_dict
  2783. Nx = rd['full_xy'][0,1]
  2784. Ny = rd['full_xy'][0,3]
  2785. Nw = alen(rd['full_xy'][:,0])
  2786. ## Update the dictionary entry.
  2787. rd[rect_name] = xycoords
  2788. ## If the input rectangle is "unregistered_scene_xy", then we need to update the corresponding
  2789. ## "scene_rect". Translate the (uniform) registered scene coordinates to the unregistered (varies from
  2790. ## wavelength to wavelength) scene rectangles.
  2791. if (rect_name == 'unregistered_scene_xy'):
  2792. rd['scene_xy'][0] = xycoords[0,0] - rd['cropping_xy'][0,0]
  2793. rd['scene_xy'][1] = xycoords[0,1] - rd['cropping_xy'][0,0]
  2794. rd['scene_xy'][2] = xycoords[0,2] - rd['cropping_xy'][0,2]
  2795. rd['scene_xy'][3] = xycoords[0,3] - rd['cropping_xy'][0,2]
  2796. ## If the input rectangle is "scene_reg_rect" or "cropping_xy", then we need to update the corresponding
  2797. ## "scene_xy". Translate the (uniform) registered scene coordinates to the unregistered (varies from wavelength to
  2798. ## wavelength) scene rectangles. Note that the scene rectangles do *not* change when the registration
  2799. ## rectangles change.
  2800. if (rect_name == 'scene_xy'):
  2801. for w in arange(Nw):
  2802. rd['unregistered_scene_xy'][w,0] = xycoords[0] + rd['cropping_xy'][w,0]
  2803. rd['unregistered_scene_xy'][w,1] = xycoords[1] + rd['cropping_xy'][w,0]
  2804. rd['unregistered_scene_xy'][w,2] = xycoords[2] + rd['cropping_xy'][w,2]
  2805. rd['unregistered_scene_xy'][w,3] = xycoords[3] + rd['cropping_xy'][w,2]
  2806. elif (rect_name == 'cropping_xy'):
  2807. for w in arange(Nw):
  2808. rd['unregistered_scene_xy'][w,0] = xycoords[w,0] + rd['cropping_xy'][w,0]
  2809. rd['unregistered_scene_xy'][w,1] = xycoords[w,1] + rd['cropping_xy'][w,0]
  2810. rd['unregistered_scene_xy'][w,2] = xycoords[w,2] + rd['cropping_xy'][w,2]
  2811. rd['unregistered_scene_xy'][w,3] = xycoords[w,3] + rd['cropping_xy'][w,2]
  2812. return(rd)
  2813. ## =================================================================================================
  2814. def write_rectanglesfile(rect_dict, serial_number=None, debug=False):
  2815. '''
  2816. Convert a rectangles dictionary to the form used by DAISI and save to a file.
  2817. Parameters
  2818. ----------
  2819. rect_dict : dict
  2820. The rectangles dictionary.
  2821. serial_number : int
  2822. The serial number of the system whose repository rectangles file you want to modify.
  2823. '''
  2824. assert isinstance(rect_dict, dict), 'Input "rect_dict" must be a dict type.'
  2825. gci_serial_number = GCI_SERIAL_NUMBER if (serial_number == None) else serial_number
  2826. ## Define an abbreviation
  2827. rd = rect_dict
  2828. ## Need to convert each dictionary to the unusual format used by the JSON file.
  2829. assert ('aperture_xy' in rd), '"aperture_xy" is not in the input dictionary!'
  2830. assert ('scene_xy' in rd), '"scene_rect" is not in the input dictionary!'
  2831. assert ('noaperture_xy' in rd), '"noaperture_xy" is not in the input dictionary!'
  2832. assert ('full_xy' in rd), '"full_xy" is not in the input dictionary!'
  2833. Nx = rd['full_xy'][0,1]
  2834. Ny = rd['full_xy'][0,3]
  2835. Nw = alen(rd['full_xy'][:,0])
  2836. ## Note you *must* convert from the numpy integer type to a pure Python int-type in order for "simplejson"
  2837. ## to be able to serialize the result.
  2838. d = {}
  2839. d['scene_rect'] = xycoords_to_daisi_rect(rd['scene_xy'], Nx)
  2840. d['aperture_rects'] = xycoords_to_daisi_rect(rd['aperture_xy'], Nx, Nw)
  2841. d['cropper_rects'] = xycoords_to_daisi_rect(rd['full_xy'], Nx, Nw)
  2842. d['noaperture_rects'] = xycoords_to_daisi_rect(rd['noaperture_xy'], Nx, Nw)
  2843. if ('cropping_xy' in rd):
  2844. d['registration_rects'] = xycoords_to_daisi_rect(rd['cropping_xy'], Nx, Nw)
  2845. ## This can trip you up, so an explicit check is helpful!
  2846. assert isinstance(d['aperture_rects']['0']['left'], int), 'daisi-format dictionary entries must be datatype "int" to be JSON-serializable!'
  2847. s = json.dumps(d, sort_keys=True, indent=4)
  2848. if debug: print(s)
  2849. filename = os.path.normpath(GCIDIR + 'cal/gci' + str(gci_serial_number) + '/dynamic_cal_config.js')
  2850. if os.path.exists(filename):
  2851. import shutil
  2852. shutil.copyfile(filename, filename+'~')
  2853. f = open(filename, 'w', encoding='utf-8')
  2854. f.write(s)
  2855. f.close()
  2856. return
  2857. ## =================================================================================================
  2858. def draw_block_spectrum(channel_boundaries, spectrum, newfigure=True, title=None, **kwargs):
  2859. '''
  2860. Draw a plot where the spectral channels are nonuniform in width and shaped by histogram-like rectangles.
  2861. Parameters
  2862. ----------
  2863. channel_boundaries : ndarray
  2864. A vector of length Nw+1 giving the wavelengths defining the boundaries of the Nw spectral channels.
  2865. spectrum : ndarray
  2866. A Nw length vector.
  2867. newfigure : bool
  2868. Whether or not to call matplotlib's `figure()` function.
  2869. title : str
  2870. The plot title.
  2871. **kwargs : any
  2872. Any keyword arguments that can be used by matplotlib's `plot()` function.
  2873. '''
  2874. import matplotlib.pyplot as plt
  2875. channel_boundaries = asarray(channel_boundaries)
  2876. spectrum = asarray(spectrum)
  2877. assert (alen(channel_boundaries) == 1 + alen(spectrum)), 'Input "channel_boundaries" must have length 1 more than input "spectrum".'
  2878. cb = channel_boundaries
  2879. nchannels = alen(cb) - 1
  2880. x = []
  2881. y = []
  2882. x.append(cb[0])
  2883. y.append(0.0)
  2884. for n in arange(nchannels):
  2885. x.append(cb[n])
  2886. x.append(cb[n+1])
  2887. y.append(spectrum[n])
  2888. y.append(spectrum[n])
  2889. x.append(cb[-1])
  2890. y.append(0.0)
  2891. if newfigure:
  2892. plt.figure()
  2893. xmin = amin(x)
  2894. xmax = amax(x)
  2895. xptp = xmax - xmin
  2896. xmean = 0.5 * (xmin + xmax)
  2897. xlo = xmean - 0.55 * xptp
  2898. xhi = xmean + 0.55 * xptp
  2899. ymin = amin(y)
  2900. ymax = amax(y)
  2901. yptp = ymax - ymin
  2902. ymean = 0.5 * (ymin + ymax)
  2903. ylo = ymean - 0.55 * yptp
  2904. yhi = ymean + 0.55 * yptp
  2905. else:
  2906. ## First grab the existing plot limits. If these were previously determined by draw_block_spectrum(),
  2907. ## then we need only rescale the plot range by (0.5/0.55) to get to the original data limits.
  2908. ## Appending these to the current data vector, we can update the plot limits using both old and new
  2909. ## data. First grab the existing limits.
  2910. (x0,x1,y0,y1) = plt.axis()
  2911. x0mean = 0.5 * (x0 + x1)
  2912. x0ptp = (0.5 / 0.55) * (x1 - x0)
  2913. x0min = x0mean - 0.5 * x0ptp
  2914. x0max = x0mean + 0.5 * x0ptp
  2915. y0mean = 0.5 * (y0 + y1)
  2916. y0ptp = (0.5 / 0.55) * (y1 - y0)
  2917. y0min = y0mean - 0.5 * y0ptp
  2918. y0max = y0mean + 0.5 * y0ptp
  2919. ## Next determine the new plot range using the old and new data limits.
  2920. xmin = amin(append(array(x), x0min))
  2921. xmax = amax(append(array(x), x0max))
  2922. xptp = xmax - xmin
  2923. xmean = 0.5 * (xmin + xmax)
  2924. xlo = xmean - 0.55 * xptp
  2925. xhi = xmean + 0.55 * xptp
  2926. ymin = amin(append(array(y), y0min))
  2927. ymax = amax(append(array(y), y0max))
  2928. yptp = ymax - ymin
  2929. ymean = 0.5 * (ymin + ymax)
  2930. ylo = ymean - 0.55 * yptp
  2931. yhi = ymean + 0.55 * yptp
  2932. plt.plot(x, y, **kwargs)
  2933. if (title != None): plt.title(title)
  2934. plt.axis([xlo,xhi,ylo,yhi])
  2935. return
  2936. ## =================================================================================================
  2937. def get_mean_spect(dcb, xycoords):
  2938. '''
  2939. Get the mean spectrum of a rectangular region of interest in a datacube.
  2940. Parameters
  2941. ----------
  2942. dcb : ndarray
  2943. A (Nx,Ny,Nw) datacube
  2944. xycoords : ndarray
  2945. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates specifying a rectangular region \
  2946. in each plane of the datacube.
  2947. Returns
  2948. -------
  2949. spect : ndarray
  2950. A Nw-length vector giving the mean spectrum of the region.
  2951. '''
  2952. dcb = asarray(dcb)
  2953. xycoords = asarray(xycoords)
  2954. assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
  2955. Nw = dcb.shape[2]
  2956. spect = zeros(Nw)
  2957. if (xycoords.ndim == 1):
  2958. for w in arange(Nw):
  2959. xlo = xycoords[0]
  2960. xhi = xycoords[1]
  2961. ylo = xycoords[2]
  2962. yhi = xycoords[3]
  2963. if (xlo == xhi) or (ylo == yhi):
  2964. spect[w] = nan
  2965. else:
  2966. spect[w] = mean(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  2967. else:
  2968. for w in arange(Nw):
  2969. xlo = xycoords[w,0]
  2970. xhi = xycoords[w,1]
  2971. ylo = xycoords[w,2]
  2972. yhi = xycoords[w,3]
  2973. if (xlo >= xhi) or (ylo >= yhi):
  2974. spect[w] = nan
  2975. else:
  2976. spect[w] = mean(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  2977. return(spect)
  2978. ## =================================================================================================
  2979. def dcb_region_stats(dcb, xycoords=None):
  2980. '''
  2981. Get the mean and stdev spectra of a rectangular region of interest in a datacube.
  2982. Parameters
  2983. ----------
  2984. dcb : ndarray
  2985. A (Nx,Ny,Nw) datacube
  2986. xycoords : ndarray
  2987. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates specifying a rectangular region \
  2988. in each plane of the datacube.
  2989. Returns
  2990. -------
  2991. meanvals : ndarray
  2992. A Nw-length vector giving the mean spectrum of the region.
  2993. stdvals : ndarray
  2994. A Nw-length vector giving the stdev of each region in the Nw planes of the cube.
  2995. Example
  2996. -------
  2997. import gci
  2998. rect_dict = gci.read_rectanglesfile()
  2999. xycoord = rect_dict('scene_xy')
  3000. (means,stdevs) = gci.dcb_region_stats(dcb, xycoord)
  3001. '''
  3002. dcb = asarray(dcb)
  3003. assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
  3004. (Nx,Ny,Nw) = dcb.shape
  3005. if (xycoords == None):
  3006. xycoords = asarray([0,Nx-1,0,Ny-1])
  3007. else:
  3008. xycoords = asarray(xycoords)
  3009. meanvals = zeros(Nw)
  3010. stdvals = zeros(Nw)
  3011. minvals = zeros(Nw)
  3012. maxvals = zeros(Nw)
  3013. if (xycoords.ndim == 1):
  3014. for w in arange(Nw):
  3015. xlo = xycoords[0]
  3016. xhi = xycoords[1]
  3017. ylo = xycoords[2]
  3018. yhi = xycoords[3]
  3019. if (xlo == xhi) or (ylo == yhi):
  3020. meanvals[w] = nan
  3021. stdvals[w] = nan
  3022. minvals[w] = nan
  3023. maxvals[w] = nan
  3024. else:
  3025. #meanvals[w] = nanmean(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3026. #stdvals[w] = nanstd(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3027. #minvals[w] = nanmin(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3028. #maxvals[w] = nanmax(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3029. meanvals[w] = mean(dcb[xlo:xhi,ylo:yhi,w])
  3030. stdvals[w] = std(dcb[xlo:xhi,ylo:yhi,w])
  3031. minvals[w] = amin(dcb[xlo:xhi,ylo:yhi,w])
  3032. maxvals[w] = amax(dcb[xlo:xhi,ylo:yhi,w])
  3033. else:
  3034. for w in arange(Nw):
  3035. xlo = xycoords[w,0]
  3036. xhi = xycoords[w,1]
  3037. ylo = xycoords[w,2]
  3038. yhi = xycoords[w,3]
  3039. if (xlo >= xhi) or (ylo >= yhi):
  3040. meanvals[w] = nan
  3041. stdvals[w] = nan
  3042. minvals[w] = nan
  3043. maxvals[w] = nan
  3044. else:
  3045. #meanvals[w] = nanmean(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3046. #stdvals[w] = nanstd(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3047. #minvals[w] = nanmin(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3048. #maxvals[w] = nanmax(ravel(dcb[xlo:xhi,ylo:yhi,w]))
  3049. meanvals[w] = mean(dcb[xlo:xhi,ylo:yhi,w])
  3050. stdvals[w] = std(dcb[xlo:xhi,ylo:yhi,w])
  3051. minvals[w] = amin(dcb[xlo:xhi,ylo:yhi,w])
  3052. maxvals[w] = amax(dcb[xlo:xhi,ylo:yhi,w])
  3053. return(meanvals, stdvals, minvals, maxvals)
  3054. ## =================================================================================================
  3055. def daisi_rect_to_xycoords(rect_dict, Nx, Nw=0):
  3056. '''
  3057. Convert an a DAISI-format rectangles dictionary (converted from the JSON file) to an xycoords matrix.
  3058. Parameters
  3059. ----------
  3060. rect_dict : dict
  3061. A rectangles dictionary.
  3062. Nx : int
  3063. The x-dimension of the datacube.
  3064. Nw : int
  3065. The w-dimension of the datacube.
  3066. Returns
  3067. -------
  3068. xycoords : ndarray
  3069. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) for all Nw cameras. If the input contains only one \
  3070. rectangle and not Nw rectangles, then `xycoords` is returned as simply a length-4 vector.
  3071. '''
  3072. assert isinstance(rect_dict, dict), 'Input "rect_dict" must be a dict type.'
  3073. assert (Nx == int(Nx)), 'Input "Nx" must be an integer type.'
  3074. rd = rect_dict
  3075. if (Nw == 0):
  3076. xycoords = zeros(4, 'int32')
  3077. xycoords[0] = Nx - rd['top'] - rd['height']
  3078. xycoords[1] = Nx - rd['top']
  3079. xycoords[2] = rd['left']
  3080. xycoords[3] = rd['left'] + rd['width']
  3081. else:
  3082. assert (Nx > 0), 'Input "Nx" is zero!'
  3083. xycoords = zeros((Nw,4), 'int32')
  3084. for w in arange(Nw):
  3085. if (str(w) in rd):
  3086. xycoords[w,0] = Nx - rd[str(w)]['top'] - rd[str(w)]['height']
  3087. xycoords[w,1] = Nx - rd[str(w)]['top']
  3088. xycoords[w,2] = rd[str(w)]['left']
  3089. xycoords[w,3] = rd[str(w)]['left'] + rd[str(w)]['width']
  3090. return(xycoords)
  3091. ## =================================================================================================
  3092. def xycoords_to_daisi_rect(xycoords, Nx, Nw=0):
  3093. '''
  3094. Convert an xycoords matrix to DAISI-format.
  3095. Parameters
  3096. ----------
  3097. xycoords : ndarray
  3098. A (Nw,4) matrix of (xlo,xhi,ylo,yhi) coordinates for all Nw cameras. Or the input can \
  3099. be a length-4 vector of (xlo,xhi,ylo,yhi).
  3100. Nx : int
  3101. The x-dimension of the datacube.
  3102. Nw : int
  3103. The w-dimension of the datacube.
  3104. Returns
  3105. -------
  3106. d : dict
  3107. A DAISI-format dictionary (ready to be converted to JSON format).
  3108. '''
  3109. xycoords = asarray(xycoords)
  3110. assert (Nx == int(Nx)), 'Input "Nx" must be an integer type.'
  3111. ## Note that the "int" conversion here is *necessary* because the datatype must be a pure Python type
  3112. ## and not a numpy datatype in order for simplejson to serialize the resulting dictionary.
  3113. d = {}
  3114. if (Nw == 0):
  3115. assert (xycoords.ndim == 1), 'Input "xycoords" should be a simple vector when Nw=0.'
  3116. d['left'] = int(xycoords[2])
  3117. d['top'] = int(Nx - xycoords[1])
  3118. d['width'] = int(xycoords[3] - xycoords[2])
  3119. d['height'] = int(xycoords[1] - xycoords[0])
  3120. else:
  3121. assert (Nx > 0), 'Input "Nx" is zero!'
  3122. for w in arange(Nw):
  3123. if (xycoords[w,0] + xycoords[w,1] == 0): continue
  3124. d[str(w)] = {}
  3125. d[str(w)]['left'] = int(xycoords[w,2])
  3126. d[str(w)]['top'] = int(Nx - xycoords[w,1])
  3127. d[str(w)]['width'] = int(xycoords[w,3] - xycoords[w,2])
  3128. d[str(w)]['height'] = int(xycoords[w,1] - xycoords[w,0])
  3129. return(d)
  3130. ## =================================================================================================
  3131. def print_rects(rect_dict=None):
  3132. '''
  3133. Print out the rectangles dictionary.
  3134. Parameters
  3135. ----------
  3136. rect_dict : dict
  3137. The rectangles dictionary to print. If not entered, then the function reads from the \
  3138. default file to get the rectangles dictionary.
  3139. '''
  3140. rd = rect_dict
  3141. if (rd == None): rd = read_rectanglesfile()
  3142. aperture_xy = rd['aperture_xy']
  3143. cropping_xy = rd['cropping_xy']
  3144. noaperture_xy = rd['noaperture_xy']
  3145. scene_xy = rd['scene_xy']
  3146. full_xy = rd['full_xy']
  3147. unregistered_scene_xy = rd['unregistered_scene_xy']
  3148. Nw = len(cropping_xy)
  3149. print('aperture_xy:')
  3150. for w in arange(Nw):
  3151. print(' w%2i: %3i, %3i, %3i, %3i' % (w, aperture_xy[w,0], aperture_xy[w,1], aperture_xy[w,2], aperture_xy[w,3]))
  3152. print('cropping_xy:')
  3153. for w in arange(Nw):
  3154. print(' w%2i: %3i, %3i, %3i, %3i' % (w, cropping_xy[w,0], cropping_xy[w,1], cropping_xy[w,2], cropping_xy[w,3]))
  3155. print('noaperture_xy:')
  3156. for w in arange(Nw):
  3157. print(' w%2i: %3i, %3i, %3i, %3i' % (w, noaperture_xy[w,0], noaperture_xy[w,1], noaperture_xy[w,2], noaperture_xy[w,3]))
  3158. print('scene_xy:')
  3159. print(' %3i, %3i, %3i, %3i' % (scene_xy[0], scene_xy[1], scene_xy[2], scene_xy[3]))
  3160. print('full_xy:')
  3161. print(' %3i, %3i, %3i, %3i' % (full_xy[0], full_xy[1], full_xy[2], full_xy[3]))
  3162. print('unregistered_scene_xy:')
  3163. for w in arange(Nw):
  3164. print(' w%2i: %3i, %3i, %3i, %3i' % (w, unregistered_scene_xy[w,0], unregistered_scene_xy[w,1], unregistered_scene_xy[w,2], unregistered_scene_xy[w,3]))
  3165. return
  3166. ## =================================================================================================
  3167. def xcorr(f, g):
  3168. '''
  3169. Calculate the normalized cross-correlation coefficient of the two vectors `f` and `g` without shifting
  3170. one relative to the other.
  3171. Parameters
  3172. ----------
  3173. f : ndarray
  3174. g : ndarray
  3175. Returns
  3176. -------
  3177. xcorr : float
  3178. The normalized cross-correlation value (ranging from -1 to +1).
  3179. '''
  3180. assert (f.size == g.size), 'Inputs "f" and "g" must have the same length.'
  3181. assert (f.shape == g.shape), 'Inputs "f" and "g" must have the same shape.'
  3182. assert not any(isnan(f)), 'Cannot compute the cross-correation, since input "f" has NaN values.'
  3183. assert not any(isnan(g)), 'Cannot compute the cross-correation, since input "g" has NaN values.'
  3184. from numpy.linalg import norm
  3185. f_mean = mean(f)
  3186. g_mean = mean(g)
  3187. f_unit = (f - f_mean) / norm(f - f_mean)
  3188. g_unit = (g - g_mean) / norm(g - g_mean)
  3189. xcorr = dot(f_unit, g_unit)
  3190. ## The code above is equivalent to:
  3191. ## xcorr = sum((f - mean(f)) * (g - mean(g))) / (std(f) * std(g))
  3192. ## xcorr = xcorr / (alen(f) - 1.0)
  3193. return(xcorr)
  3194. ## =================================================================================================
  3195. #def xcorr3d_old(dcb, g):
  3196. # '''
  3197. # Calculate the normalized cross-correlation coefficient of each pixel in `dcb` with the input
  3198. # spectrum `g` without shifting one spectrum relative to the other.
  3199. # '''
  3200. #
  3201. # ## Cross-correlate each spectrum in "dcb" (giving vector "f") with an equal-length vector spectrum
  3202. # ## "g".
  3203. # from numpy.linalg import norm
  3204. #
  3205. # g_mean = mean(g)
  3206. # g_unit = (g - g_mean) / norm(g - g_mean)
  3207. # (Nx,Ny,Nw) = dcb.shape
  3208. # xcorr_img = zeros((Nx,Ny))
  3209. #
  3210. # for x in arange(Nx):
  3211. # for y in arange(Ny):
  3212. # if all(dcb[x,y,:] == 0):
  3213. # xcorr_img[x,y] = nan
  3214. # else:
  3215. # f_mean = mean(dcb[x,y,:])
  3216. # f_unit = (dcb[x,y,:] - f_mean) / norm(dcb[x,y,:] - f_mean)
  3217. # xcorr_img[x,y] = dot(f_unit, g_unit)
  3218. #
  3219. # return(xcorr_img)
  3220. ## =================================================================================================
  3221. def xcorr3d(dcb, g, weights=None):
  3222. '''
  3223. Calculate the normalized cross-correlation coefficient of each pixel in `dcb` with the input
  3224. spectrum `g` without shifting one spectrum relative to the other.
  3225. Parameters
  3226. ----------
  3227. dcb : ndarray
  3228. The datacube to run cross-correlation on.
  3229. g : ndarray
  3230. The spectrum to cross-correlate to.
  3231. weights: ndarray
  3232. The weights to apply to the cross correlation, a cube of the same dimension as `dcb`.
  3233. normalize : bool
  3234. Whether or not to perform normalization on the cube and spectrum prior to calculating the \
  3235. normalized cross-correlation. If you are not pulling from the "unit absorption cube" then you \
  3236. should set this keyword to "True".
  3237. Returns
  3238. -------
  3239. weighted_xcorr : ndarray
  3240. The weighted cross-correlation image, giving the normalized cross-correlation of each column in the \
  3241. datacube with the reference spectrum.
  3242. '''
  3243. ## Cross-correlate each spectrum in "dcb" (giving vector "f") with an equal-length vector spectrum
  3244. ## "g".
  3245. from numpy.linalg import norm
  3246. dcb = float64(dcb)
  3247. if (weights == None):
  3248. weights = ones_like(dcb)
  3249. (Nx,Ny,Nw) = dcb.shape
  3250. gcube = zeros_like(dcb)
  3251. normgcube = zeros_like(dcb)
  3252. normdcb = zeros_like(dcb)
  3253. ## The three "wcov" are weighted covariance cubes -- these calculated the covariance of the
  3254. ## datacube spectra with the target spectrum "g", weighted according to the input weights.
  3255. wcov1 = zeros((Nx,Ny,Nw))
  3256. wcov2 = zeros((Nx,Ny,Nw))
  3257. wcov3 = zeros((Nx,Ny,Nw))
  3258. for w in arange(Nw):
  3259. gcube[:,:,w] = g[w]
  3260. ## Calc
  3261. dcbmean = average(dcb, weights=weights, axis=2)
  3262. gcubemean = average (gcube, weights=weights, axis=2)
  3263. bad = (dcbmean == 0) & (sum(dcb, axis=2) == 0)
  3264. for w in arange(Nw):
  3265. wcov1[:,:,w] = weights[:,:,w] * (dcb[:,:,w] - dcbmean) * (gcube[:,:,w] - gcubemean)
  3266. wcov2[:,:,w] = weights[:,:,w] * (dcb[:,:,w] - dcbmean)**2
  3267. wcov3[:,:,w] = weights[:,:,w] * (gcube[:,:,w] - gcubemean)**2
  3268. avg_wcov1 = mean(wcov1, axis=2)
  3269. avg_wcov2 = mean(wcov2, axis=2)
  3270. avg_wcov3 = mean(wcov3, axis=2)
  3271. weighted_xcorr = avg_wcov1 / (sqrt(avg_wcov2 * avg_wcov3))
  3272. weighted_xcorr[bad] = 0
  3273. return(weighted_xcorr)
  3274. ## =================================================================================================
  3275. def convert_radiancecube_to_spectralcube(dcb, filterlist=None, filter_data=None, debug=False):
  3276. '''
  3277. For an input datacube in radiance units, use the filter spectra to build a system matrix and convert
  3278. the input to a spectral radiance cube.
  3279. Parameters
  3280. ----------
  3281. dcb : ndarray
  3282. A (Nx,Ny,Nw) datacube in radiance units.
  3283. filterlist : list
  3284. A list of the Nw filter uniqueIDs associated with the Nw cameras.
  3285. filter_data : dict
  3286. A dictionary of the filter transmission spectra.
  3287. Returns
  3288. -------
  3289. spectraldcb : ndarray
  3290. A (Nx,Ny,nchannels) datacube in units of mean spectral radiance per channel (*not* per micron!).
  3291. '''
  3292. from scipy.linalg import pinv #, svd
  3293. dcb = asarray(dcb)
  3294. assert (dcb.ndim == 3), 'Input "dcb" must have three dimensions.'
  3295. if (filterlist == None): filterlist = get_filterlist()
  3296. if (filter_data == None): filter_data = get_filter_spectra(filterlist=filterlist)
  3297. assert isinstance(filterlist, (list, tuple)), 'The input "filterlist" must be a list type.'
  3298. assert isinstance(filter_data, dict), 'The input "filter_data" must be a dict type.'
  3299. ## The next step is to transform the measurement cube to a spectral cube.
  3300. H = calculate_hmatrix(filterlist, filter_data, return_data_structure=False, debug=debug)
  3301. (Nx,Ny,Nw) = dcb.shape
  3302. nchannels = (H.shape)[1]
  3303. Hplus = pinv(H)
  3304. spectraldcb = zeros((Nx,Ny,nchannels))
  3305. for x in arange(Nx):
  3306. for y in arange(Ny):
  3307. spectraldcb[x,y,:] = dot(Hplus, dcb[x,y,:])
  3308. if debug:
  3309. print('H[w,c]=')
  3310. print(('%12s'*nchannels) % tuple(arange(nchannels)))
  3311. for w in arange(Nw):
  3312. print('%2i ' % w, end='')
  3313. print(('%11.7f '*nchannels) % tuple(H[w,:]))
  3314. print('\nHplus[c,w]=')
  3315. print(('%12s'*Nw) % tuple(arange(Nw)))
  3316. for c in arange(nchannels):
  3317. print('%2i ' % c, end='')
  3318. print(('%11.7f '*Nw) % tuple(Hplus[c,:]))
  3319. return(spectraldcb)
  3320. ## =================================================================================================
  3321. def send_asynch_command(command_name, verbose=False, *args):
  3322. '''
  3323. Send a command to DAISI through the JSON asynchronous messaging system.
  3324. Parameters
  3325. ----------
  3326. command_name : str
  3327. The name of the command to send.
  3328. input : any
  3329. A Python object to be serialized into the command.
  3330. Combinations include :
  3331. * "reset_temporal_reference": (no input) tell DAISI to reset the temporal reference
  3332. * "change_scene_reference_rectangle": (input is "scene_xy", a 4-element vector) tell DAISI to change the
  3333. scene reference rectangle (in memory, not in the file!) to the input xycoords vector.
  3334. Returns
  3335. -------
  3336. ?
  3337. '''
  3338. assert isinstance(command_name, str), 'Input "command_name" must be a string type.'
  3339. ctx = zmq.Context()
  3340. if (command_name == 'reset_temporal_reference'):
  3341. get_return_msg = False
  3342. topic = 'reset-commands'
  3343. payload = json.dumps({'cmd_name':'force-temporal-reset'})
  3344. return_info = json.dumps({'return_addr':'temporal-reset'})
  3345. elif (command_name == 'change_scene_reference_rectangle'):
  3346. ## Parse the input arguments into a vector for "scene_xy" and a scalar for "Nx".
  3347. Nx = None
  3348. scene_xy = None
  3349. for a in args:
  3350. if (a.size == 1):
  3351. Nx = a
  3352. elif (a.size == 4):
  3353. scene_xy = a
  3354. if Nx and scene_xy: break
  3355. ## Convert the xycoords into a DAISI-format cropping dictionary.
  3356. top = Nx - 1 - scene_xy[0]
  3357. height = scene_xy[1] - scene_xy[0] + 1
  3358. left = scene_xy[2]
  3359. width = scene_xy[3] - scene_xy[2] + 1
  3360. payload = json.dumps({'cmd_name':'set_scene_ref_rect', 'rect':{'left':left, 'top':top, 'width':width, 'height':height}})
  3361. topic = 'dynamic-cal'
  3362. return_info = json.dumps({'return_addr':'dummy value'})
  3363. cmd = [topic, 'JSON_ASYNC_REQ', payload, return_info]
  3364. if verbose: print('Sending asynchronous command:\n' + cmd)
  3365. pubsock = ctx.socket(zmq.PUB)
  3366. pubsock.connect('tcp://10.1.10.11:9550')
  3367. pubsock.send_multipart(cmd)
  3368. pubsock.close()
  3369. return
  3370. ## =================================================================================================
  3371. def set_gci_serial_number(num):
  3372. ''' Set the gci serial number.'''
  3373. global GCI_SERIAL_NUMBER
  3374. GCI_SERIAL_NUMBER = int(num)
  3375. return
  3376. ## =================================================================================================
def get_gci_serial_number():
    ''' Get the gci serial number (the current value of the module-level GCI_SERIAL_NUMBER global).'''
    return(GCI_SERIAL_NUMBER)
  3380. ## =================================================================================================
  3381. def get_calib_dictionary(filename, serial_number=None, debug=False):
  3382. '''
  3383. Get a calibration file from the data archive and convert its contents to a Python dictionary.
  3384. Parameters
  3385. ----------
  3386. filename : str
  3387. The name of the file within the archive we want to convert.
  3388. serial_number : int
  3389. The serial number of the GCI to query.
  3390. Returns
  3391. -------
  3392. thisdict : dict
  3393. A dictionary of the file contents.
  3394. '''
  3395. assert isinstance(filename, str), 'The input "filename" must be a string type.'
  3396. gci_serial_number = GCI_SERIAL_NUMBER if (serial_number == None) else serial_number
  3397. path = os.path.normpath(GCIDIR + 'cal/gci' + str(gci_serial_number) + '/' + filename)
  3398. if debug: print('calibration file = "' + path + '"')
  3399. f = open(path, 'r')
  3400. contents = f.read()
  3401. f.close()
  3402. thisdict = json.loads(contents)
  3403. if ('system_serial_number' in thisdict):
  3404. del thisdict['system_serial_number']
  3405. if ('comment' in thisdict):
  3406. del thisdict['comment']
  3407. return(thisdict)
  3408. ## =================================================================================================
  3409. def jcamp_reader(filename_or_filecontentstr):
  3410. '''
  3411. Read a JDX-format file.
  3412. The function returns three items in a dictionary: the numpy vectors `x` and `y` giving
  3413. the abscissa and ordinate data, and `info`, a dictionary containing the header information.
  3414. Parameters
  3415. ----------
  3416. filename_or_filecontentstr : str
  3417. Either the string giving the entire file contents or the name of the file to read.
  3418. Returns
  3419. -------
  3420. thisdict : dict
  3421. A dictionary of the file contents, with keys `x`, `y`, and `info`.
  3422. Notes
  3423. -----
  3424. ## TODO: you should split the data lines from the info lines using the ##XYDATA marker as the
  3425. ## beginning of the former.
  3426. '''
  3427. if os.path.exists(filename_or_filecontentstr):
  3428. f = open(filename_or_filecontentstr,'r')
  3429. flines = f.read().splitlines()
  3430. f.close()
  3431. else:
  3432. flines = filename_or_filecontentstr.splitlines()
  3433. jcamp_dict = {}
  3434. xstart = []
  3435. xnum = []
  3436. y = []
  3437. #has_info = False
  3438. #header_lines = 0
  3439. for n,line in enumerate(flines):
  3440. ## First work out the header dictionary.
  3441. if line.startswith('##'):
  3442. line = line[2:]
  3443. if line.endswith('='):
  3444. lhs = line
  3445. rhs = ''
  3446. else:
  3447. (lhs,rhs) = line.split('=', 1)
  3448. lhs = lhs.strip().lower()
  3449. rhs = rhs.strip().lower()
  3450. if rhs.isdigit():
  3451. jcamp_dict[lhs] = int(rhs)
  3452. elif is_float(rhs):
  3453. jcamp_dict[lhs] = float(rhs)
  3454. else:
  3455. jcamp_dict[lhs] = rhs
  3456. elif line.startswith('$$'):
  3457. ## If the line starts with '$$' then it is a comment: ignore it.
  3458. pass
  3459. else:
  3460. ## If the line does not start with '##' or '$$' then it should be a data line.
  3461. ## To make sure, check that all split elements are floats.
  3462. datavals = line.split(' ')
  3463. if all(is_float(datavals)):
  3464. xstart.append(float(datavals[0]))
  3465. xnum.append(len(datavals)-1)
  3466. for dataval in datavals[1:]:
  3467. y.append(float(dataval))
  3468. ## You got all of the Y-values. Next you need to figure out how to generate the missing X's...
  3469. ## First look for the "lastx" dictionary entry. You will need that one to finish the set.
  3470. xstart.append(jcamp_dict['lastx'])
  3471. x = asarray([])
  3472. y = asarray(y)
  3473. for n in arange(len(xnum)):
  3474. x = append(x, linspace(xstart[n],xstart[n+1],xnum[n]))
  3475. jcamp_dict['x'] = x
  3476. jcamp_dict['y'] = y
  3477. return(jcamp_dict)
  3478. ## =================================================================================================
  3479. def jcamp_spectrum(filename, wavemin=None, wavemax=None, skip_nonquant=True, debug=False):
  3480. '''
  3481. Read a JDX-format file and convert its contents to an interpolated spectrum.
  3482. The function attempts to extract quantitative information from the header, in order to
  3483. convert the raw absorption data into cross-section data.
  3484. Parameters
  3485. ----------
  3486. filename : str
  3487. The file to read.
  3488. wavemin : float, optional
  3489. The minimum wavelength (in um) to interpolate to.
  3490. wavemax : float, optional
  3491. The maximum wavelength (in um) to interpolate to.
  3492. skip_nonquant : bool
  3493. Whether or not to return None for those spectra missing complete quantitative info. If False, \
  3494. and the spectra are nonquantitative, then make a guess to fill in the missing scale values.
  3495. Returns
  3496. -------
  3497. thisdict : dict
  3498. A dictionary of the file contents, with keys `x`, `y`, `xsec`, and `info`.
  3499. '''
  3500. jcamp_dict = jcamp_reader(filename)
  3501. x = jcamp_dict['x']
  3502. y = jcamp_dict['y']
  3503. ppm = 1.0E-6 ## for conversion to ppm units
  3504. amagat = 2.687E+25 ## number of molecules per m^3 at std temp and pressure
  3505. T = 288.0 ## standard temperature in Kelvin
  3506. R = 2.78 ## the gas constant in (m^3 * mmHg) / (amg * K)
  3507. ## Note: normally when we convert from wavenumber to wavelength units, the ordinate must be nonuniformly
  3508. ## rescaled in order to compensate. But this is only true if we resample the abscissa to a uniform sampling
  3509. ## grid. In this case here, we keep the sampling grid nonuniform in wavelength space, such that each digital
  3510. ## bin retains its proportionality to energy, which is what we want.
  3511. if (jcamp_dict['xunits'] == '1/cm') or (jcamp_dict['xunits'] == 'cm-1'):
  3512. x = 10000.0 / x
  3513. jcamp_dict['xunits'] = 'um'
  3514. elif (jcamp_dict['xunits'] == 'micrometers'):
  3515. jcamp_dict['xunits'] = 'um'
  3516. elif (jcamp_dict['xunits'] == 'nanometers'):
  3517. x = x * 1000.0
  3518. jcamp_dict['xunits'] = 'um'
  3519. if (jcamp_dict['yunits'] == 'transmittance'):
  3520. y = 1.0 - y
  3521. jcamp_dict['yunits'] = 'absorbance'
  3522. if (x[0] > x[-1]):
  3523. x = x[::-1]
  3524. y = y[::-1]
  3525. if ('xfactor' in jcamp_dict): x = x * jcamp_dict['xfactor']
  3526. if (wavemin != None) and (amax(x) < wavemin):
  3527. msg = 'The spectrum for "' + jcamp_dict['title'] + '" contains no data in the requested spectral range.'
  3528. if debug: print(msg)
  3529. return(None)
  3530. elif (wavemax != None) and (amin(x) > wavemax):
  3531. msg = 'The spectrum for "' + jcamp_dict['title'] + '" contains no data in the requested spectral range.'
  3532. if debug: print(msg)
  3533. return(None)
  3534. elif (wavemin != None) and (amin(x) > wavemin):
  3535. min_retrieved = str(amin(x))
  3536. x = append(array(wavemin), x)
  3537. y = append(y[0], y)
  3538. msg = '"' + jcamp_dict['title'] + '": Minimum of x retrieved ( ' + min_retrieved + ') is greater than the required minimum wavelength (' + str(wavemin) + ').'
  3539. if debug: print(msg)
  3540. #raise ValueError(msg)
  3541. elif (wavemax != None) and (amax(x) < wavemax):
  3542. max_retrieved = str(amax(x))
  3543. x = append(x, wavemax)
  3544. y = append(y, y[-1])
  3545. msg = '"' + jcamp_dict['title'] + '": Maximum of x retrieved ( ' + max_retrieved + ') is less than the required maximum wavelength (' + str(wavemax) + ').'
  3546. if debug: print(msg)
  3547. #raise ValueError(msg)
  3548. ## Correct for any unphysical data.
  3549. if any(y < 0.0):
  3550. y[y < 0.0] = 0.0
  3551. if any(y > 1.0):
  3552. y[y > 1.0] = 1.0
  3553. ## Determine the effective path length "ell" of the measurement chamber, in meters.
  3554. if ('path length' in jcamp_dict):
  3555. (val,unit) = jcamp_dict['path length'].split()[0:2]
  3556. if (unit == 'cm'):
  3557. ell = float(val) / 100.0
  3558. elif (unit == 'm'):
  3559. ell = float(val)
  3560. elif (unit == 'mm'):
  3561. ell = float(val) / 1000.0
  3562. else:
  3563. ell = 0.1
  3564. else:
  3565. if debug: print('"' + jcamp_dict['title'] + '": No path length value available for ' + jcamp_dict['title'] + '. Using the default ell = 0.1 ...')
  3566. if skip_nonquant: return({'info':None, 'x':None, 'xsec':None, 'y':None})
  3567. ell = 0.1
  3568. assert(alen(x) == alen(y))
  3569. if ('yfactor' in jcamp_dict):
  3570. y = y * jcamp_dict['yfactor']
  3571. if ('npoints' in jcamp_dict):
  3572. if (alen(x) != jcamp_dict['npoints']):
  3573. npts_retrieved = str(alen(x))
  3574. msg = '"' + jcamp_dict['title'] + '": Number of data points retrieved (' + npts_retrieved + \
  3575. ') does not equal the expected length (npoints = ' + str(jcamp_dict['npoints']) + ')!'
  3576. if debug: print(msg)
  3577. #raise ValueError(msg)
  3578. #assert(alen(x) == jcamp_dict['npoints'])
  3579. ## For each gas, manually define the pressure "p" at which the measurement was taken (in units of mmHg).
  3580. if (jcamp_dict['title'] == 'carbon dioxide'):
  3581. p = 200.0
  3582. elif (jcamp_dict['title'] == 'carbon monoxide'):
  3583. p = 400.0
  3584. elif (jcamp_dict['title'] == 'hydrogen sulfide'):
  3585. p = 600.0
  3586. elif (jcamp_dict['title'] == 'methane'):
  3587. p = 150.0
  3588. elif (jcamp_dict['title'] == 'ethane'):
  3589. p = 150.0
  3590. elif (jcamp_dict['title'] == 'propane'):
  3591. p = 150.0
  3592. elif (jcamp_dict['title'] == 'n-pentane'):
  3593. p = 6.0
  3594. elif (jcamp_dict['title'] == 'propane, 2-methyl-'):
  3595. p = 200.0
  3596. jcamp_dict['title'] = 'iso-butane'
  3597. elif (jcamp_dict['title'] == 'butane, 2-methyl-'):
  3598. p = 100.0
  3599. jcamp_dict['title'] = 'iso-pentane'
  3600. elif (jcamp_dict['title'] == 'ammonia'):
  3601. p = 50.0
  3602. elif (jcamp_dict['title'] == 'benzene'):
  3603. p = 70.0
  3604. elif (jcamp_dict['title'] == 'butadiene'):
  3605. p = 100.0
  3606. elif (jcamp_dict['title'] == 'chlorobenzene'):
  3607. p = 10.0
  3608. elif (jcamp_dict['title'] == '1,2-dichloroethane'):
  3609. p = 50.0
  3610. elif (jcamp_dict['title'] == 'ethanol'):
  3611. p = 30.0
  3612. elif (jcamp_dict['title'] == 'methanol'):
  3613. p = 70.0
  3614. elif (jcamp_dict['title'] == 'propylene'):
  3615. p = 150.0
  3616. elif (jcamp_dict['title'] == 'propylene oxide'):
  3617. p = 100.0
  3618. elif (jcamp_dict['title'] == 'toluene'):
  3619. p = 20.0
  3620. elif (jcamp_dict['title'] == 'vinyl chloride'):
  3621. p = 6.0
  3622. elif (jcamp_dict['title'] == 'p-xylene'):
  3623. p = 5.0
  3624. elif (jcamp_dict['title'] == 'm-xylene'):
  3625. p = 30.0
  3626. elif (jcamp_dict['title'] == 'sulfur dioxide'):
  3627. p = 100.0
  3628. elif (jcamp_dict['title'] == 'butane'):
  3629. p = 100.0
  3630. elif (jcamp_dict['title'] == 'sulfur hexafluoride'):
  3631. p = 600.0
  3632. elif (jcamp_dict['title'] == 'ethylene'):
  3633. p = 759.8
  3634. elif (jcamp_dict['title'] == 'ozone'):
  3635. p = 759.8 * 40.0 / 1.0E6
  3636. else:
  3637. if debug: print('No pressure "p" value entry for ' + jcamp_dict['title'] + '. Using the default p = 150.0 ...')
  3638. if skip_nonquant: return([None]*4)
  3639. p = 150.0
  3640. ## Convert the absorbance units to cross-section in meters squared, for a gas at 1ppm at std atmospheric
  3641. ## temperature and pressure.
  3642. xsec = y * ppm * R * T / (p * ell)
  3643. ## Finally, truncate the data to the range of interest. If xmin=xmax, then simply skip this step.
  3644. if (wavemin != wavemax):
  3645. okay = (x >= wavemin) & (x <= wavemax)
  3646. x = x[okay]
  3647. y = y[okay]
  3648. xsec = xsec[okay]
  3649. ## Update the "x" and "y" values and add the "xsec" values.
  3650. jcamp_dict['x'] = x
  3651. jcamp_dict['xsec'] = xsec
  3652. jcamp_dict['y'] = y
  3653. return(jcamp_dict)
  3654. ## =================================================================================================
def format_coord(self, x, y):
    '''
    Redefine the `format_coord()` method to matplotlib's Axes object --- this allows us to add a
    `z=...` in the status bar.

    The function *replaces* the existing method. So, calls to plt.imshow() will now use a different
    formatting function in the status bar.

    Parameters
    ----------
    self : matplotlib Axes handle
        The Axes object's `self` handle.
    x : int or float
        The x-position of the mouse cursor in data coordinates.
    y : int or float
        The y-position of the mouse cursor in data coordinates.
    '''

    ## Round the cursor position to the nearest integer pixel indices. Note the convention
    ## used here: "x" labels the row index and "y" the column index throughout.
    col = int(x+0.5)
    row = int(y+0.5)

    ## NOTE(review): "_current_image", "_extent", and "_A" are private matplotlib attributes;
    ## this function depends on matplotlib internals and may break across matplotlib versions.
    ## If we're not looking at an image, then use "x" as the abscissa and "y" as the ordinate.
    if (self._current_image == None):
        if (x == int(x)):
            return('x=%i, y=%i' % (row, col))
        else:
            #return('x=%1.4f, y=%1.4f' % (x, y))
            return('x=%f, y=%f' % (x, y))
    else:
        ## The image extent gives the data-coordinate bounding box of the displayed image.
        xmin = self._current_image._extent[2]
        xmax = self._current_image._extent[3]
        ymin = self._current_image._extent[0]
        ymax = self._current_image._extent[1]

        ## An RGB(A) image array has three dimensions; a grayscale image has two.
        iscolor = (self._current_image._A.ndim == 3)
        if iscolor:
            (Nx,Ny,_) = self._current_image._A.shape
        else:
            (Nx,Ny) = self._current_image._A.shape

        if (col >= ymin) and (col < ymax) and (row >= xmin) and (row < xmax):
            #print('(xmin,xmax,ymin,ymax)=', (xmin,xmax,ymin,ymax))
            if iscolor:
                z = self._current_image._A[row,col,:]
            else:
                z = self._current_image._A[row,col]

            ## Check if the image is RGB color, and if so then make sure that `z` is an integer.
            try:
                if iscolor:
                    isint = (z[0] == int(z[0]))
                else:
                    isint = (z == int(z))
            except Exception:
                ## Any conversion failure (e.g. NaN or masked values): fall back to float formatting.
                isint = False

            ## Choose the z-value readout format to match the image type (int vs float, gray vs RGB).
            if iscolor and isint:
                return('x=%i, y=%i, z=(%i,%i,%i)' % (row, col, z[0], z[1], z[2]))
            elif iscolor and not isint:
                return('x=%i, y=%i, z=(%f,%f,%f)' % (row, col, z[0], z[1], z[2]))
            elif isint:
                return('x=%i, y=%i, z=%i' % (row, col, z))
            else:
                #return('x=%i, y=%i, z=%1.4f' % (row, col, z))
                return('x=%i, y=%i, z=%f' % (row, col, z))
        else:
            ## Cursor is outside the image extent: report the raw data coordinates with no z value.
            return('x=%i, y=%i'%(y, x))
## Now redefine the default formatting function.
## Monkey-patch matplotlib's Axes class (if matplotlib is importable at all) so that *all*
## subsequently created plots use the z-value-reporting status-bar formatter defined above.
try:
    from matplotlib.axes import Axes
    Axes.format_coord = format_coord
except Exception:
    ## matplotlib is unavailable (or its API changed): skip the patch silently.
    pass
  3720. #def imshow(image, ax=None, box=None, title=None, **kwargs):
  3721. # '''
  3722. # Use matplotlib's `imshow()` function but add a `z=...` in the status bar.
  3723. #
  3724. # The function returns three items in a dictionary: the numpy vectors `x` and `y` giving
  3725. # the abscissa and ordinate data, and `info`, a dictionary containing the header information.
  3726. #
  3727. # Parameters
  3728. # ----------
  3729. # image : ndarray
  3730. # The image to display.
  3731. # ax : matplotlib axis handle, optional
  3732. # The axis handle.
  3733. # box : ndarray
  3734. # A 4-element array of (xlo,xhi,ylo,yhi) rectangle drawing coords.
  3735. # '''
  3736. #
  3737. # import matplotlib.pyplot as plt
  3738. #
  3739. # image = asarray(image)
  3740. # iscolor = (image.ndim == 3)
  3741. # if (box != None):
  3742. # box = asarray(box)
  3743. # assert (alen(box) == 4), 'The input "box" should have 4 elements, not ' + str(alen(box)) + '.'
  3744. #
  3745. # if (ax == None):
  3746. # ax = plt.gca()
  3747. #
  3748. # if ('extent' in kwargs):
  3749. # xmin = kwargs['extent'][0]
  3750. # xmax = kwargs['extent'][1]
  3751. # ymin = kwargs['extent'][2]
  3752. # ymax = kwargs['extent'][3]
  3753. # else:
  3754. # xmin = 0
  3755. # xmax = Nx
  3756. # ymin = 0
  3757. # ymax = Ny
  3758. #
  3759. # def myformat_coord(x, y):
  3760. # col = int(x+0.5)
  3761. # row = int(y+0.5)
  3762. # if iscolor:
  3763. # (Nx,Ny,_) = image.shape
  3764. # else:
  3765. # (Nx,Ny) = image.shape
  3766. #
  3767. # if (col >= ymin) and (col < ymax) and (row >= xmin) and (row < xmax):
  3768. # print('(xmin,xmax,ymin,ymax)=', (xmin,xmax,ymin,ymax))
  3769. # if iscolor:
  3770. # z = image[row,col,:]
  3771. # else:
  3772. # z = image[row,col]
  3773. #
  3774. # ## Check if the image is RGB color, and if so then make sure that `z` is an integer.
  3775. # if iscolor or (z == int(z)):
  3776. # if iscolor:
  3777. # return('x=%i, y=%i, z=(%i,%i,%i)' % (row, col, z[0], z[1], z[2]))
  3778. # else:
  3779. # return('x=%i, y=%i, z=%i' % (row, col, z))
  3780. # else:
  3781. # return('x=%i, y=%i, z=%1.4f' % (row, col, z))
  3782. # else:
  3783. # return('x=%i, y=%i'%(y, x))
  3784. #
  3785. # plt.imshow(image, **kwargs)
  3786. # ax.format_coord = myformat_coord
  3787. # plt.axis('tight')
  3788. #
  3789. # if (title != None):
  3790. # plt.title(title)
  3791. #
  3792. # if (box != None):
  3793. # xlo = box[0]
  3794. # xhi = box[1]
  3795. # ylo = box[2]
  3796. # yhi = box[3]
  3797. # ax.plot([ylo,yhi,yhi,ylo,ylo], [xlo,xlo,xhi,xhi,xlo], 'r-')
  3798. #
  3799. # return
  3800. ## =================================================================================================
  3801. def find_nearest_brackets(x, value):
  3802. '''
  3803. Find the indices of the nearest pair of values to a search value in an ordered vector.
  3804. Parameters
  3805. ----------
  3806. x : ndarray
  3807. An ordered monotonic vector inside which to search for `value`.
  3808. value : (int,float)
  3809. The value to search for.
  3810. Returns
  3811. -------
  3812. idx : int
  3813. The index location of the exact value (if an exact match is found)
  3814. [idx,idx+1] : (int,int)
  3815. A list giving the pair of points before and after where the value is found.
  3816. '''
  3817. idx = (abs(x-value)).argmin()
  3818. if (x[idx] > value):
  3819. return([idx-1,idx])
  3820. elif (x[idx] < value):
  3821. return([idx,idx+1])
  3822. else:
  3823. return([idx])
  3824. ## =================================================================================================
  3825. def is_float(s):
  3826. '''
  3827. Check if a string (or list of strings) describes a (list of) floating-point type of number.
  3828. Parameters
  3829. ----------
  3830. s : str
  3831. The string or list of strings to analyze.
  3832. Returns
  3833. -------
  3834. bool : bool
  3835. Whether or not `s` contains a numeric value.
  3836. '''
  3837. if isinstance(s,tuple) or isinstance(s,list):
  3838. if not all(isinstance(i,str) for i in s): raise TypeError("Input 's' is not a list of strings")
  3839. if len(s) == 0:
  3840. try:
  3841. _ = float(s)
  3842. except ValueError:
  3843. return(False)
  3844. else:
  3845. res = list(True for i in range(0,len(s)))
  3846. for i in range(0,len(s)):
  3847. try:
  3848. _ = float(s[i])
  3849. except ValueError:
  3850. res[i] = False
  3851. return(res)
  3852. else:
  3853. if not isinstance(s,str): raise TypeError("Input 's' is not a string")
  3854. try:
  3855. _ = float(s)
  3856. return(True)
  3857. except ValueError:
  3858. return(False)
  3859. ## =================================================================================================
  3860. def getcolors():
  3861. '''
  3862. Make a list of 16 discernably different colors that can be used for drawing plots.
  3863. Returns
  3864. -------
  3865. mycolors : list of floats
  3866. A 16x4 list of colors, with each color being a 4-vector (R,G,B,A).
  3867. '''
  3868. mycolors = [None]*16
  3869. mycolors[0] = [0.0,0.0,0.0,1.0] ## black
  3870. mycolors[1] = [1.0,0.0,1.0,1.0] ## fuchsia
  3871. mycolors[2] = [0.0,0.0,0.5,1.0] ## navy blue
  3872. mycolors[3] = [0.0,0.0,1.0,1.0] ## blue
  3873. mycolors[4] = [0.0,0.5,0.5,1.0] ## teal
  3874. mycolors[5] = [0.0,1.0,1.0,1.0] ## aqua
  3875. mycolors[6] = [0.0,0.5,0.0,1.0] ## dark green
  3876. mycolors[7] = [0.0,1.0,0.0,1.0] ## lime green
  3877. mycolors[8] = [0.5,0.5,0.0,1.0] ## olive green
  3878. mycolors[9] = [1.0,1.0,0.0,1.0] ## yellow
  3879. mycolors[10] = [1.0,0.5,0.0,1.0] ## orange
  3880. mycolors[11] = [1.0,0.0,0.0,1.0] ## red
  3881. mycolors[12] = [0.5,0.0,0.0,1.0] ## maroon
  3882. mycolors[13] = [0.5,0.0,0.5,1.0] ## purple
  3883. mycolors[14] = [0.7,0.7,0.7,1.0] ## bright grey
  3884. mycolors[15] = [0.4,0.4,0.4,1.0] ## dark grey
  3885. return(mycolors)
  3886. ## =================================================================================================
  3887. def get_poly_coeffs(x, g, order):
  3888. '''
  3889. Perform a, nth order polynomial fit to a curve and return the coefficients.
  3890. The return coefficient vector "c" fits the functional form\
  3891. g = c[0] + c[1]*x + c[2]*x^2 + c[3]*x^3 + ...
  3892. Parameters
  3893. ----------
  3894. x : ndarray
  3895. The data abscissa.
  3896. g : ndarray
  3897. The data ordinate.
  3898. order: int
  3899. The integer order (>0) of polynomial to fit.
  3900. Returns
  3901. -------
  3902. coeffs : ndarray
  3903. The order+1 length vector of polynomial coefficients.
  3904. '''
  3905. #npts = size(g)
  3906. #x = -1.0 + arange(npts) * 2.0 / (npts - 1.0)
  3907. H = zeros((size(g),order+1))
  3908. H[:,0] = 1.0
  3909. for o in arange(1,order+1):
  3910. H[:,o] = x**o
  3911. coeffs = dot(linalg.pinv(H), g)
  3912. #remainder = g
  3913. #for n in arange(alen(coeffs)):
  3914. # remainder -= coeffs[n] * x**n
  3915. return(coeffs)
  3916. ## =================================================================================================
  3917. def polyeval_Horner(x, poly_coeffs):
  3918. '''
  3919. Use Horner's rule for polynomial evaluation.
  3920. Assume a polynomial of the form \
  3921. p = c[0] + (c[1] * x) + (c[2] * x**2) + (c[3] * x**3) + ... + (c[N] * x**N).
  3922. Parameters
  3923. ----------
  3924. x : ndarray
  3925. The abscissa at which to evaluate the polynomial.
  3926. poly_coeffs : ndarray
  3927. The vector of polynomial coefficients.
  3928. Returns
  3929. -------
  3930. p : ndarray
  3931. The polynomial evaluated at the points given in x.
  3932. '''
  3933. ncoeffs = alen(poly_coeffs)
  3934. p = zeros(alen(x))
  3935. for n in arange(ncoeffs-1,-1,-1):
  3936. p = poly_coeffs[n] + (x * p)
  3937. #print('n=%i, c=%f' % (n, coeffs[n]))
  3938. return(p)
  3939. ## =================================================================================================
  3940. def get_parallax_mask(dcb, camera_pair1, camera_pair2, thresh=2.5, abs_thresh=None, erosions=0, dilations=0,
  3941. saturation_mask=None, smooth=0, debug=False):
  3942. '''
  3943. Calculate the parallax mask from the datacube.
  3944. Parameters
  3945. ----------
  3946. dcb : ndarray
  3947. The (Nx,Ny,Nw) multiplex datacube.
  3948. camera_pair1 : (tuple of two integers)
  3949. The two cameras to use for calculating the horizontal difference image (as input to the \
  3950. parallax estimator).
  3951. camera_pair2 : (tuple of two integers)
  3952. The two cameras to use for calculating the vertical difference image (as input to the \
  3953. parallax estimator).
  3954. thresh : float
  3955. The threshold value above which to say a given pixel belongs to the parallax mask. This is \
  3956. calculated as distance from the mean in units of standard deviations.
  3957. abs_thresh : float
  3958. The absolute threshold, given in distance from 0.0 and not the mean. When this value is \
  3959. define, it oevrrides the relative threshold.
  3960. erosions : int
  3961. The number of morphological erosion steps to use.
  3962. dilations : int
  3963. The number of morphological dilation steps to use.
  3964. saturation_mask: ndarray
  3965. An optional (Nx, Ny) mask indicating which pixels of the camera pairs are saturated.
  3966. smooth : int
  3967. The smoothing kernel size (0 = no smoothing).
  3968. Returns
  3969. -------
  3970. parallax_mask : ndarray
  3971. The boolean array parallax mask (True where parallax is above the detection threshold).
  3972. '''
  3973. from scipy import ndimage
  3974. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  3975. import numpy.ma as ma
  3976. #from meanclip import meanclip
  3977. erosions = int(erosions)
  3978. dilations = int(dilations)
  3979. struct = ones((3,3), 'bool')
  3980. diff1 = dcb[:,:,camera_pair1[0]] - dcb[:,:,camera_pair1[1]]
  3981. diff2 = dcb[:,:,camera_pair2[0]] - dcb[:,:,camera_pair2[1]]
  3982. if (smooth > 0):
  3983. diff1 = ndimage.uniform_filter(diff1, smooth)
  3984. diff2 = ndimage.uniform_filter(diff2, smooth)
  3985. (Nx,Ny) = diff1.shape
  3986. supermask= zeros((Nx,Ny), 'bool')
  3987. if saturation_mask != None:
  3988. supermask[saturation_mask] = 1
  3989. diff1_ma = ma.array(diff1, mask = supermask)
  3990. diff2_ma = ma.array(diff2, mask = supermask)
  3991. if (abs_thresh == None):
  3992. center1 = mean(diff1_ma)
  3993. center2 = mean(diff2_ma)
  3994. #center1 = median(diff1)
  3995. #center2 = median(diff2)
  3996. std1 = std(diff1_ma)
  3997. std2 = std(diff2_ma)
  3998. thresh1 = thresh * std1
  3999. thresh2 = thresh * std2
  4000. else:
  4001. thresh1 = abs_thresh
  4002. thresh2 = abs_thresh
  4003. if (abs_thresh == None):
  4004. parallax1_mask = (abs(diff1_ma - center1) > thresh1)
  4005. parallax2_mask = (abs(diff2_ma- center2) > thresh2)
  4006. else:
  4007. parallax1_mask = (abs(diff1_ma) > abs_thresh)
  4008. parallax2_mask = (abs(diff2_ma) > abs_thresh)
  4009. parallax_mask = (parallax1_mask | parallax2_mask)
  4010. if debug:
  4011. import matplotlib.pyplot as plt
  4012. plt.figure()
  4013. plt.imshow(diff1)
  4014. plt.title('parallax diff image 1 (input to mask)')
  4015. plt.figure()
  4016. plt.imshow(diff2)
  4017. plt.title('parallax diff image 2 (input to mask)')
  4018. plt.figure()
  4019. plt.imshow(parallax1_mask)
  4020. plt.title('parallax mask 1')
  4021. plt.figure()
  4022. plt.imshow(parallax2_mask)
  4023. plt.title('parallax mask 2')
  4024. plt.figure()
  4025. plt.imshow(parallax_mask)
  4026. plt.title('combined parallax mask - before morphology')
  4027. if (erosions > 0):
  4028. parallax_mask = binary_erosion(parallax_mask, structure=struct, iterations=erosions)
  4029. if debug:
  4030. plt.figure()
  4031. plt.imshow(parallax_mask)
  4032. plt.title('parallax_mask - after erosion')
  4033. if (dilations > 0):
  4034. parallax_mask = binary_dilation(parallax_mask, structure=struct, iterations=dilations)
  4035. if debug:
  4036. plt.figure()
  4037. plt.imshow(parallax_mask)
  4038. plt.title('parallax_mask - after dilation')
  4039. plt.show()
  4040. parallax_mask[supermask] = 0
  4041. #parallax_mask = logical_not(parallax_mask)
  4042. return(parallax_mask)
  4043. ## =================================================================================================
  4044. def get_vismotion_mask(meas_img, ref_img, thresh=2.5, dynamic_thresh=None, abs_thresh = None, saturation_mask=None, parallax_mask=None, erosions=0, dilations=0, smooth=0, debug=False):
  4045. '''
  4046. Calculate the parallax mask from the datacube.
  4047. The input "vis" image is assumed to be *already* cropped to the size of the infrared overlay region.
  4048. Parameters
  4049. ----------
  4050. meas_img : ndarray
  4051. The (vis_Nx,vis_Ny) greyscale image from the measurement frame.
  4052. ref_img : ndarray
  4053. The (vis_Nx,vis_Ny) greyscale image from the reference frame.
  4054. thresh : float
  4055. The threshold value above which to say a given pixel belongs to the vismotion mask. This is \
  4056. calculated as
  4057. thresh : float
  4058. The threshold value above which to say a given pixel belongs to the vismotion mask. This is \
  4059. calculated as distance from the mean in units of standard deviations.
  4060. dynamic_thresh: float
  4061. The threshold value above which to say a given pixel belogns to the vismotion mask. This is a set value. \
  4062. calculated from the average of 30 frames standard deviation, calculated as distance from the mean.
  4063. abs_thresh : float
  4064. The absolute threshold, given in distance from 0.0 and not the mean. When this value is \
  4065. define, it oevrrides the relative threshold.
  4066. erosions : int
  4067. The number of morphological erosion steps to use.
  4068. dilations : int
  4069. The number of morphological dilation steps to use.
  4070. smooth : int
  4071. The smoothing kernel size (0 = no smoothing).
  4072. Returns
  4073. -------
  4074. motion_mask : ndarray
  4075. The boolean array motion mask (True where motion is above the detection threshold).
  4076. '''
  4077. from scipy import ndimage
  4078. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4079. import numpy.ma as ma
  4080. #from meanclip import meanclip
  4081. erosions = int(erosions)
  4082. dilations = int(dilations)
  4083. struct = ones((3,3), 'bool')
  4084. ## DAISI uses the following formula for converting an RGB image to greyscale
  4085. if (meas_img.ndim == 3):
  4086. meas_grey = uint8(0.299*meas_img[:,:,0] + 0.587*meas_img[:,:,1] + 0.114*meas_img[:,:,2])
  4087. else:
  4088. meas_grey = meas_img
  4089. if (ref_img.ndim == 3):
  4090. ref_grey = uint8(0.299*ref_img[:,:,0] + 0.587*ref_img[:,:,1] + 0.114*ref_img[:,:,2])
  4091. else:
  4092. ref_grey = ref_img
  4093. diff_img = float32(meas_grey) - float32(ref_grey)
  4094. (Nx,Ny) = diff_img.shape
  4095. supermask= zeros((Nx,Ny), 'bool')
  4096. if saturation_mask != None:
  4097. supermask[saturation_mask] = 1
  4098. if (smooth > 0):
  4099. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4100. diff_img_ma = ma.array(diff_img, mask=supermask)
  4101. #(meanval, stdval) = meanclip(diff_img, clipsig=2.0)
  4102. (centerval, stdval) = (nanmean(diff_img_ma), nanstd(diff_img))
  4103. #(centerval, stdval) = (nanmedian(diff_img), nanstd(diff_img))
  4104. motion_mask = (abs(diff_img_ma - centerval) > (thresh * stdval))
  4105. #motion_mask = ((diff_img - meanval) > (thresh * stdval))
  4106. if abs_thresh != None:
  4107. motion_mask = (abs(diff_img_ma - centerval) > (abs_thresh))
  4108. elif dynamic_thresh != None:
  4109. motion_mask = (abs(diff_img_ma - centerval) > (dynamic_thresh))
  4110. if debug:
  4111. import matplotlib.pyplot as plt
  4112. plt.figure()
  4113. plt.imshow(diff_img)
  4114. plt.title('vis diff image (meas - ref)')
  4115. plt.figure()
  4116. plt.imshow(255*motion_mask)
  4117. plt.title('motion mask - before morphology')
  4118. if (erosions > 0):
  4119. motion_mask = binary_erosion(motion_mask, structure=struct, iterations=erosions)
  4120. if debug:
  4121. plt.figure()
  4122. plt.imshow(255*motion_mask)
  4123. plt.title('motion_mask - after erosion')
  4124. if (dilations > 0):
  4125. motion_mask = binary_dilation(motion_mask, structure=struct, iterations=dilations)
  4126. if debug:
  4127. plt.figure()
  4128. plt.imshow(255*motion_mask)
  4129. plt.title('motion_mask - after dilation')
  4130. plt.show()
  4131. motion_mask[supermask] = 0
  4132. #motion_mask = logical_not(motion_mask)
  4133. return(motion_mask)
  4134. ## =================================================================================================
  4135. def get_irmotion_mask(dcb, ref_dcb, pos_channels, neg_channels, thresh=2.5, dynamic_thresh=None, erosions=0, dilations=0, abs_thresh=None,
  4136. threshmin=None, smooth=0, saturation_mask = None, parallax_mask = None, debug=False):
  4137. '''
  4138. Calculate the parallax mask from the datacube.
  4139. Parameters
  4140. ----------
  4141. dcb : ndarray
  4142. The (Nx,Ny,Nw) datacube from the measurement frame.
  4143. ref_dcb : ndarray
  4144. The (Nx,Ny,Nw) datacube from the reference frame.
  4145. pos_channels : list
  4146. ???
  4147. neg_channels : list
  4148. ???
  4149. thresh : float
  4150. The threshold value above which to say a given pixel belongs to the parallax mask. This is \
  4151. calculated as
  4152. erosions : int
  4153. The number of morphological erosion steps to use.
  4154. dilations : int
  4155. The number of morphological dilation steps to use.
  4156. abs_thresh : float
  4157. ???
  4158. dynamic_thresh: float
  4159. The threshold value above which to say a given pixel belogns to the vismotion mask. This is a set value. \
  4160. calculated from the average of 30 frames standard deviation, calculated as distance from the mean.
  4161. threshmin : float
  4162. ???
  4163. smooth : int
  4164. The size of the smoothing kernel (0 = no smoothing).
  4165. Returns
  4166. -------
  4167. motion_mask : ndarray
  4168. The boolean array motion mask (True where motion is above the detection threshold).
  4169. '''
  4170. from scipy import ndimage
  4171. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4172. import numpy.ma as ma
  4173. #from meanclip import meanclip
  4174. erosions = int(erosions)
  4175. dilations = int(dilations)
  4176. struct = ones((3,3), 'bool')
  4177. (Nx,Ny,Nw) = dcb.shape
  4178. diff_img = zeros((Nx,Ny))
  4179. for c in pos_channels:
  4180. diff_img += dcb[:,:,c].reshape(Nx, Ny)
  4181. diff_img -= ref_dcb[:,:,c].reshape(Nx, Ny)
  4182. for c in neg_channels:
  4183. diff_img -= dcb[:,:,c].reshape(Nx, Ny)
  4184. diff_img += ref_dcb[:,:,c].reshape(Nx, Ny)
  4185. if (smooth > 0):
  4186. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4187. supermask= zeros((Nx,Ny), 'bool')
  4188. if saturation_mask != None:
  4189. supermask[saturation_mask] = 1
  4190. if parallax_mask != None:
  4191. supermask[parallax_mask] = 1
  4192. diff_img_ma = ma.array(diff_img, mask = supermask)
  4193. if (abs_thresh == None):
  4194. #(centerval, stdval) = meanclip(diff_img, clipsig=2.0)
  4195. (centerval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
  4196. #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
  4197. imgthresh = thresh * stdval
  4198. if (imgthresh < threshmin): imgthresh = threshmin
  4199. motion_mask = (abs(diff_img_ma - centerval) > imgthresh)
  4200. elif (abs_thresh != None):
  4201. imgthresh = abs_thresh
  4202. motion_mask = abs(diff_img_ma) > imgthresh
  4203. elif (dynamic_thresh != None):
  4204. imgthresh = dynamic_thresh
  4205. motion_mask = abs(diff_img_ma) > imgthresh
  4206. if debug:
  4207. import matplotlib.pyplot as plt
  4208. plt.figure()
  4209. plt.imshow(diff_img)
  4210. plt.title('IR diff image (meas - ref)')
  4211. plt.figure()
  4212. plt.imshow(255*motion_mask)
  4213. plt.title('IR motion mask - before morphology')
  4214. if (erosions > 0):
  4215. motion_mask = binary_erosion(motion_mask, structure=struct, iterations=erosions)
  4216. if debug:
  4217. plt.figure()
  4218. plt.imshow(255*motion_mask)
  4219. plt.title('IR motion_mask - after erosion')
  4220. if (dilations > 0):
  4221. motion_mask = binary_dilation(motion_mask, structure=struct, iterations=dilations)
  4222. if debug:
  4223. plt.figure()
  4224. plt.imshow(255*motion_mask)
  4225. plt.title('IR motion_mask - after dilation')
  4226. #plt.show()
  4227. motion_mask[supermask] = 0
  4228. #motion_mask = logical_not(motion_mask)
  4229. return(motion_mask)
  4230. ## =================================================================================================
  4231. def mask_overlay(img, mask1=None, mask2=None, mask3=None, alpha=1.0):
  4232. '''
  4233. Overlay a detection mask on top of a greyscale or color image.
  4234. Parameters
  4235. ----------
  4236. img : ndarray
  4237. The (img_Nx,img_Ny) image to use as the background against which to apply the overlay.
  4238. mask1 : ndarray, optional
  4239. A (Nx,Ny) detection mask --- a boolean array no larger than `img` --- to be displayed in RED.
  4240. mask2 : ndarray, optional
  4241. A (Nx,Ny) detection mask --- a boolean array no larger than `img` --- to be displayed in GREEN.
  4242. mask3 : ndarray, optional
  4243. A (Nx,Ny) detection mask --- a boolean array no larger than `img` --- to be displayed in BLUE.
  4244. alpha : float
  4245. A value for tuning how much the background is suppressed relative to the masks. If alpha==1.0, then the \
  4246. masks are rendered invisible. If alpha==0.0, then the masks show as fully saturated pixels and the \
  4247. background pixel value (underneath the mask) is rendered invisible.
  4248. Returns
  4249. -------
  4250. overlay : ndarray
  4251. The (img_Nx,img_Ny) image with detection mask overlaid in red.
  4252. '''
  4253. if (mask1 == None) and (mask2 == None) and (mask3 == None):
  4254. return(img)
  4255. assert (0.0 <= alpha <= 1.0), 'Input parameter "alpha" must be from 0.0 to 1.0.'
  4256. ## If the input image is not 8-bit, truncate it to 8-bit first before applying the overlay.
  4257. img = array(img)
  4258. img = uint8(255.0 * (img - amin(img)) / amax(img - amin(img)))
  4259. if (mask1 != None):
  4260. mask1 = (asarray(mask1) == 1)
  4261. (Nx,Ny) = mask1.shape
  4262. if (mask2 != None):
  4263. mask2 = (asarray(mask2) == 1)
  4264. (Nx,Ny) = mask2.shape
  4265. if (mask3 != None):
  4266. mask3 = (asarray(mask3) == 1)
  4267. (Nx,Ny) = mask3.shape
  4268. ## Make a combined mask
  4269. if (mask1 == None): mask1 = zeros((Nx,Ny), 'bool')
  4270. if (mask2 == None): mask2 = zeros((Nx,Ny), 'bool')
  4271. if (mask3 == None): mask3 = zeros((Nx,Ny), 'bool')
  4272. mask = ((mask1 + mask2 + mask3) > 0)
  4273. (img_Nx,img_Ny) = img.shape
  4274. dx = (img_Nx - Nx) // 2
  4275. dy = (img_Ny - Ny) // 2
  4276. xoff = 0 if ((img_Nx - Nx) % 2 == 0) else 1
  4277. yoff = 0 if ((img_Ny - Ny) % 2 == 0) else 1
  4278. if (img.ndim == 3):
  4279. r = float32(array(img[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff,0]))
  4280. g = float32(array(img[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff,1]))
  4281. b = float32(array(img[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff,2]))
  4282. overlay = array(img)
  4283. elif (img.ndim == 2):
  4284. r = float32(array(img[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff]))
  4285. g = float32(array(img[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff]))
  4286. b = float32(array(img[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff]))
  4287. overlay = zeros((img_Nx,img_Ny,3), 'uint8')
  4288. overlay[:,:,0] = img
  4289. overlay[:,:,1] = img
  4290. overlay[:,:,2] = img
  4291. r[mask] = r[mask] * alpha
  4292. g[mask] = g[mask] * alpha
  4293. b[mask] = b[mask] * alpha
  4294. r[mask1] += 255.0 * (1.0 - alpha)
  4295. g[mask2] += 255.0 * (1.0 - alpha)
  4296. b[mask3] += 255.0 * (1.0 - alpha)
  4297. overlay[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff,0] = uint8(r)
  4298. overlay[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff,1] = uint8(g)
  4299. overlay[dx:img_Nx-dx-xoff,dy:img_Ny-dy-yoff,2] = uint8(b)
  4300. return(overlay)
  4301. ## =================================================================================================
  4302. def draw_box(box, ax=None, color='r', w=0, **kwargs):
  4303. '''
  4304. Draw a box in the current figure.
  4305. Parameters
  4306. ----------
  4307. box : ndarray
  4308. ???
  4309. ax : matplotlib.pyplot axis handle, optional
  4310. ???
  4311. color : str
  4312. The color of the box to plot.
  4313. w : int
  4314. An index to the first dimension of `box`.
  4315. '''
  4316. box = asarray(box)
  4317. if any(isnan(box)):
  4318. return
  4319. if (box.size == 4):
  4320. (xlo,xhi,ylo,yhi) = box - array([0.5,0.5,0.5,0.5])
  4321. else:
  4322. (xlo,xhi,ylo,yhi) = box[w,:] - array([0.5,0.5,0.5,0.5])
  4323. if (ax == None):
  4324. import matplotlib.pyplot as plt
  4325. ax = plt.gca()
  4326. if (w == 0):
  4327. ax.plot([ylo,yhi,yhi,ylo,ylo], [xlo,xlo,xhi,xhi,xlo], color+'-', **kwargs)
  4328. else:
  4329. ax.plot([ylo,yhi,yhi,ylo,ylo], [xlo,xlo,xhi,xhi,xlo], color+'-', **kwargs)
  4330. return
  4331. ## =================================================================================================
  4332. def get_vis_steam_mask(vis, ref_vis, thresh=1.0, erosions=0, dilations=0, smooth=0, debug=False): #use thresh=4.0 with meanclip
  4333. '''
  4334. Calculate the parallax mask from the datacube.
  4335. The input "vis" image is assumed to be *already* cropped to the size of the infrared overlay region.
  4336. Parameters
  4337. ----------
  4338. vis : ndarray
  4339. The (vis_Nx,vis_Ny) greyscale image from the measurement frame.
  4340. ref_vis : ndarray
  4341. The (vis_Nx,vis_Ny) greyscale image from the reference frame.
  4342. thresh : float
  4343. The threshold value above which to say a given pixel belongs to the parallax mask. This is \
  4344. calculated as
  4345. erosions : int
  4346. The number of morphological erosion steps to use.
  4347. dilations : int
  4348. The number of morphological dilation steps to use.
  4349. smooth : int
  4350. The size of the smoothing kernel (0 = no smoothing).
  4351. Returns
  4352. -------
  4353. steam_mask : ndarray
  4354. The boolean array motion mask (True where motion is above the detection threshold).
  4355. '''
  4356. from scipy import ndimage
  4357. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4358. #from meanclip import meanclip
  4359. ## DAISI uses the following formula for converting an RGB image to greyscale
  4360. if (vis.ndim == 3):
  4361. meas_grey = uint8(0.299*vis[:,:,05] + 0.587*vis[:,:,1] + 0.114*vis[:,:,2])
  4362. else:
  4363. meas_grey = vis
  4364. if (ref_vis.ndim == 3):
  4365. ref_grey = uint8(0.299*ref_vis[:,:,0] + 0.587*ref_vis[:,:,1] + 0.114*ref_vis[:,:,2])
  4366. else:
  4367. ref_grey = ref_vis
  4368. erosions = int(erosions)
  4369. dilations = int(dilations)
  4370. struct = ones((3,3), 'bool')
  4371. diff_img = float32(meas_grey) - float32(ref_grey)
  4372. if (smooth > 0):
  4373. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4374. #(centerval, stdval) = meanclip(diff_img, clipsig=2.0)
  4375. (centerval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
  4376. #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
  4377. steam_mask = ((diff_img - centerval) > (thresh * stdval))
  4378. if debug:
  4379. import matplotlib.pyplot as plt
  4380. plt.figure()
  4381. plt.imshow(diff_img)
  4382. plt.title('vis diff image (meas - ref)')
  4383. plt.figure()
  4384. plt.imshow(255*steam_mask)
  4385. plt.title('motion mask - before morphology')
  4386. if (erosions > 0):
  4387. steam_mask = binary_erosion(steam_mask, structure=struct, iterations=erosions)
  4388. if debug:
  4389. plt.figure()
  4390. plt.imshow(255*steam_mask)
  4391. plt.title('steam_mask - after erosion')
  4392. if (dilations > 0):
  4393. steam_mask = binary_dilation(steam_mask, structure=struct, iterations=dilations)
  4394. if debug:
  4395. plt.figure()
  4396. plt.imshow(255*steam_mask)
  4397. plt.title('steam_mask - after dilation')
  4398. plt.show()
  4399. #steam_mask = logical_not(steam_mask)
  4400. return(steam_mask)
  4401. ## =================================================================================================
  4402. def get_ir_steam_mask(dcb, ref_dcb, thresh=1.0, erosions=0, dilations=0, smooth=0, debug=False): #use thresh=7.0 with meanclip
  4403. '''
  4404. Calculate the parallax mask from the datacube.
  4405. Parameters
  4406. ----------
  4407. dcb : ndarray
  4408. The (Nx,Ny,Nw) datacube from the measurement frame.
  4409. ref_dcb : ndarray
  4410. The (Nx,Ny,Nw) datacube from the reference frame.
  4411. thresh : float
  4412. The threshold value above which to say a given pixel belongs to the parallax mask.
  4413. erosions : int
  4414. The number of morphological erosion steps to use.
  4415. dilations : int
  4416. The number of morphological dilation steps to use.
  4417. smooth : int
  4418. The size of the smoothing kernel (0 = no smoothing).
  4419. Returns
  4420. -------
  4421. steam_mask : ndarray
  4422. The boolean array motion mask (True where motion is above the detection threshold).
  4423. '''
  4424. from scipy import ndimage
  4425. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4426. #from meanclip import meanclip
  4427. erosions = int(erosions)
  4428. dilations = int(dilations)
  4429. struct = ones((3,3), 'bool')
  4430. diff_img = sum(dcb,2) - sum(ref_dcb,2)
  4431. if (smooth > 0):
  4432. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4433. #(meanval, stdval) = meanclip(diff_img, clipsig=2.0)
  4434. (centerval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
  4435. #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
  4436. steam_mask = (abs(diff_img - centerval) > (thresh * stdval))
  4437. if debug:
  4438. import matplotlib.pyplot as plt
  4439. plt.figure()
  4440. plt.imshow(diff_img)
  4441. plt.title('IR diff image (meas - ref)')
  4442. plt.figure()
  4443. plt.imshow(255*steam_mask)
  4444. plt.title('IR motion mask - before morphology')
  4445. if (erosions > 0):
  4446. steam_mask = binary_erosion(steam_mask, structure=struct, iterations=erosions)
  4447. if debug:
  4448. plt.figure()
  4449. plt.imshow(255*steam_mask)
  4450. plt.title('IR steam_mask - after erosion')
  4451. if (dilations > 0):
  4452. steam_mask = binary_dilation(steam_mask, structure=struct, iterations=dilations)
  4453. if debug:
  4454. plt.figure()
  4455. plt.imshow(255*steam_mask)
  4456. plt.title('IR steam_mask - after dilation')
  4457. #plt.show()
  4458. #steam_mask = logical_not(steam_mask)
  4459. return(steam_mask)
  4460. ## =================================================================================================
  4461. def get_spectral_mask(dcb, ref_dcb, pos_channels, neg_channels, abs_thresh=None, dynamic_thresh=None, thresh=2.5,
  4462. threshmin=0.0, erosions=0, dilations=0, parallax_mask=None,
  4463. saturation_mask = None, comparison_type= 2, smooth=0, debug=False):
  4464. '''
  4465. Calculate the gas detection mask from the multiplex datacube.
  4466. Parameters
  4467. ----------
  4468. dcb : ndarray
  4469. The (Nx,Ny,Nw) datacube from the measurement frame.
  4470. ref_dcb : ndarray
  4471. The (Nx,Ny,Nw) datacube from the reference frame.
  4472. pos_channels : list
  4473. ???
  4474. neg_channels : list
  4475. ???
  4476. abs_thresh : float
  4477. ???
  4478. thresh : float
  4479. The threshold value above which to say a given pixel belongs to the parallax mask.
  4480. threshmin : float
  4481. ???
  4482. dynamic_thresh: float
  4483. The threshold value above which to say a given pixel belogns to the vismotion mask. This is a set value. \
  4484. calculated from the average of 30 frames standard deviation, calculated as distance from 0.
  4485. erosions : int
  4486. The number of morphological erosion steps to use.
  4487. parallax_mask : ndarray
  4488. ???
  4489. saturation_mask: ndarray
  4490. ???
  4491. dilations : int
  4492. The number of morphological dilation steps to use.
  4493. comparison_type: int
  4494. Whether the comparison is double ended (2), positive single ended, (1) or negative single ended (-1), logical not double_ended (0)
  4495. smooth : bool
  4496. Whether to apply a 3x3 smoothing kernel to the difference image prior to calculating the mask.
  4497. Returns
  4498. -------
  4499. detection_mask : ndarray
  4500. The boolean array gas detection mask.
  4501. '''
  4502. from scipy import ndimage
  4503. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4504. #from meanclip import meanclip
  4505. #force comparison to default to 2.
  4506. if comparison_type not in [-1, 0, 1, 2]: comparison_type = 2
  4507. erosions = int(erosions)
  4508. dilations = int(dilations)
  4509. struct = ones((3,3), 'bool')
  4510. (Nx,Ny,Nw) = dcb.shape
  4511. diff_img = zeros((Nx,Ny))
  4512. for c in pos_channels:
  4513. diff_img += dcb[:,:,c].reshape(Nx, Ny)
  4514. diff_img -= ref_dcb[:,:,c].reshape(Nx, Ny)
  4515. for c in neg_channels:
  4516. diff_img -= dcb[:,:,c].reshape(Nx, Ny)
  4517. diff_img += ref_dcb[:,:,c].reshape(Nx, Ny)
  4518. if (smooth > 0):
  4519. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4520. supermask= zeros((Nx,Ny), 'bool')
  4521. if saturation_mask != None:
  4522. supermask[saturation_mask] = 1
  4523. if parallax_mask != None:
  4524. supermask[parallax_mask] = 1
  4525. diff_img_ma = ma.array(diff_img, mask = supermask)
  4526. if (abs_thresh == None) and (dynamic_thresh == None):
  4527. (centerval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
  4528. #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
  4529. imgthresh = thresh * stdval
  4530. if (imgthresh < threshmin): imgthresh = threshmin
  4531. if comparison_type == 2:
  4532. spectral_mask = abs(diff_img_ma- centerval) > imgthresh
  4533. elif comparison_type == 1:
  4534. spectral_mask = (diff_img_ma- centerval) > imgthresh
  4535. elif comparison_type == 0:
  4536. spectral_mask = abs(diff_img_ma- centerval) < imgthresh
  4537. else:
  4538. spectral_mask = (diff_img_ma- centerval) < (-1*imgthresh)
  4539. elif (abs_thresh == None) and (dynamic_thresh !=None):
  4540. (centerval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
  4541. imgthresh = dynamic_thresh
  4542. if (imgthresh < threshmin): imgthresh = threshmin
  4543. if comparison_type == 2:
  4544. spectral_mask = abs(diff_img_ma- centerval) > imgthresh
  4545. elif comparison_type == 1:
  4546. spectral_mask = (diff_img_ma- centerval) > imgthresh
  4547. elif comparison_type == 0:
  4548. spectral_mask = abs(diff_img_ma- centerval) < imgthresh
  4549. else:
  4550. spectral_mask = (diff_img_ma- centerval) < (-1* imgthresh)
  4551. else:
  4552. (centerval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
  4553. imgthresh = abs_thresh
  4554. if comparison_type == 2:
  4555. spectral_mask = abs(diff_img_ma- centerval) > imgthresh
  4556. elif comparison_type == 1:
  4557. spectral_mask = (diff_img_ma- centerval) > imgthresh
  4558. elif comparison_type == 0:
  4559. spectral_mask = abs(diff_img_ma- centerval) < imgthresh
  4560. else:
  4561. spectral_mask = (diff_img_ma- centerval) < (-1* imgthresh)
  4562. if debug:
  4563. import matplotlib.pyplot as plt
  4564. if (parallax_mask != None):
  4565. plt.figure()
  4566. plt.imshow(255*parallax_mask)
  4567. plt.title('Parallax mask')
  4568. plt.figure()
  4569. plt.imshow(diff_img)
  4570. if (abs_thresh == None):
  4571. (meanval, stdval) = (nanmean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
  4572. maxval = nanmax(ravel(diff_img))
  4573. plt.title('spectral diff image, mean=%f, std=%f, max=%f, thresh=%f' % (meanval, stdval, maxval, imgthresh), fontsize=10)
  4574. plt.figure()
  4575. plt.imshow(255*detection_mask)
  4576. plt.title('gas detection mask - before morphology')
  4577. plt.figure()
  4578. vec = array(diff_img.flatten())
  4579. meanval = nanmean(vec)
  4580. medianval = nanmedian(vec)
  4581. stdval = nanstd(vec)
  4582. minval = nanmin(vec)
  4583. maxval = nanmax(vec)
  4584. diff_img[isnan(diff_img)] = 0.0 ## put pixel values back now that stats are done
  4585. vec = array(diff_img.flatten())
  4586. (n, bin_edges) = histogram(log10(vec), 40)
  4587. ax = plt.subplot(111)
  4588. (counts, edges, patches) = ax.hist(vec, 40, zorder=4, edgecolor='0.5', log=True)
  4589. #statstr = 'mean='+('%f' % meanval) + '\nstd='+('%f' % stdval)
  4590. statstr = ('median=%f\nstd=%f' % (medianval, stdval))
  4591. ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
  4592. ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])
  4593. if (erosions > 0):
  4594. spectral_mask = binary_erosion(spectral_mask, structure=struct, iterations=erosions)
  4595. if debug:
  4596. plt.figure()
  4597. plt.imshow(255*spectral_mask)
  4598. plt.title('IR detection_mask - after erosion')
  4599. if (dilations > 0):
  4600. spectral_mask = binary_dilation(spectral_mask, structure=struct, iterations=dilations)
  4601. if debug:
  4602. plt.figure()
  4603. plt.imshow(255*spectral_mask)
  4604. plt.title('IR detection_mask - after dilation')
  4605. #plt.show()
  4606. spectral_mask[supermask] = 0
  4607. return(spectral_mask)
  4608. ## =================================================================================================
  4609. def get_camdiff_detection_mask(dcb, ref_dcb, pos_channels, neg_channels, thresh=2.5, erosions=0, dilations=0, smooth=0, \
  4610. saturation_mask=None, parallax_mask=None, ir_motion_mask=None, vis_motion_mask=None, debug=False):
  4611. '''
  4612. Calculate the gas detection mask from the multiplex datacube.
  4613. Parameters
  4614. ----------
  4615. dcb : ndarray
  4616. The (Nx,Ny,Nw) datacube from the measurement frame.
  4617. ref_dcb : ndarray
  4618. The (Nx,Ny,Nw) datacube from the reference frame.
  4619. camera_pair : (tuple of two ints)
  4620. The cameras used to generate the spectral difference image.
  4621. thresh : float
  4622. The threshold value above which to say a given pixel belongs to the parallax mask.
  4623. erosions : int
  4624. The number of morphological erosion steps to use.
  4625. dilations : int
  4626. The number of morphological dilation steps to use.
  4627. smooth : int
  4628. The size of the smoothing kernel (0 = no smoothing).
  4629. Returns
  4630. -------
  4631. detection_mask : ndarray
  4632. The boolean array gas detection mask.
  4633. '''
  4634. from scipy import ndimage
  4635. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4636. import numpy.ma as ma
  4637. #from meanclip import meanclip
  4638. erosions = int(erosions)
  4639. dilations = int(dilations)
  4640. struct = ones((3, 3), dtype=bool)
  4641. (Nx,Ny,Nw) = dcb.shape
  4642. diff_img = zeros((Nx,Ny))
  4643. for c in pos_channels:
  4644. diff_img += dcb[:,:,c].reshape(Nx, Ny)
  4645. diff_img -= ref_dcb[:,:,c].reshape(Nx, Ny)
  4646. for c in neg_channels:
  4647. diff_img -= dcb[:,:,c].reshape(Nx, Ny)
  4648. diff_img += ref_dcb[:,:,c].reshape(Nx, Ny)
  4649. if (smooth > 0):
  4650. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4651. #(centerval, stdval) = meanclip(diff_img, clipsig=2.0)
  4652. (centerval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
  4653. #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
  4654. detection_mask = (abs(diff_img - centerval) > (thresh * stdval))
  4655. if debug:
  4656. import matplotlib.pyplot as plt
  4657. plt.figure()
  4658. plt.imshow(diff_img)
  4659. plt.title('spectral diff image meas(cam0 - cam1) - diff(cam0 - cam1)')
  4660. plt.figure()
  4661. plt.imshow(255*detection_mask)
  4662. plt.title('gas detection mask - before morphology')
  4663. if (erosions > 0):
  4664. detection_mask = binary_erosion(detection_mask, structure=struct, iterations=erosions)
  4665. if debug:
  4666. plt.figure()
  4667. plt.imshow(255*detection_mask)
  4668. plt.title('IR detection_mask - after erosion')
  4669. if (dilations > 0):
  4670. detection_mask = binary_dilation(detection_mask, structure=struct, iterations=dilations)
  4671. if debug:
  4672. plt.figure()
  4673. plt.imshow(255*detection_mask)
  4674. plt.title('IR detection_mask - after dilation')
  4675. #plt.show()
  4676. return(detection_mask)
  4677. ## =================================================================================================
  4678. def get_spectral_detection_mask(dcb, ref_dcb, pos_channels, neg_channels, abs_thresh=None, dynamic_thresh=None, thresh=2.5,
  4679. threshmin=0.0, erosions=0, dilations=0, parallax_mask=None,
  4680. ir_motion_mask=None, ir_spectral_mask =None, saturation_mask = None, smooth=0, debug=False):
  4681. '''
  4682. Calculate the gas detection mask from the multiplex datacube.
  4683. Parameters
  4684. ----------
  4685. dcb : ndarray
  4686. The (Nx,Ny,Nw) datacube from the measurement frame.
  4687. ref_dcb : ndarray
  4688. The (Nx,Ny,Nw) datacube from the reference frame.
  4689. pos_channels : list
  4690. ???
  4691. neg_channels : list
  4692. ???
  4693. abs_thresh : float
  4694. ???
  4695. thresh : float
  4696. The threshold value above which to say a given pixel belongs to the parallax mask.
  4697. threshmin : float
  4698. ???
  4699. dynamic_thresh: float
  4700. The threshold value above which to say a given pixel belogns to the vismotion mask. This is a set value. \
  4701. calculated from the average of 30 frames standard deviation, calculated as distance from 0.
  4702. erosions : int
  4703. The number of morphological erosion steps to use.
  4704. parallax_mask : ndarray
  4705. ???
  4706. ir_motion_mask : ndarray
  4707. ???
  4708. ir_spectral_mask : ndarray
  4709. ???
  4710. dilations : int
  4711. The number of morphological dilation steps to use.
  4712. smooth : int
  4713. The size of the smoothing kernel (0 = no smoothing).
  4714. Returns
  4715. -------
  4716. detection_mask : ndarray
  4717. The boolean array gas detection mask.
  4718. '''
  4719. from scipy import ndimage
  4720. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4721. import numpy.ma as ma
  4722. #from meanclip import meanclip
  4723. erosions = int(erosions)
  4724. dilations = int(dilations)
  4725. struct = ones((3,3), 'bool')
  4726. (Nx,Ny,Nw) = dcb.shape
  4727. diff_img = zeros((Nx,Ny))
  4728. supermask= zeros((Nx,Ny), 'bool')
  4729. for c in pos_channels:
  4730. diff_img += dcb[:,:,c].reshape(Nx, Ny)
  4731. diff_img -= ref_dcb[:,:,c].reshape(Nx, Ny)
  4732. for c in neg_channels:
  4733. diff_img -= dcb[:,:,c].reshape(Nx, Ny)
  4734. diff_img += ref_dcb[:,:,c].reshape(Nx, Ny)
  4735. if smooth:
  4736. diff_img = ndimage.uniform_filter(diff_img, smooth)
  4737. #handle masks
  4738. if (ir_motion_mask != None):
  4739. supermask[ir_motion_mask] = 1
  4740. if (ir_spectral_mask != None):
  4741. supermask[ir_spectral_mask] = 1
  4742. if (saturation_mask != None):
  4743. supermask[saturation_mask] = 1
  4744. if (parallax_mask != None):
  4745. supermask[parallax_mask] = 1
  4746. diff_img_ma = ma.array(diff_img, mask = supermask)
  4747. (centerval, stdval) = (mean(ravel(diff_img_ma)), nanstd(ravel(diff_img_ma)))
  4748. if (abs_thresh == None) and (dynamic_thresh == None):
  4749. #(centerval, stdval) = (nanmedian(ravel(diff_img)), nanstd(ravel(diff_img)))
  4750. imgthresh = thresh * stdval
  4751. if (imgthresh < threshmin): imgthresh = threshmin
  4752. detection_mask = abs(diff_img_ma - centerval) > imgthresh
  4753. elif (dynamic_thresh !=None):
  4754. imgthresh = dynamic_thresh
  4755. if (imgthresh < threshmin): imgthresh = threshmin
  4756. detection_mask = abs(diff_img_ma) > imgthresh
  4757. else:
  4758. imgthresh = abs_thresh
  4759. detection_mask = abs(diff_img_ma) > imgthresh
  4760. if debug:
  4761. import matplotlib.pyplot as plt
  4762. if (ir_motion_mask != None):
  4763. plt.figure()
  4764. plt.imshow(255*ir_motion_mask)
  4765. plt.title('IR motion mask')
  4766. if (parallax_mask != None):
  4767. plt.figure()
  4768. plt.imshow(255*parallax_mask)
  4769. plt.title('Parallax mask')
  4770. plt.figure()
  4771. plt.imshow(diff_img)
  4772. if (abs_thresh == None):
  4773. (meanval, stdval) = (nanmean(ravel(diff_img)), nanstd(ravel(diff_img)))
  4774. maxval = nanmax(ravel(diff_img))
  4775. plt.title('spectral diff image, mean=%f, std=%f, max=%f, thresh=%f' % (meanval, stdval, maxval, imgthresh), fontsize=10)
  4776. plt.figure()
  4777. plt.imshow(255*detection_mask)
  4778. plt.title('gas detection mask - before morphology')
  4779. plt.figure()
  4780. vec = array(diff_img.flatten())
  4781. meanval = nanmean(vec)
  4782. medianval = nanmedian(vec)
  4783. stdval = nanstd(vec)
  4784. minval = nanmin(vec)
  4785. maxval = nanmax(vec)
  4786. diff_img[isnan(diff_img)] = 0.0 ## put pixel values back now that stats are done
  4787. vec = array(diff_img.flatten())
  4788. (n, bin_edges) = histogram(log10(vec), 40)
  4789. ax = plt.subplot(111)
  4790. (counts, edges, patches) = ax.hist(vec, 40, zorder=4, edgecolor='0.5', log=True)
  4791. #statstr = 'mean='+('%f' % meanval) + '\nstd='+('%f' % stdval)
  4792. statstr = ('median=%f\nstd=%f' % (medianval, stdval))
  4793. ax.text(0.8, 0.8, statstr, horizontalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.5), zorder=5)
  4794. ax.axis([amin(edges), amax(edges), 0.5, amax(counts)*1.1])
  4795. if (erosions > 0):
  4796. detection_mask = binary_erosion(detection_mask, structure=struct, iterations=erosions)
  4797. if debug:
  4798. plt.figure()
  4799. plt.imshow(255*detection_mask)
  4800. plt.title('IR detection_mask - after erosion')
  4801. if (dilations > 0):
  4802. detection_mask = binary_dilation(detection_mask, structure=struct, iterations=dilations)
  4803. if debug:
  4804. plt.figure()
  4805. plt.imshow(255*detection_mask)
  4806. plt.title('IR detection_mask - after dilation')
  4807. #plt.show()
  4808. detection_mask[supermask] = 0
  4809. return(detection_mask)
  4810. ## =================================================================================================
  4811. def get_alarm_mask(detection_mask, ir_motion_mask=None, vis_motion_mask=None, parallax_mask=None, ir_steam_mask=None,
  4812. vis_steam_mask=None, erosions=0, dilations=0, debug=False):
  4813. '''
  4814. Calculate the alarm mask.
  4815. Parameters
  4816. ----------
  4817. detection_mask : (bool ndarray)
  4818. An (Nx,Ny) binary image 0=gas absent, 1=gas present.
  4819. ir_motion_mask : (bool ndarray)
  4820. An (Nx,Ny) binary image 0=motion absent, 1=motion present.
  4821. vis_motion_mask : (bool ndarray)
  4822. An (Nx,Ny) binary image 0=motion absent, 1=motion present.
  4823. parallax_mask : (bool ndarray)
  4824. An (Nx,Ny) binary image 0=parallax absent, 1=parallax present.
  4825. ir_steam_mask : (bool ndarray)
  4826. An (Nx,Ny) binary image 0=steam absent, 1=steam present.
  4827. vis_steam_mask : (bool ndarray)
  4828. An (Nx,Ny) binary image 0=steam absent, 1=steam present.
  4829. erosions : int
  4830. The number of morphological erosion steps to use.
  4831. dilations : int
  4832. The number of morphological dilation steps to use.
  4833. Returns
  4834. -------
  4835. detection_mask : ndarray
  4836. The boolean array gas detection mask.
  4837. Notes
  4838. -----
  4839. Do we want to change the definitions of the input masks so that they correspond to the DAISI definitions?
  4840. (That is, should we use their logical inverse from the definitions given above?)
  4841. '''
  4842. from scipy.ndimage.morphology import binary_erosion, binary_dilation
  4843. struct = ones((3,3), 'bool')
  4844. mask = ones_like(detection_mask)
  4845. if (ir_motion_mask != None):
  4846. mask = mask * logical_not(ir_motion_mask)
  4847. if (vis_motion_mask != None):
  4848. mask = mask * logical_not(vis_motion_mask)
  4849. if (parallax_mask != None):
  4850. mask = mask * logical_not(parallax_mask)
  4851. if (ir_steam_mask != None):
  4852. mask = mask * logical_not(ir_steam_mask)
  4853. if (vis_steam_mask != None):
  4854. mask = mask * logical_not(vis_steam_mask)
  4855. alarm_mask = detection_mask * mask
  4856. if debug:
  4857. import matplotlib.pyplot as plt
  4858. plt.figure()
  4859. plt.imshow(255*alarm_mask)
  4860. plt.title('Alarm mask - before morphology')
  4861. if (erosions > 0):
  4862. alarm_mask = binary_erosion(alarm_mask, structure=struct, iterations=erosions)
  4863. if debug:
  4864. plt.figure()
  4865. plt.imshow(255*alarm_mask)
  4866. plt.title('Alarm mask - after erosion')
  4867. if (dilations > 0):
  4868. alarm_mask = binary_dilation(alarm_mask, structure=struct, iterations=dilations)
  4869. if debug:
  4870. plt.figure()
  4871. plt.imshow(255*alarm_mask)
  4872. plt.title('Alarm mask - after dilation')
  4873. #plt.show()
  4874. return(alarm_mask)
  4875. ## =================================================================================================
  4876. def set_saturated_to_nan(dcb_input, ceiling_value, cameras=None):
  4877. '''
  4878. Set any saturated pixels (as determined by the ceiling value) to NaN.
  4879. Parameters
  4880. ----------
  4881. dcb_input : ndarray
  4882. The datacube for which we want to detect saturation and perform pixel replacement.
  4883. ceiling_value : float
  4884. The value above which we want to say the pixels are saturated.
  4885. Returns
  4886. -------
  4887. dcb : ndarray
  4888. The datacube with saturated pixels replaced with NaNs.
  4889. '''
  4890. dcb = array(dcb_input)
  4891. ceiling_value = float(ceiling_value)
  4892. if (cameras != None): ## apply to only a list of the camera numbers
  4893. for w in cameras:
  4894. img = dcb[:,:,w]
  4895. img[img > ceiling_value] = nan
  4896. dcb[:,:,w] = img
  4897. else:
  4898. dcb[dcb > ceiling_value] = nan
  4899. return(dcb)
  4900. ## =================================================================================================
  4901. def get_downsampled_gas_spectrum(gasname, waves_um=None, serial_number=2, normtype='channel_width', debug=False):
  4902. '''
  4903. Get a gas spectrum sampled onto the GCI's spectral channels.
  4904. Parameters
  4905. ----------
  4906. gasname : str
  4907. The name of the gas whose spectrum you want. This name informs what filename in the repository \
  4908. to look for: "cal/gas_spectra/gasname.jdx".
  4909. waves_um : ndarray, optional
  4910. The wavelengths at which to sample the spectrum prior to calculate the downsampling integral.
  4911. serial_number : int
  4912. The serial number of the GCI system to query.
  4913. normtype : string ['L2','max=1','channel_width']
  4914. What type of normalization to apply to the spectrum. With normtype='L2', the resulting vector will \
  4915. have unit normtype (i.e. ideal for cross-correlation measurement). With normtype='max=1', it might \
  4916. be ideal for display. With normtype='channel_width', the downsampled spectrum values should be
  4917. scaled to the values given by the input continuous spectrum (likely ideal for display).
  4918. Example
  4919. -------
  4920. (methane_spectrum, channel_boundaries) = get_downsampled_gas_spectrum('methane', serial_number=2, debug=False)
  4921. '''
  4922. from numpy.linalg import norm
  4923. filterlist = get_filterlist(serial_number=serial_number)
  4924. filter_data = get_filter_spectra(waves_um, filterlist=filterlist)
  4925. waves = filter_data['waves']
  4926. dw = mean(gradient(waves))
  4927. ## The next step is to transform the measurement cube to a spectral cube.
  4928. (H, channel_basisfcns, edgewaves, channel_boundaries) = calculate_hmatrix(filterlist, filter_data, return_data_structure=True)
  4929. nchannels = alen(channel_boundaries) - 1
  4930. gas_data = get_gas_spectra(waves=waves, gaslist=[gasname], skip_nonquant=False, debug=debug)
  4931. gas_fullspectrum = gas_data[gasname]
  4932. downspectrum = zeros(nchannels)
  4933. for n in arange(nchannels):
  4934. integral = sum(gas_fullspectrum * channel_basisfcns[:,n] * dw)
  4935. if (normtype == 'channel_width'):
  4936. normalizer = sum(channel_basisfcns[:,n] * dw)
  4937. else:
  4938. normalizer = 1.0
  4939. downspectrum[n] = integral / normalizer
  4940. if (normtype == 'max=1'):
  4941. downspectrum = downspectrum / amax(downspectrum)
  4942. elif (normtype == 'L2'):
  4943. downspectrum = downspectrum / norm(downspectrum)
  4944. return(downspectrum, channel_boundaries, gas_fullspectrum, waves)
  4945. ## =================================================================================================
def gci_file_ismotioncalib(filename, debug=False):
    '''
    Read a GCI datafile and report whether its cube was taken in-motion or in-calib.

    Parameters
    ----------
    filename : str
        The name of the `*.gci` file to read.
    debug : bool
        If True, print the sizes and topic string parsed from the file.

    Returns
    -------
    isbusy : (boolean or list of boolean plus 2-tuple)
        "isbusy" = Whether the current cube is in-motion or in-calib. \
        "pantilt_setpoint" = 2-tuple of the pan-tilt setpoint coordinates.

    Example
    -------
    ## Search for a reference frame by seeing the switch not_busy -> isbusy.
    dir = '/home/user/DATA/'
    files = sorted(glob(dir+'*.gci'))
    wasbusy = False
    for f in files:
        isbusy = gci_file_ismotioncalib(f, debug=False)
        if wasbusy and not isbusy:
            ref_file = f
            break
        wasbusy = isbusy
    '''
    import struct
    assert isinstance(filename, str), 'The input "filename" must be a string type.'
    assert os.path.exists(filename), 'The input filename ("' + filename + '") does not exist.'
    ## Slurp the whole file; the format is a serialized multipart ZMQ message:
    ## [nparts][topic-length][topic][payload-length][payload...], big-endian uint32 sizes.
    f = open(filename, 'rb')
    raw = f.read()
    f.close()
    total_nbytes = sys.getsizeof(raw)
    if debug: print('total_nbytes=', total_nbytes)
    n = 0  ## running byte offset into the raw buffer
    msg = []
    ## Number of message parts (not otherwise used below -- only the first part is parsed).
    nparts = uint32(struct.unpack('>I', raw[n:n+4]))[0]
    if debug: print('nparts=', nparts)
    n += 4
    ## The first message part is the string label giving the ZMQ subscription topic.
    strlen = uint32(struct.unpack('>I', raw[n:n+4]))[0]
    if debug: print('strlen=', strlen)
    n += 4
    ## NOTE(review): joining unpacked 'c' items assumes Python 2 (str == bytes);
    ## under Python 3 struct.unpack returns bytes objects and ''.join would fail.
    topic = ''.join(struct.unpack('>'+str(strlen)+'c', raw[n:n+strlen]))
    msg.append(topic)
    if debug: print('subscription topic=', topic)
    n += strlen
    ## Loop through the subsequent parts, defining each part of the msg list.
    msg_nbytes = uint32(struct.unpack('>I', raw[n:n+4]))[0]
    if debug: print('msg_nbytes=', msg_nbytes)
    n += 4
    msg.append(raw[n:n+msg_nbytes])
    ## Delegate the JSON metadata inspection to the message-level helper.
    isbusy = gci_msg_ismotioncalib(msg, debug=debug)
    return(isbusy)
  4999. ## =================================================================================================
  5000. def gci_msg_ismotioncalib(msg, give_pantilt_setpoint=False, debug=False):
  5001. '''
  5002. Return True/False about whether the current msg indicates the system is in-motion or in-calib.
  5003. If one cube is in-motion or in-calib, while the next cube is not, it indicates that the second
  5004. cube is a reference cube.
  5005. Parameters
  5006. ----------
  5007. msg : list of str
  5008. The message delivered by the GCI's zmq network interface, or through the contents of a \
  5009. `*.gci` file.
  5010. give_pantilt_setpoint : bool, optional
  5011. Whether or not to include the pan-tilt setpoint values in the return.
  5012. Returns
  5013. -------
  5014. isbusy : (boolean or list of boolean plus 2-tuple)
  5015. "isbusy" = Whether the current cube is in-motion or in-calib. \
  5016. "pantilt_setpoint" = 2-tuple of the pan-tilt setpoint coordinates.
  5017. '''
  5018. assert isinstance(msg, (list, tuple, ndarray))
  5019. topic = msg[0]
  5020. if (topic != '') and debug: print('topic = ' + topic)
  5021. d = json.loads(msg[1]) ## the "payload" dictionary
  5022. inmotion = d['gci-meta']['metadata']['pan-tilt-in-motion']
  5023. incalib = (d['gci-meta']['metadata']['calibration-state'].lower() != 'calibration-idle')
  5024. isbusy = (inmotion or incalib)
  5025. if give_pantilt_setpoint:
  5026. pantilt_setpoint = float32(d['gci-meta']['metadata']['pan-tilt-setpoint'])
  5027. return(isbusy, pantilt_setpoint)
  5028. else:
  5029. return(isbusy)
  5030. ## =================================================================================================
  5031. def to_json(python_object):
  5032. '''
  5033. Allows Numpy arrays to be JSON-seralizable.
  5034. Parameters
  5035. ----------
  5036. python_object : any
  5037. The object to be serialized.
  5038. Returns
  5039. -------
  5040. d : dict
  5041. A Python dictionary describing the object structure and contents.
  5042. Example
  5043. -------
  5044. print(json.dumps(array(data), default=gci.to_json))
  5045. '''
  5046. if isinstance(python_object, ndarray):
  5047. d = {'__class__':'ndarray', '__value__':python_object.tolist()}
  5048. return(d)
  5049. raise TypeError(repr(python_object) + ' is not JSON serializable')
  5050. ## =================================================================================================
  5051. def serial_number_from_filterlist(filterlist, debug=False):
  5052. '''
  5053. Calculate the serial number of the system given knowledge of the filterlist.
  5054. Parameters
  5055. ----------
  5056. filterlist : list of str
  5057. The list of filter present on the system, given in order of the cameras present.
  5058. Returns
  5059. -------
  5060. sn : int
  5061. The serial number of the system.
  5062. '''
  5063. if ('SG_LP8110_06Aug12_0' in filterlist):
  5064. sn = 3
  5065. elif ('NOC_SP11578-000278_02Jan13_0' in filterlist):
  5066. sn = 2
  5067. else:
  5068. raise ValueError('The filterlist does not contain a known library of filters.')
  5069. if debug: print('GCI serial number = ' + str(sn))
  5070. return(sn)
  5071. ## =================================================================================================
  5072. def send_keepalive_command(topic, debug=False):
  5073. '''
  5074. Send a command to the detection algorithm process to emit detection result messages on a given topic.
  5075. Parameters
  5076. ----------
  5077. topic : str
  5078. The topic you want to listen for.
  5079. '''
  5080. ctx = zmq.Context()
  5081. pub = ctx.socket(zmq.PUB)
  5082. pub.connect('tcp://10.1.10.11:9550')
  5083. ## Build the msg
  5084. msg = list()
  5085. msg.append(topic)
  5086. msg.append('JSON_ASYNC_REQ')
  5087. msg.append(json.dumps({'cmd-name':'keep-alive'})) ## payload
  5088. msg.append(json.dumps({'return_addr':'script-keep-alive'})) ## return info
  5089. if debug: print('Sending keep alive command:', msg)
  5090. pub.send_multipart(msg)
  5091. pub.close()
  5092. return
  5093. ## =================================================================================================
  5094. def send_trigger_cal_command(use_1pt_or_2pt, verbose=False):
  5095. '''
  5096. Send an ASYNC command to the local network to perform a 2pt calibration.
  5097. This is a temporary thing until the "send_command" function is back up.
  5098. Parameters
  5099. ----------
  5100. use_1pt_or_2pt : int, (1,2)
  5101. Whether to request a 1pt calibration or a 2pt calibration.
  5102. '''
  5103. use_1pt_or_2pt = int(use_1pt_or_2pt)
  5104. if (use_1pt_or_2pt == 1):
  5105. sub_topic = 'jar-trigger-1pt'
  5106. elif (use_1pt_or_2pt == 2):
  5107. sub_topic = 'jar-trigger-2pt'
  5108. ctx = zmq.Context()
  5109. pub = ctx.socket( zmq.PUB )
  5110. pub.connect('tcp://10.1.10.11:9550')
  5111. topic = 'calibration-control'
  5112. marker = 'JSON_ASYNC_REQ'
  5113. ret_info = dict()
  5114. ret_info['return_addr'] = sub_topic
  5115. payload = dict()
  5116. payload['cmd-name'] = 'trigger_2pt_calibration'
  5117. request = [topic, marker, json.dumps(payload), json.dumps(ret_info)]
  5118. if verbose:
  5119. print('Sending ' + str(use_1pt_or_2pt) + 'pt calibration command:')
  5120. print(request)
  5121. pub.send_multipart(request)
  5122. pub.close()
  5123. return
  5124. ## =================================================================================================
  5125. ## =================================================================================================
if __name__ == "__main__":
    ## When run as a script, just report the module's install directory; all real
    ## functionality lives in the library functions above.
    print('GCIDIR = ' + GCIDIR)
    print('NOTHING TO DO!')