# ==== stsci.tools-3.4.12/lib/stsci/__init__.py ====
try:
    # As long as we're using setuptools/distribute, we need to do this the
    # setuptools way or else pkg_resources will throw up unnecessary and
    # annoying warnings (even though the namespace mechanism will still
    # otherwise work without it).
    # Get rid of this as soon as setuptools/distribute is dead.
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    pass
__path__ = __import__('pkgutil').extend_path(__path__, __name__)

# ==== stsci.tools-3.4.12/lib/stsci/tools/__init__.py ====
from __future__ import division  # confidence high
from .version import *
__vdate__ = __version_date__

# ==== stsci.tools-3.4.12/lib/stsci/tools/alert.py ====
####
# Class AlertDialog
#
# Purpose
# -------
#
# AlertDialogs are widgets that allow one to pop up warnings, one-line
# questions, etc.  They return one of a set of standard action numbers:
# 0 => Cancel was pressed
# 1 => Yes was pressed
# 2 => No was pressed
#
# Standard Usage
# --------------
#
# F = AlertDialog(widget, message)
# action = F.Show()
####

"""
$Id$
"""
from __future__ import absolute_import, division  # confidence high

from .dialog import *


class AlertDialog(ModalDialog):

    def __init__(self, widget, msg):
        self.widget = widget
        self.msgString = msg
        Dialog.__init__(self, widget)

    def SetupDialog(self):
        upperFrame = Frame(self.top)
        upperFrame['relief'] = 'raised'
        upperFrame['bd'] = 1
        upperFrame.pack({'expand': 'yes', 'side': 'top', 'fill': 'both'})
        self.bitmap = Label(upperFrame)
        self.bitmap.pack({'side': 'left'})
        msgList = self.msgString.split("\n")
        for i in range(len(msgList)):
            msgText = Label(upperFrame)
            msgText["text"] = msgList[i]
            msgText.pack({'expand': 'yes', 'side': 'top', 'anchor': 'nw',
                          'fill': 'x'})
        self.lowerFrame = Frame(self.top)
        self.lowerFrame['relief'] = 'raised'
        self.lowerFrame['bd'] = 1
        self.lowerFrame.pack({'expand': 'yes', 'side': 'top', 'pady': '2',
                              'fill': 'both'})

    def OkPressed(self):
        self.TerminateDialog(1)

    def CancelPressed(self):
        self.TerminateDialog(0)

    def NoPressed(self):
        self.TerminateDialog(2)

    def CreateButton(self, text, command):
        self.button = Button(self.lowerFrame)
        self.button["text"] = text
        self.button["command"] = command
        self.button.pack({'expand': 'yes', 'pady': '2', 'side': 'left'})


####
# Class ErrorDialog
#
# Purpose
# -------
#
# To pop up an error message
####

class ErrorDialog(AlertDialog):

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'error'
        self.CreateButton("OK", self.OkPressed)


####
# Class WarningDialog
#
# Purpose
# -------
#
# To pop up a warning message.
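#
# Standard Usage
# --------------
#
# A sketch mirroring the AlertDialog example at the top of this file
# (`widget` is assumed to be an existing Tkinter parent widget, and the
# message text is illustrative):
#
# F = WarningDialog(widget, "Overwrite the existing file?")
# action = F.Show()   # 1 => Yes was pressed, 0 => No was pressed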
####

class WarningDialog(AlertDialog):

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'warning'
        self.CreateButton("Yes", self.OkPressed)
        self.CreateButton("No", self.CancelPressed)


####
# Class QuestionDialog
#
# Purpose
# -------
#
# To pop up a simple question
####

class QuestionDialog(AlertDialog):

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'question'
        self.CreateButton("Yes", self.OkPressed)
        self.CreateButton("No", self.NoPressed)
        self.CreateButton("Cancel", self.CancelPressed)


####
# Class MessageDialog
#
# Purpose
# -------
#
# To pop up a message.
####

class MessageDialog(AlertDialog):

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'warning'
        self.CreateButton("Dismiss", self.CancelPressed)

# ==== stsci.tools-3.4.12/lib/stsci/tools/asnutil.py ====
"""
A module which provides utilities for reading, writing, creating and updating
association tables and shift files.

:author: Warren Hack, Nadia Dencheva
:version: '0.1 (2008-01-03)'
"""
from __future__ import absolute_import, division, print_function  # confidence high

from . import fileutil as fu
from . import wcsutil
import astropy
from astropy.io import fits
import numpy as N
import os.path, time
from distutils.version import LooseVersion

ASTROPY_VER_GE13 = LooseVersion(astropy.__version__) >= LooseVersion('1.3')

__version__ = '0.2(2015-06-23)'


def readASNTable(fname, output=None, prodonly=False):
    """
    Given a FITS filename representing an association table, reads in the
    table as a dictionary which can be used by pydrizzle and multidrizzle.

    An association table is a FITS binary table with 2 required columns:
    'MEMNAME' and 'MEMTYPE'.  It checks the 'MEMPRSNT' column and removes all
    files for which its value is 'no'.

    Parameters
    ----------
    fname : str
        name of association table
    output : str
        name of output product - if not specified by the user,
        the first PROD-DTH name is used if present,
        if not, the first PROD-RPT name is used if present,
        if not, the rootname of the input association table is used.
    prodonly : bool
        what files should be considered as input:
        if True - select only MEMTYPE=PROD* as input
        if False - select only MEMTYPE=EXP as input

    Returns
    -------
    asndict : dict
        A dictionary-like object with all the association information.

    Examples
    --------
    An association table can be read from a file using the following commands::

        >>> from stsci.tools import asnutil
        >>> asntab = asnutil.readASNTable('j8bt06010_shifts_asn.fits', prodonly=False)

    The `asntab` object can now be passed to other code to provide
    relationships between input and output images defined by the association
    table.
    """

    try:
        f = fits.open(fu.osfn(fname))
    except:
        raise IOError("Can't open file %s\n" % fname)

    colnames = f[1].data.names
    try:
        colunits = f[1].data.units
    except AttributeError:
        pass

    hdr = f[0].header

    if 'MEMNAME' not in colnames or 'MEMTYPE' not in colnames:
        msg = 'Association table incomplete: required column(s) MEMNAME/MEMTYPE NOT found!'
raise ValueError(msg) d = {} for n in colnames: d[n]=f[1].data.field(n) f.close() valid_input = d['MEMPRSNT'].copy() memtype = d['MEMTYPE'].copy() prod_dth = (memtype.find('PROD-DTH')==0).nonzero()[0] prod_rpt = (memtype.find('PROD-RPT')==0).nonzero()[0] prod_crj = (memtype.find('PROD-CRJ')==0).nonzero()[0] # set output name if output is None: if prod_dth: output = d['MEMNAME'][prod_dth[0]] elif prod_rpt: output = d['MEMNAME'][prod_rpt[0]] elif prod_crj: output = d['MEMNAME'][prod_crj[0]] else: output = fname.split('_')[0] if prodonly: input = d['MEMTYPE'].find('PROD')==0 if prod_dth: input[prod_dth] = False else: input = (d['MEMTYPE'].find('EXP')==0) valid_input *= input for k in d: d[k] = d[k][valid_input] infiles = list(d['MEMNAME'].lower()) if not infiles: print("No valid input specified") return None if ('XOFFSET' in colnames and d['XOFFSET'].any()) or ('YOFFSET' in colnames and d['YOFFSET'].any()): abshift = True dshift = False try: units=colunits[colnames.index('XOFFSET')] except: units='pixels' xshifts = list(d['XOFFSET']) yshifts = list(d['YOFFSET']) elif ('XDELTA' in colnames and d['XDELTA'].any()) or ('YDELTA' in colnames and d['YDELTA'].any()): abshift = False dshift = True try: units=colunits[colnames.index('XDELTA')] except: units='pixels' xshifts = list(d['XDELTA']) yshifts = list(d['YDELTA']) else: abshift = False dshift = False members = {} if not abshift and not dshift: asndict = ASNTable(infiles,output=output) asndict.create() return asndict else: try: refimage = hdr['refimage'] except KeyError: refimage = None try: frame = hdr['shframe'] except KeyError: frame = 'input' if 'ROTATION' in colnames: rots = list(d['ROTATION']) if 'SCALE' in colnames: scales = list(d['SCALE']) for r in range(len(infiles)): row = r xshift = xshifts[r] yshift = yshifts[r] if rots: rot = rots[r] if scales: scale = scales[r] members[infiles[r]] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift, yshift=yshift, scale=scale, refimage=refimage, shift_frame=frame, shift_units=units) asndict= ASNTable(infiles, output=output) asndict.create() asndict['members'].update(members) return asndict class ASNTable(dict): """ A dictionary like object which represents an association table. An ASNTable object looks like this:: {'members': {'j8bt06nyq': {'abshift': False, 'dshift': True, 'refimage': 'j8bt06010_shifts_asn.fits[wcs]', 'rot': 0.0, 'row': 0, 'scale': 1.0, 'shift_frame': 'input', 'shift_units': 'pixels', 'xoff': 0.0, 'xshift': 0.0, 'yoff': 0.0, 'yshift': 0.0}, 'j8bt06nzq': {'abshift': False, 'dshift': True, 'refimage': 'j8bt06010_shifts_asn.fits[wcs]', 'rot': 359.99829, 'row': 1, 'scale': 1.000165, 'shift_frame': 'input', 'shift_units': 'pixels', 'xoff': 0.0, 'xshift': 0.4091132, 'yoff': 0.0, 'yshift': -0.56702018}}, 'order': ['j8bt06nyq', 'j8bt06nzq'], 'output': 'j8bt06nyq'} Examples -------- Creating an ASNTable object from 3 filenames and a shift file would be done using:: >>> asnt=ASNTable([fname1,fname2, fname3], shiftfile='shifts.txt') The ASNTable object would have the 'members' and 'order' in the association table populated based on `infiles` and `shiftfile`. This creates a blank association table from the ASNTable object:: >>> asnt.create() """ def __init__(self, inlist=None, output=None, shiftfile=None): """ Parameters ---------- inlist : list A list of filenames. output : str A user specified output name or 'final'. shiftfile : str A name of a shift file, if given, the association table will be updated with the values in the shift file. 
""" if output is None: if len(inlist) == 1: self.output = fu.buildNewRootname(inlist[0]) else: self.output = 'final' else: self.output = fu.buildNewRootname(output) # Ensure that output name does not already contain '_drz' _indx = self.output.find('_drz') if _indx > 0: self.output = self.output[:_indx] self.order = [] if inlist is not None: for fn in inlist: if fu.findFile(fu.buildRootname(fn)): self.order.append(fu.buildNewRootname(fn)) else: # This may mean corrupted asn table in which a file is listed as present # when it is missing. raise IOError('File %s not found.\n' %fn) dict.__init__(self, output=self.output, order=[], members={}) if inlist is not None: self.input = [fu.buildRootname(f) for f in inlist] self.shiftfile = shiftfile def create(self, shiftfile=None): members = {} row = 0 dshift = False abshift = False # Parse out shift file, if provided if shiftfile is not None: sdict = ShiftFile(shiftfile) elif self.shiftfile is not None: sdict = ShiftFile(self.shiftfile) shift_frame = sdict['frame'] shift_units = sdict['units'] refimage = sdict['refimage'] if sdict['form']=='delta': dshift = True else: abshift = True for f in self.input: xshift = sdict[f][0] yshift = sdict[f][1] rot = sdict[f][2] scale = sdict[f][3] #This may not be the right thing to do, may want to keep _flt in rootname # to distinguish between _c0h.fits, _c0f.fits and '.c0h' fname = fu.buildNewRootname(f) members[fname] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift, yshift=yshift, scale=scale, refimage=refimage, shift_frame=shift_frame, shift_units=shift_units) row+=1 else: for f in self.input: # also here fname = fu.buildNewRootname(f) members[fname] = ASNMember(row=row) row+=1 self['members'].update(members) self['order']=self.order def update(self, members=None, shiftfile=None, replace=False): __help_update=""" Update an existing association table. Parameters ---------- members : dict A dictionary representing asndict['members']. shiftfile : str The name of a shift file If given, shiftfile will replace shifts in an asndict. replace : bool False(default) A flag which indicates whether the 'members' item of an association table should be updated or replaced. default: False If True, it's up to the user to replace also asndict['order'] """ if members and isinstance(members, dict): if not replace: self['members'].update(members=members) else: self['members'] = members elif shiftfile: members = {} abshift = False dshift = False row = 0 sdict = ShiftFile(shiftfile) shift_frame = sdict['frame'] shift_units = sdict['units'] refimage = sdict['refimage'] if sdict['form']=='delta': dshift = True else: abshift = True for f in self.order: fullname = fu.buildRootname(f) xshift = sdict[fullname][0] yshift = sdict[fullname][1] rot = sdict[fullname][2] scale = sdict[fullname][3] members[f] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift, yshift=yshift, scale=scale, refimage=refimage, shift_frame=shift_frame, shift_units=shift_units) row+=1 self['members'].update(members) else: #print __help_update pass def write(self, output=None): """ Write association table to a file. """ if not output: outfile = self['output']+'_asn.fits' output = self['output'] else: outfile = output # Delete the file if it exists. if os.path.exists(outfile): warningmsg = "\n#########################################\n" warningmsg += "# #\n" warningmsg += "# WARNING: #\n" warningmsg += "# The existing association table, #\n" warningmsg += " " + str(outfile) + '\n' warningmsg += "# is being replaced. 
            #\n"
            warningmsg += "#                                       #\n"
            warningmsg += "#########################################\n\n"
        fasn = fits.HDUList()

        # Compute maximum length of MEMNAME for table column definition
        _maxlen = 0
        for _fname in self['order']:
            if len(_fname) > _maxlen: _maxlen = len(_fname)
        # Enforce a minimum size of 24
        if _maxlen < 24: _maxlen = 24
        namelen_str = str(_maxlen+2)+'A'

        self.buildPrimary(fasn, output=output)

        mname = self['order'][:]
        mname.append(output)
        mtype = ['EXP-DTH' for l in self['order']]
        mtype.append('PROD-DTH')
        mprsn = [True for l in self['order']]
        mprsn.append(False)
        xoff = [self['members'][l]['xoff'] for l in self['order']]
        xoff.append(0.0)
        yoff = [self['members'][l]['yoff'] for l in self['order']]
        yoff.append(0.0)
        xsh = [self['members'][l]['xshift'] for l in self['order']]
        xsh.append(0.0)
        ysh = [self['members'][l]['yshift'] for l in self['order']]
        ysh.append(0.0)
        rot = [self['members'][l]['rot'] for l in self['order']]
        rot.append(0.0)
        scl = [self['members'][l]['scale'] for l in self['order']]
        scl.append(1.0)

        memname = fits.Column(name='MEMNAME', format=namelen_str,
                              array=N.char.array(mname))
        memtype = fits.Column(name='MEMTYPE', format='14A',
                              array=N.char.array(mtype))
        memprsn = fits.Column(name='MEMPRSNT', format='L',
                              array=N.array(mprsn).astype(N.uint8))
        xoffset = fits.Column(name='XOFFSET', format='E', array=N.array(xoff))
        yoffset = fits.Column(name='YOFFSET', format='E', array=N.array(yoff))
        xdelta = fits.Column(name='XDELTA', format='E', array=N.array(xsh))
        ydelta = fits.Column(name='YDELTA', format='E', array=N.array(ysh))
        rotation = fits.Column(name='ROTATION', format='E', array=N.array(rot))
        scale = fits.Column(name='SCALE', format='E', array=N.array(scl))

        cols = fits.ColDefs([memname, memtype, memprsn, xoffset, yoffset,
                             xdelta, ydelta, rotation, scale])
        hdu = fits.BinTableHDU.from_columns(cols)
        fasn.append(hdu)
        if ASTROPY_VER_GE13:
            fasn.writeto(outfile, overwrite=True)
        else:
            fasn.writeto(outfile, clobber=True)
        fasn.close()

        mem0 = self['order'][0]
        refimg = self['members'][mem0]['refimage']
        if refimg is not None:
            whdu = wcsutil.WCSObject(refimg)
            whdu.createReferenceWCS(outfile, overwrite=False)
            ftab = fits.open(outfile)
            ftab['primary'].header['refimage'] = outfile + "[wcs]"
            ftab.close()
            del whdu

    def buildPrimary(self, fasn, output=None):
        _prihdr = fits.Header([
            fits.Card('SIMPLE', True, 'Fits standard'),
            fits.Card('BITPIX ', 16, ' Bits per pixel'),
            fits.Card('NAXIS ', 0, ' Number of axes'),
            fits.Card('ORIGIN ', 'NOAO-IRAF FITS Image Kernel July 1999', 'FITS file originator'),
            fits.Card('IRAF-TLM', '18:26:13 (27/03/2000)', ' Time of last modification'),
            fits.Card('EXTEND ', True, ' File may contain standard extensions'),
            fits.Card('NEXTEND ', 1, ' Number of standard extensions'),
            fits.Card('DATE ', '2001-02-14T20:07:57', ' date this file was written (yyyy-mm-dd)'),
            fits.Card('FILENAME', 'hr_box_asn.fits', ' name of file'),
            fits.Card('FILETYPE', 'ASN_TABLE', ' type of data found in data file'),
            fits.Card('TELESCOP', 'HST', ' telescope used to acquire data'),
            fits.Card('INSTRUME', 'ACS ', ' identifier for instrument used to acquire data'),
            fits.Card('EQUINOX ', 2000.0, ' equinox of celestial coord. system'),
            fits.Card('ROOTNAME', 'hr_box ', ' rootname of the observation set'),
            fits.Card('PRIMESI ', 'ACS ', ' instrument designated as prime'),
            fits.Card('TARGNAME', 'SIM-DITHER', 'proposer\'s target name'),
            fits.Card('RA_TARG ', 0., ' right ascension of the target (deg) (J2000)'),
            fits.Card('DEC_TARG', 0.
,' declination of the target (deg) (J2000)'), fits.Card('DETECTOR', 'HRC ' ,' detector in use: WFC, HRC, or SBC'), fits.Card('ASN_ID ', 'hr_box ' ,' unique identifier assigned to association'), fits.Card('ASN_TAB ', 'hr_box_asn.fits' ,' name of the association table')]) # Format time values for keywords IRAF-TLM, and DATE _ltime = time.localtime(time.time()) tlm_str = time.strftime('%H:%M:%S (%d/%m/%Y)',_ltime) date_str = time.strftime('%Y-%m-%dT%H:%M:%S',_ltime) origin_str = 'FITS Version '+ astropy.__version__ # Build PRIMARY HDU _hdu = fits.PrimaryHDU(header=_prihdr) fasn.append(_hdu) newhdr = fasn['PRIMARY'].header mem0name = self['order'][0] refimg = self['members'][mem0name]['refimage'] shframe = self['members'][mem0name]['shift_frame'] fullname = fu.buildRootname(mem0name,ext=['_flt.fits', '_c0h.fits', '_c0f.fits']) try: # Open img1 to obtain keyword values for updating template fimg1 = fits.open(fullname) except: print('File %s does not exist' % fullname) kws = ['INSTRUME', 'PRIMESI', 'TARGNAME', 'DETECTOR', 'RA_TARG', 'DEC_TARG'] mem0hdr = fimg1['PRIMARY'].header default = 'UNKNOWN' for kw in kws: try: newhdr[kw] = mem0hdr[kw] except: newhdr[kw] = default fimg1.close() if not output: output = self['output'] outfilename = fu.buildNewRootname(output, extn='_asn.fits') newhdr['IRAF-TLM']=tlm_str newhdr['DATE'] = date_str newhdr['ORIGIN'] = origin_str newhdr['ROOTNAME'] = output newhdr['FILENAME'] = outfilename newhdr['ASN_ID'] = output newhdr['ASN_TAB'] = outfilename newhdr['SHFRAME'] = (shframe, "Frame which shifts are measured") newhdr['REFIMAGE'] = (refimg, "Image shifts were measured from") class ASNMember(dict): """ A dictionary like object representing a member of an association table. It looks like this:: 'j8bt06nzq': {'abshift': False, 'dshift': True, 'refimage': 'j8bt06010_shifts_asn.fits[wcs]', 'rot': 359.99829, 'row': 1, 'scale': 1.000165, 'shift_frame': 'input', 'shift_units': 'pixels', 'xoff': 0.0, 'xshift': 0.4091132, 'yoff': 0.0, 'yshift': -0.56702018} If `abshift` is True, shifts, rotation and scale refer to absolute shifts. If `dshift` is True, they are delta shifts. """ def __init__(self, xoff=0.0, yoff=0.0, rot=0.0, xshift=0.0, yshift=0.0, scale=1.0, dshift=False, abshift=False, refimage="", shift_frame="", shift_units='pixels', row=0): dict.__init__(self, xoff=xoff, yoff=yoff, xshift=xshift, yshift=yshift, rot=rot, scale=scale, dshift=dshift, abshift=abshift, refimage=refimage, shift_frame=shift_frame, shift_units=shift_units, row=row) class ShiftFile(dict): """ A shift file has the following format (name, Xsh, Ysh, Rot, Scale):: # frame: output # refimage: tweak_wcs.fits[wcs] # form: delta # units: pixels j8bt06nyq_flt.fits 0.0 0.0 0.0 1.0 j8bt06nzq_flt.fits 0.4091132 -0.5670202 359.9983 1.000165 This object creates a `dict` like object representing a shift file used by Pydrizzle and Mirashift. """ def __init__(self,filename="", form='delta', frame=None, units='pixels', order=None, refimage=None, **kw): """ :Purpose: Create a dict like ShiftFile object from a shift file on disk or from variables in memory. If a file name is provided all other parameters are ignored. Examples --------- These examples demonstrate a couple of the most common usages. 
        Read a shift file on disk using::

            >>> sdict = ShiftFile('shifts.txt')

        Pass values for the fields of the shift file and a dictionary with
        all files::

            >>> d = {'j8bt06nyq_flt.fits': [0.0, 0.0, 0.0, 1.0],
            ...      'j8bt06nzq_flt.fits': [0.4091132, -0.5670202, 359.9983, 1.000165]}
            >>> sdict = ShiftFile(form='absolute', frame='output', units='pixels',
            ...                   order=['j8bt06nyq_flt.fits', 'j8bt06nzq_flt.fits'],
            ...                   refimage='tweak_wcs.fits[wcs]', **d)

        The return value can then be used to provide the shift information to
        code in memory.

        Parameters
        ----------
        filename : str
            Name of shift file on disk; see the expected format above.
        form : str
            Form of shifts (absolute|delta)
        frame : str
            Frame in which the shifts should be applied (input|output)
        units : str
            Units in which the shifts are measured.
        order : list
            Keeps track of the order of the files.
        refimage : str
            name of reference image
        **d : dict
            keys: file names
            values: a list: [Xsh, Ysh, Rot, Scale]
            The keys must match the files in the order parameter.

        Raises
        ------
        ValueError
            If the reference file can't be found
        """
        ## History: This is refactored code which was initially in fileutil.py
        ## and pydrizzle: buildasn.py and updateasn.py
        dict.__init__(self, form=form, frame=frame, units=units, order=order,
                      refimage=refimage)

        if filename == "":
            self.update(kw)
        else:
            self.readShiftFile(filename)

        if not self.verifyShiftFile():
            msg = "\nReference image not found.\n "
            msg += "The keyword in the shift file has changed from 'reference' to 'refimage'.\n"
            msg += "Make sure this keyword is specified as 'refimage' in %s." % filename
            raise ValueError(msg)

    def readShiftFile(self, filename):
        """
        Reads a shift file from disk and populates a dictionary.
        """
        order = []
        fshift = open(filename, 'r')
        flines = fshift.readlines()
        fshift.close()

        common = [f.strip('#').strip() for f in flines if f.startswith('#')]
        c = [line.split(': ') for line in common]

        # Remove any line comments in the shift file - lines starting with '#'
        # but not part of the common block.
        for l in c[:]:  # iterate over a copy, since items are removed
            if l[0] not in ['frame', 'refimage', 'form', 'units']:
                c.remove(l)

        for line in c:
            line[1] = line[1].strip()
        self.update(c)

        files = [f.strip().split(' ', 1) for f in flines
                 if not (f.startswith('#') or f.strip() == '')]
        for f in files:
            order.append(f[0])

        self['order'] = order

        for f in files:
            # Check to see if filename provided is a full filename that
            # corresponds to a file on the path.  If not, try to convert the
            # given rootname into a valid filename based on available files.
            # This may or may not define the correct filename, which is why it
            # prints out what it is doing, so that the user can verify and
            # edit the shiftfile if needed.
            #NOTE:
            # Supporting the specification of only rootnames in the shiftfile
            # with this filename expansion is NOT to be documented, but is
            # provided solely as an undocumented, dangerous and not fully
            # supported helper function for some backwards compatibility.
            if not os.path.exists(f[0]):
                f[0] = fu.buildRootname(f[0])
                print('Defining filename in shiftfile as: ', f[0])

            f[1] = f[1].split()
            try:
                f[1] = [float(s) for s in f[1]]
            except ValueError:
                msg = ('Cannot read in %s from shiftfile %s as float numbers'
                       % (f[1], filename))
                raise ValueError(msg)
            msg = "At least 2 and at most 4 shift values should be provided in a shiftfile"
            if len(f[1]) < 2:
                raise ValueError(msg)
            elif len(f[1]) == 3:
                f[1].append(1.0)
            elif len(f[1]) == 2:
                f[1].extend([0.0, 1.0])
            elif len(f[1]) > 4:
                raise ValueError(msg)

        fdict = dict(files)
        self.update(fdict)

    def verifyShiftFile(self):
        """
        Verifies that the reference file exists.
        """
        if self['refimage'] and fu.findFile(self['refimage']):
            return True
        else:
            return False

    def writeShiftFile(self, filename="shifts.txt"):
        """
        Writes a shift file object to a file on disk using the convention for
        shift file format.
        """
        lines = ['# frame: ', self['frame'], '\n',
                 '# refimage: ', self['refimage'], '\n',
                 '# form: ', self['form'], '\n',
                 '# units: ', self['units'], '\n']

        for o in self['order']:
            ss = " "
            for shift in self[o]:
                ss += str(shift) + " "
            line = str(o) + ss + "\n"
            lines.append(line)

        fshifts = open(filename, 'w')
        fshifts.writelines(lines)
        fshifts.close()

# ==== stsci.tools-3.4.12/lib/stsci/tools/basicpar.py ====
"""basicpar.py -- General base class for parameter objects.  Broken out
from PyRAF's IrafPar class.

$Id$
"""
from __future__ import absolute_import, division, print_function  # confidence high

import re, sys
from . import irafutils, minmatch
from .irafglobals import INDEF, Verbose, yes, no

if sys.version_info[0] > 2:
    int_types = (int, )
else:
    int_types = (int, long)

# container class used for __deepcopy__ method
class _EmptyClass: pass

# -----------------------------------------------------
# Warning (non-fatal) error.  Raise an exception if in
# strict mode, or print a message if Verbose is on.
# -----------------------------------------------------
# Verbose (set in irafglobals.py) determines
# whether warning messages are printed when errors are found.  The
# strict parameter to various methods and functions can be set to
# raise an exception on errors; otherwise we do our best to work
# around errors, only raising an exception for really serious,
# unrecoverable problems.

def warning(msg, strict=0, exception=SyntaxError, level=0):
    if strict:
        raise exception(msg)
    elif Verbose > level:
        sys.stdout.flush()
        sys.stderr.write('Warning: %s' % msg)
        if msg[-1:] != '\n':
            sys.stderr.write('\n')

# -----------------------------------------------------
# basic parameter factory
# -----------------------------------------------------

_string_types = [ 's', 'f', 'struct', 'z' ]
_real_types = [ 'r', 'd' ]

def parFactory(fields, strict=0):
    """parameter factory function

    fields is a list of the comma-separated fields (as in the .par file).
    Each entry is a string or None (indicating that the field was omitted.)
    Set the strict parameter to a non-zero value to do stricter parsing
    (to find errors in the input)"""

    if len(fields) < 3 or None in fields[0:3]:
        raise SyntaxError("At least 3 fields must be given")
    type = fields[1]
    if type in _string_types:
        return IrafParS(fields, strict)
    elif type == 'R':
        return StrictParR(fields, 1)
    elif type in _real_types:
        return IrafParR(fields, strict)
    elif type == "I":
        return StrictParI(fields, 1)
    elif type == "i":
        return IrafParI(fields, strict)
    elif type == "b":
        return IrafParB(fields, strict)
    elif type == "ar":
        return IrafParAR(fields, strict)
    elif type == "ai":
        return IrafParAI(fields, strict)
    elif type == "as":
        return IrafParAS(fields, strict)
    elif type == "ab":
        return IrafParAB(fields, strict)
    elif type[:1] == "a":
        raise SyntaxError("Cannot handle arrays of type %s" % type)
    else:
        raise SyntaxError("Cannot handle parameter type %s" % type)

# --------------------------------------------------------
# Publish the (simple) algorithm for combining scope+name
# --------------------------------------------------------

def makeFullName(parScope, parName):
    """ Create the fully-qualified name (includes scope if used) """
    # Skip scope (and leading dot) if no scope, even in cases where scope
    # IS used for other pars in the same task.
    if parScope:
        return parScope+'.'+parName
    else:
        return parName

# -----------------------------------------------------
# Set up minmatch dictionaries for parameter fields
# -----------------------------------------------------

flist = ("p_name", "p_xtype", "p_type", "p_mode", "p_prompt", "p_scope",
         "p_value", "p_default", "p_filename", "p_maximum", "p_minimum")
_getFieldDict = minmatch.MinMatchDict()
for field in flist:
    _getFieldDict.add(field, field)

flist = ("p_prompt", "p_value", "p_filename", "p_maximum", "p_minimum",
         "p_mode", "p_scope")
_setFieldDict = minmatch.MinMatchDict()
for field in flist:
    _setFieldDict.add(field, field)

del flist, field

# utility function to check whether string is a parameter field
def isParField(s):
    """Returns true if string s appears to be a parameter field"""
    try:
        return (s[:2] == "p_") and s in _getFieldDict
    except minmatch.AmbiguousKeyError:
        # If ambiguous match, assume it is a parameter field.
        # An exception will doubtless be raised later, but
        # there's really no good choice here.
        return 1

# basic IrafPar attributes
# IrafPar's are protected in setattr against adding arbitrary attributes,
# and this dictionary is used as a helper in instance initialization
_IrafPar_attr_dict = {
    "name": None,
    "type": None,
    "mode": None,
    "value": None,
    "min": None,
    "max": None,
    "choice": None,
    "choiceDict": None,
    "prompt": None,
    "flags": 0,
    "scope": None,
}

# flag bits tell whether value has been changed and
# whether it was set on the command line.
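# A minimal usage sketch (hypothetical field values, not taken from any real
# .par file; IrafParR, the type 'r' class returned by parFactory, is defined
# further down in this module):
#
#     >>> p = parFactory(['radius', 'r', 'h', '5.0', '0.', '10.', 'Radius'],
#     ...                strict=0)
#     >>> p.value
#     5.0
#     >>> p.set('7.5')       # checked against the 0-10 min/max range
#     >>> p.isChanged()      # set() turns on the "changed" flag bit below
#     True

# The flag bit values themselves: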
_changedFlag = 1 _cmdlineFlag = 2 # ----------------------------------------------------- # IRAF parameter base class # ----------------------------------------------------- class IrafPar: """Non-array IRAF parameter base class""" def __init__(self,fields,strict=0): orig_len = len(fields) if orig_len < 3 or None in fields[0:3]: raise SyntaxError("At least 3 fields must be given") # # all the attributes that are going to get defined # self.__dict__.update(_IrafPar_attr_dict) self.name = fields[0] self.type = fields[1] self.mode = fields[2] self.scope = None # simple default; may be unused # # put fields into appropriate attributes # while len(fields) < 7: fields.append(None) # self.value = self._coerceValue(fields[3],strict) if fields[4] is not None and '|' in fields[4]: self._setChoice(fields[4].strip(),strict) if fields[5] is not None: if orig_len < 7: warning("Max value illegal when choice list given" + " for parameter " + self.name + " (probably missing comma)", strict) # try to recover by assuming max string is prompt fields[6] = fields[5] fields[5] = None else: warning("Max value illegal when choice list given" + " for parameter " + self.name, strict) else: #XXX should catch ValueError exceptions here and set to null #XXX could also check for missing comma (null prompt, prompt #XXX in max field) if fields[4] is not None: self.min = self._coerceValue(fields[4],strict) if fields[5] is not None: self.max = self._coerceValue(fields[5],strict) if self.min not in [None, INDEF] and \ self.max not in [None, INDEF] and self.max < self.min: warning("Max " + str(self.max) + " is less than minimum " + \ str(self.min) + " for parameter " + self.name, strict) self.min, self.max = self.max, self.min if fields[6] is not None: self.prompt = irafutils.removeEscapes( irafutils.stripQuotes(fields[6])) else: self.prompt = '' # # check attributes to make sure they are appropriate for # this parameter type (e.g. some do not allow choice list # or min/max) # self._checkAttribs(strict) # # check parameter value to see if it is correct # try: self.checkValue(self.value,strict) except ValueError as e: warning("Illegal initial value for parameter\n" + str(e), strict, exception=ValueError) # Set illegal values to None, just like IRAF self.value = None #-------------------------------------------- # public accessor methods #-------------------------------------------- def isLegal(self): """Returns true if current parameter value is legal""" try: # apply a stricter definition of legal here # fixable values have already been fixed # don't accept None values self.checkValue(self.value) return self.value is not None except ValueError: return 0 def setScope(self,value=''): """Set scope value. Written this way to not change the standard set of fields in the comma-separated list. 
""" # set through dictionary to avoid extra calls to __setattr__ self.__dict__['scope'] = value def setCmdline(self,value=1): """Set cmdline flag""" # set through dictionary to avoid extra calls to __setattr__ if value: self.__dict__['flags'] = self.flags | _cmdlineFlag else: self.__dict__['flags'] = self.flags & ~_cmdlineFlag def isCmdline(self): """Return cmdline flag""" return (self.flags & _cmdlineFlag) == _cmdlineFlag def setChanged(self,value=1): """Set changed flag""" # set through dictionary to avoid another call to __setattr__ if value: self.__dict__['flags'] = self.flags | _changedFlag else: self.__dict__['flags'] = self.flags & ~_changedFlag def isChanged(self): """Return changed flag""" return (self.flags & _changedFlag) == _changedFlag def setFlags(self,value): """Set all flags""" self.__dict__['flags'] = value def isLearned(self, mode=None): """Return true if this parameter is learned Hidden parameters are not learned; automatic parameters inherit behavior from package/cl; other parameters are learned. If mode is set, it determines how automatic parameters behave. If not set, cl.mode parameter determines behavior. """ if "l" in self.mode: return 1 if "h" in self.mode: return 0 if "a" in self.mode: if mode is None: mode = 'ql' # that is, iraf.cl.mode if "h" in mode and "l" not in mode: return 0 return 1 #-------------------------------------------- # other public methods #-------------------------------------------- def getPrompt(self): """Alias for getWithPrompt() for backward compatibility""" return self.getWithPrompt() def getWithPrompt(self): """Interactively prompt for parameter value""" if self.prompt: pstring = self.prompt.split("\n")[0].strip() else: pstring = self.name if self.choice: schoice = list(map(self.toString, self.choice)) pstring = pstring + " (" + "|".join(schoice) + ")" elif self.min not in [None, INDEF] or \ self.max not in [None, INDEF]: pstring = pstring + " (" if self.min not in [None, INDEF]: pstring = pstring + self.toString(self.min) pstring = pstring + ":" if self.max not in [None, INDEF]: pstring = pstring + self.toString(self.max) pstring = pstring + ")" # add current value as default if self.value is not None: pstring = pstring + " (" + self.toString(self.value,quoted=1) + ")" pstring = pstring + ": " # don't redirect stdin/out unless redirected filehandles are also ttys # or unless originals are NOT ttys stdout = sys.__stdout__ try: if sys.stdout.isatty() or not stdout.isatty(): stdout = sys.stdout except AttributeError: pass stdin = sys.__stdin__ try: if sys.stdin.isatty() or not stdin.isatty(): stdin = sys.stdin except AttributeError: pass # print prompt, suppressing both newline and following space stdout.write(pstring) stdout.flush() ovalue = irafutils.tkreadline(stdin) value = ovalue.strip() # loop until we get an acceptable value while (1): try: # null input usually means use current value as default # check it anyway since it might not be acceptable if value == "": value = self._nullPrompt() self.set(value) # None (no value) is not acceptable value after prompt if self.value is not None: return # if not EOF, keep looping if ovalue == "": stdout.flush() raise EOFError("EOF on parameter prompt") print("Error: specify a value for the parameter") except ValueError as e: print(str(e)) stdout.write(pstring) stdout.flush() ovalue = irafutils.tkreadline(stdin) value = ovalue.strip() def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"): """Return value of this parameter as a string (or in native format if native is 
        non-zero.)"""
        if field and field != "p_value":
            # note p_value comes back to this routine, so shortcut that case
            return self._getField(field, native=native, prompt=prompt)

        # may prompt for value if prompt flag is set
        if prompt:
            self._optionalPrompt(mode)

        if index is not None:
            raise SyntaxError("Parameter "+self.name+" is not an array")

        if native:
            rv = self.value
        else:
            rv = self.toString(self.value)
        return rv

    def set(self, value, field=None, index=None, check=1):
        """Set value of this parameter from a string or other value.
        Field is optional parameter field (p_prompt, p_minimum, etc.)
        Index is optional array index (zero-based).  Set check=0 to
        assign the value without checking to see if it is within the
        min-max range or in the choice list."""
        if index is not None:
            raise SyntaxError("Parameter "+self.name+" is not an array")

        if field:
            self._setField(value, field, check=check)
        else:
            if check:
                self.value = self.checkValue(value)
            else:
                self.value = self._coerceValue(value)
            self.setChanged()

    def checkValue(self, value, strict=0):
        """Check and convert a parameter value.

        Raises an exception if the value is not permitted for this
        parameter.  Otherwise returns the value (converted to the right type.)
        """
        v = self._coerceValue(value, strict)
        return self.checkOneValue(v, strict)

    def checkOneValue(self, v, strict=0):
        """Checks a single value to see if it is in range or choice list

        Allows indirection strings starting with ")".  Assumes v has
        already been converted to the right value by _coerceOneValue.
        Returns value if OK, or raises ValueError if not OK.
        """
        if v in [None, INDEF] or (isinstance(v, str) and v[:1] == ")"):
            return v
        elif v == "":
            # most parameters treat null string as omitted value
            return None
        elif self.choice is not None and v not in self.choiceDict:
            schoice = list(map(self.toString, self.choice))
            schoice = "|".join(schoice)
            raise ValueError("Parameter %s: "
                    "value %s is not in choice list (%s)" %
                    (self.name, str(v), schoice))
        elif (self.min not in [None, INDEF] and v < self.min):
            raise ValueError("Parameter %s: "
                    "value `%s' is less than minimum `%s'" %
                    (self.name, str(v), str(self.min)))
        elif (self.max not in [None, INDEF] and v > self.max):
            raise ValueError("Parameter %s: "
                    "value `%s' is greater than maximum `%s'" %
                    (self.name, str(v), str(self.max)))
        return v

    def dpar(self, cl=1):
        """Return dpar-style executable assignment for parameter

        Default is to write CL version of code; if cl parameter is
        false, writes Python executable code instead.
        """
        sval = self.toString(self.value, quoted=1)
        if not cl:
            if sval == "": sval = "None"
        s = "%s = %s" % (self.name, sval)
        return s

    def fullName(self):
        """ Return the fully-qualified name (includes scope if used) """
        return makeFullName(self.scope, self.name)  # scope can be None or ''

    def pretty(self, verbose=0):
        """Return pretty list description of parameter"""
        # split prompt lines and add blanks in later lines to align them
        plines = self.prompt.split('\n')
        for i in range(len(plines)-1):
            plines[i+1] = 32*' ' + plines[i+1]
        plines = '\n'.join(plines)
        namelen = min(len(self.name), 12)
        pvalue = self.get(prompt=0, lpar=1)
        alwaysquoted = ['s', 'f', '*gcur', '*imcur', '*ukey', 'pset']
        if self.type in alwaysquoted and self.value is not None:
            pvalue = '"' + pvalue + '"'
        if self.mode == "h":
            s = "%13s = %-15s %s" % ("("+self.name[:namelen],
                                     pvalue+")", plines)
        else:
            s = "%13s = %-15s %s" % (self.name[:namelen], pvalue, plines)
        if not verbose:
            return s
        if self.choice is not None:
            s = s + "\n" + 32*" " + "|"
            nline = 33
            for i in range(len(self.choice)):
                sch = str(self.choice[i]) + "|"
                s = s + sch
                nline = nline + len(sch) + 1
                if nline > 80:
                    s = s + "\n" + 32*" " + "|"
                    nline = 33
        elif self.min not in [None, INDEF] or self.max not in [None, INDEF]:
            s = s + "\n" + 32*" "
            if self.min not in [None, INDEF]:
                s = s + str(self.min) + " <= "
            s = s + self.name
            if self.max not in [None, INDEF]:
                s = s + " <= " + str(self.max)
        return s

    def save(self, dolist=0):
        """Return .par format string for this parameter

        If dolist is set, returns fields as a list of strings.  Default
        is to return a single string appropriate for writing to a file.
        """
        quoted = not dolist
        fields = 7*[""]
        fields[0] = self.name
        fields[1] = self.type
        fields[2] = self.mode
        fields[3] = self.toString(self.value, quoted=quoted)
        if self.choice is not None:
            schoice = list(map(self.toString, self.choice))
            schoice.insert(0, '')
            schoice.append('')
            fields[4] = repr('|'.join(schoice))
        elif self.min not in [None, INDEF]:
            fields[4] = self.toString(self.min, quoted=quoted)
        if self.max not in [None, INDEF]:
            fields[5] = self.toString(self.max, quoted=quoted)
        if self.prompt:
            if quoted:
                sprompt = repr(self.prompt)
            else:
                sprompt = self.prompt
            # prompt can have embedded newlines (which are printed)
            sprompt = sprompt.replace(r'\012', '\n')
            sprompt = sprompt.replace(r'\n', '\n')
            fields[6] = sprompt
        # delete trailing null parameters
        for i in [6, 5, 4]:
            if fields[i] != "":
                break
            del fields[i]
        if dolist:
            return fields
        else:
            return ','.join(fields)

    #--------------------------------------------
    # special methods to give desired object syntax
    #--------------------------------------------

    # allow parameter object to be used in arithmetic expression
    def __coerce__(self, other):
        return coerce(self.get(native=1), other)

    # fields are accessible as attributes
    def __getattr__(self, field):
        if field[:1] == '_':
            raise AttributeError(field)
        try:
            return self._getField(field, native=1)
        except SyntaxError as e:
            if field in _IrafPar_attr_dict:
                # handle odd-ball case of new code accessing par's new
                # attr (e.g. scope), with old-code-cached version of par
                return _IrafPar_attr_dict[field]  # return unused default
            else:
                raise AttributeError(str(e))

    def __setattr__(self, attr, value):
        # don't allow any new parameters to be added
        if attr in self.__dict__:
            self.__dict__[attr] = value
        elif isParField(attr):
            #XXX should check=0 be used here?
self._setField(value, attr) else: raise AttributeError("No attribute %s for parameter %s" % (attr, self.name)) def __deepcopy__(self, memo): """Deep copy of this parameter object""" new = _EmptyClass() # shallow copy of dictionary suffices for most attributes new.__dict__ = self.__dict__.copy() # value, choice may be lists of atomic items if isinstance(self.value, list): new.value = list(self.value) if isinstance(self.choice, list): new.choice = list(self.choice) # choiceDict is OK with shallow copy because it will # always be reset if choices change new.__class__ = self.__class__ return new def __getstate__(self): """Return state info for pickle""" # choiceDict gets reconstructed if self.choice is None: return self.__dict__ else: d = self.__dict__.copy() d['choiceDict'] = None return d def __setstate__(self, state): """Restore state info from pickle""" self.__dict__ = state if self.choice is not None: self._setChoiceDict() def __str__(self): """Return readable description of parameter""" s = "<" + self.__class__.__name__ + " " + self.name + " " + self.type s = s + " " + self.mode + " " + repr(self.value) if self.choice is not None: schoice = list(map(self.toString, self.choice)) s = s + " |" + "|".join(schoice) + "|" else: s = s + " " + repr(self.min) + " " + repr(self.max) s = s + ' "' + self.prompt + '">' return s #-------------------------------------------- # private methods -- may be used by subclasses, but should # not be needed outside this module #-------------------------------------------- def _checkAttribs(self,strict=0): # by default no restrictions on attributes pass def _setChoice(self,s,strict=0): """Set choice parameter from string s""" clist = _getChoice(s,strict) self.choice = list(map(self._coerceValue, clist)) self._setChoiceDict() def _setChoiceDict(self): """Create dictionary for choice list""" # value is name of choice parameter (same as key) self.choiceDict = {} for c in self.choice: self.choiceDict[c] = c def _nullPrompt(self): """Returns value to use when answer to prompt is null string""" # most parameters just keep current default (even if None) return self.value def _optionalPrompt(self, mode): """Interactively prompt for parameter if necessary Prompt for value if (1) mode is hidden but value is undefined or bad, or (2) mode is query and value was not set on command line Never prompt for "u" mode parameters, which are local variables. 
""" if (self.mode == "h") or (self.mode == "a" and mode == "h"): # hidden parameter if not self.isLegal(): self.getWithPrompt() elif self.mode == "u": # "u" is a special mode used for local variables in CL scripts # They should never prompt under any circumstances if not self.isLegal(): raise ValueError( "Attempt to access undefined local variable `%s'" % self.name) else: # query parameter if self.isCmdline()==0: self.getWithPrompt() def _getPFilename(self,native,prompt): """Get p_filename field for this parameter Same as get for non-list params """ return self.get(native=native,prompt=prompt) def _getPType(self): """Get underlying datatype for this parameter Just self.type for normal params """ return self.type def _getField(self, field, native=0, prompt=1): """Get a parameter field value""" try: # expand field name using minimum match field = _getFieldDict[field] except KeyError as e: # re-raise the exception with a bit more info raise SyntaxError("Cannot get field " + field + " for parameter " + self.name + "\n" + str(e)) if field == "p_value": # return value of parameter # Note that IRAF returns the filename for list parameters # when p_value is used. I consider this a bug, and it does # not appear to be used by any cl scripts or SPP programs # in either IRAF or STSDAS. It is also in conflict with # the IRAF help documentation. I am making p_value exactly # the same as just a simple CL parameter reference. return self.get(native=native,prompt=prompt) elif field == "p_name": return self.name elif field == "p_xtype": return self.type elif field == "p_type": return self._getPType() elif field == "p_mode": return self.mode elif field == "p_prompt": return self.prompt elif field == "p_scope": return self.scope elif field == "p_default" or field == "p_filename": # these all appear to be equivalent -- they just return the # current PFilename of the parameter (which is the same as the value # for non-list parameters, and is the filename for list parameters) return self._getPFilename(native,prompt) elif field == "p_maximum": if native: return self.max else: return self.toString(self.max) elif field == "p_minimum": if self.choice is not None: if native: return self.choice else: schoice = list(map(self.toString, self.choice)) return "|" + "|".join(schoice) + "|" else: if native: return self.min else: return self.toString(self.min) else: # XXX unimplemented fields: # p_length: maximum string length in bytes -- what to do with it? raise RuntimeError("Program bug in IrafPar._getField()\n" + "Requested field " + field + " for parameter " + self.name) def _setField(self, value, field, check=1): """Set a parameter field value""" try: # expand field name using minimum match field = _setFieldDict[field] except KeyError as e: raise SyntaxError("Cannot set field " + field + " for parameter " + self.name + "\n" + str(e)) if field == "p_prompt": self.prompt = irafutils.removeEscapes(irafutils.stripQuotes(value)) elif field == "p_value": self.set(value,check=check) elif field == "p_filename": # this is only relevant for list parameters (*imcur, *gcur, etc.) 
self.set(value,check=check) elif field == "p_scope": self.scope = value elif field == "p_maximum": self.max = self._coerceOneValue(value) elif field == "p_minimum": if isinstance(value,str) and '|' in value: self._setChoice(irafutils.stripQuotes(value)) else: self.min = self._coerceOneValue(value) elif field == "p_mode": # not doing any type or value checking here -- setting mode is # rare, so assume that it is being done correctly self.mode = irafutils.stripQuotes(value) else: raise RuntimeError("Program bug in IrafPar._setField()" + "Requested field " + field + " for parameter " + self.name) def _coerceValue(self,value,strict=0): """Coerce parameter to appropriate type Should accept None or null string. """ return self._coerceOneValue(value,strict) def _coerceOneValue(self,value,strict=0): """Coerce a scalar parameter to the appropriate type Default implementation simply prevents direct use of base class. Should accept None or null string. """ raise NotImplementedError("class IrafPar cannot be used directly") # ----------------------------------------------------- # IRAF array parameter base class # ----------------------------------------------------- class IrafArrayPar(IrafPar): """IRAF array parameter class""" def __init__(self,fields,strict=0): orig_len = len(fields) if orig_len < 3: raise SyntaxError("At least 3 fields must be given") # # all the attributes that are going to get defined # self.__dict__.update(_IrafPar_attr_dict) self.name = fields[0] self.type = fields[1] self.mode = fields[2] self.__dict__['shape'] = None # # for array parameters, dimensions follow mode field # and values come from fields after prompt # if len(fields)<4 or fields[3] is None: raise ValueError("Missing dimension field for array parameter") ndim = int(fields[3]) if len(fields) < 4+2*ndim: raise ValueError("Missing array shape fields for array parameter") shape = [] array_size = 1 for i in range(ndim): shape.append(int(fields[4+2*i])) array_size = array_size*shape[-1] self.shape = tuple(shape) nvstart = 7+2*ndim fields.extend([""]*(nvstart-len(fields))) fields.extend([None]*(nvstart+array_size-len(fields))) if len(fields) > nvstart+array_size: raise SyntaxError("Too many values for array" + " for parameter " + self.name) # self.value = [None]*array_size self.value = self._coerceValue(fields[nvstart:],strict) if fields[nvstart-3] is not None and '|' in fields[nvstart-3]: self._setChoice(fields[nvstart-3].strip(),strict) if fields[nvstart-2] is not None: if orig_len < nvstart: warning("Max value illegal when choice list given" + " for parameter " + self.name + " (probably missing comma)", strict) # try to recover by assuming max string is prompt #XXX risky -- all init values might be off by one fields[nvstart-1] = fields[nvstart-2] fields[nvstart-2] = None else: warning("Max value illegal when choice list given" + " for parameter " + self.name, strict) else: self.min = self._coerceOneValue(fields[nvstart-3],strict) self.max = self._coerceOneValue(fields[nvstart-2],strict) if fields[nvstart-1] is not None: self.prompt = irafutils.removeEscapes( irafutils.stripQuotes(fields[nvstart-1])) else: self.prompt = '' if self.min not in [None, INDEF] and \ self.max not in [None, INDEF] and self.max < self.min: warning("Maximum " + str(self.max) + " is less than minimum " + \ str(self.min) + " for parameter " + self.name, strict) self.min, self.max = self.max, self.min # # check attributes to make sure they are appropriate for # this parameter type (e.g. 
some do not allow choice list # or min/max) # self._checkAttribs(strict) # # check parameter value to see if it is correct # try: self.checkValue(self.value,strict) except ValueError as e: warning("Illegal initial value for parameter\n" + str(e), strict, exception=ValueError) # Set illegal values to None, just like IRAF self.value = None #-------------------------------------------- # public methods #-------------------------------------------- def save(self, dolist=0): """Return .par format string for this parameter If dolist is set, returns fields as a list of strings. Default is to return a single string appropriate for writing to a file. """ quoted = not dolist array_size = 1 for d in self.shape: array_size = d*array_size ndim = len(self.shape) fields = (7+2*ndim+len(self.value))*[""] fields[0] = self.name fields[1] = self.type fields[2] = self.mode fields[3] = str(ndim) next = 4 for d in self.shape: fields[next] = str(d); next += 1 fields[next] = '1'; next += 1 nvstart = 7+2*ndim if self.choice is not None: schoice = list(map(self.toString, self.choice)) schoice.insert(0,'') schoice.append('') fields[nvstart-3] = repr('|'.join(schoice)) elif self.min not in [None,INDEF]: fields[nvstart-3] = self.toString(self.min,quoted=quoted) # insert an escaped line break before min field if quoted: fields[nvstart-3] = '\\\n' + fields[nvstart-3] if self.max not in [None,INDEF]: fields[nvstart-2] = self.toString(self.max,quoted=quoted) if self.prompt: if quoted: sprompt = repr(self.prompt) else: sprompt = self.prompt # prompt can have embedded newlines (which are printed) sprompt = sprompt.replace(r'\012', '\n') sprompt = sprompt.replace(r'\n', '\n') fields[nvstart-1] = sprompt for i in range(len(self.value)): fields[nvstart+i] = self.toString(self.value[i],quoted=quoted) # insert an escaped line break before value fields if dolist: return fields else: fields[nvstart] = '\\\n' + fields[nvstart] return ','.join(fields) def dpar(self, cl=1): """Return dpar-style executable assignment for parameter Default is to write CL version of code; if cl parameter is false, writes Python executable code instead. Note that dpar doesn't even work for arrays in the CL, so we just use Python syntax here. """ sval = list(map(self.toString, self.value, len(self.value)*[1])) for i in range(len(sval)): if sval[i] == "": sval[i] = "None" s = "%s = [%s]" % (self.name, ', '.join(sval)) return s def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"): """Return value of this parameter as a string (or in native format if native is non-zero.)""" if field: return self._getField(field,native=native,prompt=prompt) # may prompt for value if prompt flag is set #XXX should change _optionalPrompt so we prompt for each element of #XXX the array separately? I think array parameters are #XXX not useful as non-hidden params. if prompt: self._optionalPrompt(mode) if index is not None: sumindex = self._sumindex(index) try: if native: return self.value[sumindex] else: return self.toString(self.value[sumindex]) except IndexError: # should never happen raise SyntaxError("Illegal index [" + repr(sumindex) + "] for array parameter " + self.name) elif native: # return object itself for an array because it is # indexable, can have values assigned, etc. return self else: # return blank-separated string of values for array return str(self) def set(self, value, field=None, index=None, check=1): """Set value of this parameter from a string or other value. Field is optional parameter field (p_prompt, p_minimum, etc.) 
        Index is optional array index (zero-based).  Set check=0 to
        assign the value without checking to see if it is within the
        min-max range or in the choice list."""
        if index is not None:
            sumindex = self._sumindex(index)
            try:
                value = self._coerceOneValue(value)
                if check:
                    self.value[sumindex] = self.checkOneValue(value)
                else:
                    self.value[sumindex] = value
                return
            except IndexError:
                # should never happen
                raise SyntaxError("Illegal index [" + repr(sumindex) +
                        "] for array parameter " + self.name)

        if field:
            self._setField(value, field, check=check)
        else:
            if check:
                self.value = self.checkValue(value)
            else:
                self.value = self._coerceValue(value)
            self.setChanged()

    def checkValue(self, value, strict=0):
        """Check and convert a parameter value.

        Raises an exception if the value is not permitted for this
        parameter.  Otherwise returns the value (converted to the right type.)
        """
        v = self._coerceValue(value, strict)
        for i in range(len(v)):
            self.checkOneValue(v[i], strict=strict)
        return v

    #--------------------------------------------
    # special methods
    #--------------------------------------------

    # array parameters can be subscripted
    # note subscripts start at zero, unlike CL subscripts
    # that start at one

    def __getitem__(self, index):
        return self.get(index=index, native=1)

    def __setitem__(self, index, value):
        self.set(value, index=index)

    def __str__(self):
        """Return readable description of parameter"""
        # This differs from non-arrays in that it returns a
        # print string with just the values.  That's because
        # the object itself is returned as the native value.
        sv = list(map(str, self.value))
        for i in range(len(sv)):
            if self.value[i] is None:
                sv[i] = "INDEF"
        return ' '.join(sv)

    def __len__(self):
        return len(self.value)

    #--------------------------------------------
    # private methods
    #--------------------------------------------

    def _sumindex(self, index=None):
        """Convert tuple index to 1-D index into value"""
        try:
            ndim = len(index)
        except TypeError:
            # turn index into a 1-tuple
            index = (index,)
            ndim = 1
        if len(self.shape) != ndim:
            raise ValueError("Index to %d-dimensional array %s has too %s dimensions" %
                    (len(self.shape), self.name,
                     ["many", "few"][len(self.shape) > ndim]))
        sumindex = 0
        for i in range(ndim-1, -1, -1):
            index1 = index[i]
            if index1 < 0 or index1 >= self.shape[i]:
                raise ValueError("Dimension %d index for array %s is out of bounds (value=%d)" %
                        (i+1, self.name, index1))
            sumindex = index1 + sumindex*self.shape[i]
        return sumindex

    def _getPType(self):
        """Get underlying datatype for this parameter (strip off 'a' for array params)"""
        return self.type[1:]

    def _coerceValue(self, value, strict=0):
        """Coerce parameter to appropriate type

        Should accept None or null string.  Must be an array.
        """
        try:
            if isinstance(value, str):
                # allow single blank-separated string as input
                value = value.split()
            if len(value) != len(self.value):
                raise IndexError
            v = len(self.value)*[0]
            for i in range(len(v)):
                v[i] = self._coerceOneValue(value[i], strict)
            return v
        except (IndexError, TypeError):
            raise ValueError("Value must be a " + repr(len(self.value)) +
                    "-element array for " + self.name)

    def isLegal(self):
        """Don't call checkValue for arrays"""
        try:
            return self.value is not None
        except ValueError:
            return 0

# -----------------------------------------------------
# IRAF string parameter mixin class
# -----------------------------------------------------

class _StringMixin:

    """IRAF string parameter mixin class"""

    #--------------------------------------------
    # public methods
    #--------------------------------------------

    def toString(self, value, quoted=0):
        """Convert a single (non-array) value of the appropriate type for
        this parameter to a string"""
        if value is None:
            return ""
        elif quoted:
            return repr(value)
        else:
            return value

    # slightly modified checkOneValue allows minimum match for
    # choice strings and permits null string as value
    def checkOneValue(self, v, strict=0):
        if v is None or v[:1] == ")":
            return v
        elif self.choice is not None:
            try:
                v = self.choiceDict[v]
            except minmatch.AmbiguousKeyError:
                clist = self.choiceDict.getall(v)
                raise ValueError("Parameter %s: "
                        "ambiguous value `%s', could be %s" %
                        (self.name, str(v), "|".join(clist)))
            except KeyError:
                raise ValueError("Parameter %s: "
                        "value `%s' is not in choice list (%s)" %
                        (self.name, str(v), "|".join(self.choice)))
        elif (self.min is not None and v < self.min):
            raise ValueError("Parameter %s: "
                    "value `%s' is less than minimum `%s'" %
                    (self.name, str(v), str(self.min)))
        elif (self.max is not None and v > self.max):
            raise ValueError("Parameter %s: "
                    "value `%s' is greater than maximum `%s'" %
                    (self.name, str(v), str(self.max)))
        return v

    #--------------------------------------------
    # private methods
    #--------------------------------------------

    def _checkAttribs(self, strict):
        """Check initial attributes to make sure they are legal"""
        if self.min:
            warning("Minimum value not allowed for string-type parameter " +
                    self.name, strict)
        self.min = None
        if self.max:
            if not self.prompt:
                warning("Maximum value not allowed for string-type parameter " +
                        self.name + " (probably missing comma)",
                        strict)
                # try to recover by assuming max string is prompt
                self.prompt = self.max
            else:
                warning("Maximum value not allowed for string-type parameter " +
                        self.name, strict)
        self.max = None
        # If not in strict mode, allow file (f) to act just like string (s).
# Otherwise choice is also forbidden for file type if strict and self.type == "f" and self.choice: warning("Illegal choice value for type '" + self.type + "' for parameter " + self.name, strict) self.choice = None def _setChoiceDict(self): """Create min-match dictionary for choice list""" # value is full name of choice parameter self.choiceDict = minmatch.MinMatchDict() for c in self.choice: self.choiceDict.add(c, c) def _nullPrompt(self): """Returns value to use when answer to prompt is null string""" # for string, null string is a legal value # keep current default unless it is None if self.value is None: return "" else: return self.value def _coerceOneValue(self,value,strict=0): if value is None: return value elif isinstance(value,str): # strip double quotes and remove escapes before quotes return irafutils.removeEscapes(irafutils.stripQuotes(value)) else: return str(value) # ----------------------------------------------------- # IRAF string parameter class # ----------------------------------------------------- class IrafParS(_StringMixin, IrafPar): """IRAF string parameter class""" pass # ----------------------------------------------------- # IRAF string array parameter class # ----------------------------------------------------- class IrafParAS(_StringMixin,IrafArrayPar): """IRAF string array parameter class""" pass # ----------------------------------------------------- # IRAF boolean parameter mixin class # ----------------------------------------------------- class _BooleanMixin: """IRAF boolean parameter mixin class""" #-------------------------------------------- # public methods #-------------------------------------------- def toString(self, value, quoted=0): if value in [None, INDEF]: return "" elif isinstance(value,str): # presumably an indirection value ')task.name' if quoted: return repr(value) else: return value else: # must be internal yes, no value return str(value) #-------------------------------------------- # private methods #-------------------------------------------- def _checkAttribs(self, strict): """Check initial attributes to make sure they are legal""" if self.min: warning("Minimum value not allowed for boolean-type parameter " + self.name, strict) self.min = None if self.max: if not self.prompt: warning("Maximum value not allowed for boolean-type parameter " + self.name + " (probably missing comma)", strict) # try to recover by assuming max string is prompt self.prompt = self.max else: warning("Maximum value not allowed for boolean-type parameter " + self.name, strict) self.max = None if self.choice: warning("Choice values not allowed for boolean-type parameter " + self.name, strict) self.choice = None # accepts special yes, no objects, integer values 0,1 or # string 'yes','no' and variants # internal value is yes, no, None/INDEF, or indirection string def _coerceOneValue(self,value,strict=0): if value == INDEF: return INDEF elif value is None or value == "": return None elif value in (1, 1.0, yes, "yes", "YES", "y", "Y", True): return yes elif value in (0, 0.0, no, "no", "NO", "n", "N", False): return no elif isinstance(value,str): v2 = irafutils.stripQuotes(value.strip()) if v2 == "" or v2 == "INDEF" or \ ((not strict) and (v2.upper() == "INDEF")): return INDEF elif v2[0:1] == ")": # assume this is indirection -- just save it as a string return v2 raise ValueError("Parameter %s: illegal boolean value %s or type %s" % (self.name, repr(value), str(type(value)))) # ----------------------------------------------------- # IRAF boolean parameter class # 
----------------------------------------------------- class IrafParB(_BooleanMixin,IrafPar): """IRAF boolean parameter class""" pass # ----------------------------------------------------- # IRAF boolean array parameter class # ----------------------------------------------------- class IrafParAB(_BooleanMixin,IrafArrayPar): """IRAF boolean array parameter class""" pass # ----------------------------------------------------- # IRAF integer parameter mixin class # ----------------------------------------------------- class _IntMixin: """IRAF integer parameter mixin class""" #-------------------------------------------- # public methods #-------------------------------------------- def toString(self, value, quoted=0): if value is None: return "" else: return str(value) #-------------------------------------------- # private methods #-------------------------------------------- # coerce value to integer def _coerceOneValue(self,value,strict=0): if value == INDEF: return INDEF elif value is None or isinstance(value,int): return value elif value in ("", "None", "NONE"): return None elif isinstance(value,float): # try converting to integer try: return int(value) except (ValueError, OverflowError): pass elif isinstance(value,str): s2 = irafutils.stripQuotes(value.strip()) if s2 == "INDEF" or \ ((not strict) and (s2.upper() == "INDEF")): return INDEF elif s2[0:1] == ")": # assume this is indirection -- just save it as a string return s2 elif s2[-1:] == "x": # hexadecimal return int(s2[:-1],16) elif "." in s2: # try interpreting as a float and converting to integer try: return int(float(s2)) except (ValueError, OverflowError): pass else: try: return int(s2) except ValueError: pass else: # maybe it has an int method try: return int(value) except ValueError: pass raise ValueError("Parameter %s: illegal integer value %s" % (self.name, repr(value))) # ----------------------------------------------------- # IRAF integer parameter class # ----------------------------------------------------- class IrafParI(_IntMixin,IrafPar): """IRAF integer parameter class""" pass # ----------------------------------------------------- # IRAF integer array parameter class # ----------------------------------------------------- class IrafParAI(_IntMixin,IrafArrayPar): """IRAF integer array parameter class""" pass # ----------------------------------------------------- # Strict integer parameter mixin class # ----------------------------------------------------- class _StrictIntMixin(_IntMixin): """Strict integer parameter mixin class""" #-------------------------------------------- # public methods #-------------------------------------------- def toString(self, value, quoted=0): return str(value) #-------------------------------------------- # private methods #-------------------------------------------- # coerce value to integer def _coerceOneValue(self,value,strict=0): if value is None or isinstance(value,int): return value elif isinstance(value,str): s2 = irafutils.stripQuotes(value.strip()) if s2[-1:] == "x": # hexadecimal return int(s2[:-1],16) elif s2 == '': raise ValueError('Parameter '+self.name+ \ ': illegal empty integer value') else: # see if it is a stringified int try: return int(s2) except ValueError: pass # otherwise it is not a strict integer raise ValueError("Parameter %s: illegal integer value %s" % (self.name, repr(value))) # ----------------------------------------------------- # Strict integer parameter class # ----------------------------------------------------- class 
StrictParI(_StrictIntMixin,IrafPar): """Strict integer parameter class""" pass # ----------------------------------------------------- # IRAF real parameter mixin class # ----------------------------------------------------- _re_d = re.compile(r'[Dd]') _re_colon = re.compile(r':') class _RealMixin: """IRAF real parameter mixin class""" #-------------------------------------------- # public methods #-------------------------------------------- def toString(self, value, quoted=0): if value is None: return "" else: return str(value) #-------------------------------------------- # private methods #-------------------------------------------- def _checkAttribs(self, strict): """Check initial attributes to make sure they are legal""" if self.choice: warning("Choice values not allowed for real-type parameter " + self.name, strict) self.choice = None # coerce value to real def _coerceOneValue(self,value,strict=0): if value == INDEF: return INDEF elif value is None or isinstance(value,float): return value elif value in ("", "None", "NONE"): return None elif isinstance(value, int_types): return float(value) elif isinstance(value,str): s2 = irafutils.stripQuotes(value.strip()) if s2 == "INDEF" or \ ((not strict) and (s2.upper() == "INDEF")): return INDEF elif s2[0:1] == ")": # assume this is indirection -- just save it as a string return s2 # allow +dd:mm:ss.s sexagesimal format for floats fvalue = 0.0 vscale = 1.0 vsign = 1 i1 = 0 mm = _re_colon.search(s2) if mm is not None: if s2[0:1] == "-": i1 = 1 vsign = -1 elif s2[0:1] == "+": i1 = 1 while mm is not None: i2 = mm.start() fvalue = fvalue + int(s2[i1:i2])/vscale i1 = i2+1 vscale = vscale*60.0 mm = _re_colon.search(s2,i1) # special handling for d exponential notation mm = _re_d.search(s2,i1) try: if mm is None: return vsign*(fvalue + float(s2[i1:])/vscale) else: return vsign*(fvalue + \ float(s2[i1:mm.start()]+"E"+s2[mm.end():])/vscale) except ValueError: pass else: # maybe it has a float method try: return float(value) except ValueError: pass raise ValueError("Parameter %s: illegal float value %s" % (self.name, repr(value))) # ----------------------------------------------------- # IRAF real parameter class # ----------------------------------------------------- class IrafParR(_RealMixin,IrafPar): """IRAF real parameter class""" pass # ----------------------------------------------------- # IRAF real array parameter class # ----------------------------------------------------- class IrafParAR(_RealMixin,IrafArrayPar): """IRAF real array parameter class""" pass # ----------------------------------------------------- # Strict real parameter mixin class # ----------------------------------------------------- class _StrictRealMixin(_RealMixin): """Strict real parameter mixin class""" #-------------------------------------------- # public methods #-------------------------------------------- def toString(self, value, quoted=0): return str(value) #-------------------------------------------- # private methods #-------------------------------------------- # coerce value to real def _coerceOneValue(self,value,strict=0): if value is None or isinstance(value,float): return value elif isinstance(value, int_types): return float(value) elif isinstance(value,str): s2 = irafutils.stripQuotes(value.strip()) if s2 == '': raise ValueError('Parameter '+self.name+ \ ': illegal empty float value') # allow +dd:mm:ss.s sexagesimal format for floats fvalue = 0.0 vscale = 1.0 vsign = 1 i1 = 0 mm = _re_colon.search(s2) if mm is not None: if s2[0:1] == "-": i1 = 1 vsign = 
-1 elif s2[0:1] == "+": i1 = 1 while mm is not None: i2 = mm.start() fvalue = fvalue + int(s2[i1:i2])/vscale i1 = i2+1 vscale = vscale*60.0 mm = _re_colon.search(s2,i1) # special handling for d exponential notation mm = _re_d.search(s2,i1) try: if mm is None: return vsign*(fvalue + float(s2[i1:])/vscale) else: return vsign*(fvalue + \ float(s2[i1:mm.start()]+"E"+s2[mm.end():])/vscale) except ValueError: pass # see if it's a stringified float try: return float(s2) except ValueError: raise ValueError("Parameter %s: illegal float value %s" % (self.name, repr(value))) # Otherwise it is not a strict float raise ValueError("Parameter %s: illegal float value %s" % (self.name, repr(value))) # ----------------------------------------------------- # Strict real parameter class # ----------------------------------------------------- class StrictParR(_StrictRealMixin,IrafPar): """Strict real parameter class""" pass # ----------------------------------------------------- # Utility routine for parsing choice string # ----------------------------------------------------- _re_choice = re.compile(r'\|') def _getChoice(s, strict): clist = s.split("|") # string is allowed to start and end with "|", so ignore initial # and final empty strings if not clist[0]: del clist[0] if len(clist)>1 and not clist[-1]: del clist[-1] return clist stsci.tools-3.4.12/lib/stsci/tools/bitmask.py0000644001120100020070000006613313241163620022633 0ustar jhunkSTSCI\science00000000000000""" A module that provides functions for manipulating bitmasks and data quality (DQ) arrays. :Authors: Mihai Cara (contact: help@stsci.edu) :License: ``_ """ import sys import warnings import six import numpy as np from astropy.utils import deprecated __version__ = '1.1.1' __vdate__ = '30-January-2018' __author__ = 'Mihai Cara' __all__ = ['interpret_bits_value', 'interpret_bit_flags', 'bitmask2mask', 'bitfield_to_boolean_mask', 'is_bit_flag'] # Revision history: # 0.1.0 (29-March-2015) - initial release based on code from stsci.skypac # 0.1.1 (21-February-2017) - documentation typo fix # 0.2.0 (23-February-2017) - performance and stability improvements. Changed # default output mask type from numpy.uint8 to numpy.bool_. # 1.0.0 (16-March-2017) - Multiple enhancements: # 1. Deprecated 'interpret_bits_value()'in favor of # 'interpret_bit_flags()' which now takes 'flip_bits' argument to flip # bits in (list of) integer flags. # 2. Deprecated 'bitmask2mask()' in favor of 'bitfield_to_boolean_mask()' # which now also takes 'flip_bits' argument. # 3. Renamed arguments of 'interpret_bit_flags()' and # 'bitfield_to_boolean_mask()' to be more technically correct. # 4. 'interpret_bit_flags()' and 'bitfield_to_boolean_mask()' now # accept Python lists of bit flags (in addition to integer bitmasks # and string comma- (or '+') separated lists of bit flags). # 5. Added 'is_bit_flag()' function to check if an integer number has # only one bit set (i.e., that it is a power of 2). # 1.1.0 (29-January-2018) - Multiple enhancements: # 1. Added support for long type in Python 2.7 in # `interpret_bit_flags()` and `bitfield_to_boolean_mask()`. # 2. `interpret_bit_flags()` now always returns `int` (or `int` or `long` # in Python 2.7). Previously when input was of integer-like type # (i.e., `numpy.uint64`), it was not converted to Python `int`. # 3. `bitfield_to_boolean_mask()` will no longer crash when # `ignore_flags` argument contains bit flags beyond what the type of # the argument `bitfield` can hold. 
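# --- Illustrative aside (editor's sketch, not part of the revision log):
# is_bit_flag(), defined just below, accepts a value only when exactly one
# bit is set, i.e. when it is a positive power of 2. A common equivalent
# idiom, shown here purely for comparison:
def _demo_is_power_of_two(n):
    # n & (n - 1) clears the lowest set bit, so the result is zero
    # exactly when a positive n had a single bit set
    return n > 0 and (n & (n - 1)) == 0
assert _demo_is_power_of_two(8) and not _demo_is_power_of_two(12)
# --- end aside; revision history resumes below ---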
# 1.1.1 (30-January-2018) - Improved filtering of high bits in flags. # INT_TYPE = (int, long,) if sys.version_info < (3,) else (int,) MAX_UINT_TYPE = np.maximum_sctype(np.uint) SUPPORTED_FLAGS = int(np.bitwise_not( 0, dtype=MAX_UINT_TYPE, casting='unsafe' )) def is_bit_flag(n): """ Verifies if the input number is a bit flag (i.e., an integer number that is an integer power of 2). Parameters ---------- n : int A positive integer number. Non-positive integers are considered not to be "flags". Returns ------- bool ``True`` if input ``n`` is a bit flag and ``False`` if it is not. """ if n < 1: return False return bin(n).count('1') == 1 def _is_int(n): return ( (isinstance(n, INT_TYPE) and not isinstance(n, bool)) or (isinstance(n, np.generic) and np.issubdtype(n, np.integer)) ) def interpret_bit_flags(bit_flags, flip_bits=None): """ Converts input bit flags to a single integer value (bitmask) or `None`. When input is a list of flags (either a Python list of integer flags or a string of comma- or '+'-separated list of flags), the returned bitmask is obtained by summing input flags. .. note:: In order to flip the bits of the returned bitmask, for input of `str` type, prepend '~' to the input string. '~' must be prepended to the *entire string* and not to each bit flag! For input that is already a bitmask or a Python list of bit flags, set `flip_bits` to `True` in order to flip the bits of the returned bitmask. Parameters ---------- bit_flags : int, str, list, None An integer bitmask or flag, `None`, a string of comma- or '+'-separated list of integer bit flags, or a Python list of integer bit flags. If `bit_flags` is a `str` and if it is prepended with '~', then the output bitmask will have its bits flipped (compared to simple sum of input flags). For input `bit_flags` that is already a bitmask or a Python list of bit flags, bit-flipping can be controlled through `flip_bits` parameter. flip_bits : bool, None Indicates whether or not to flip the bits of the returned bitmask obtained from input bit flags. This parameter must be set to `None` when input `bit_flags` is either `None` or a Python list of flags. Returns ------- bitmask : int or None Returns an integer bit mask formed from the input bit value or `None` if input `bit_flags` parameter is `None` or an empty string. If input string value was prepended with '~' (or `flip_bits` was set to `True`), then returned value will have its bits flipped (inverse mask). Examples -------- >>> from stsci.tools.bitmask import interpret_bit_flags >>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28)) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16')) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16')) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)')) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16])) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True)) '1111111111100011' """ has_flip_bits = flip_bits is not None flip_bits = bool(flip_bits) allow_non_flags = False if _is_int(bit_flags): return (~int(bit_flags) if flip_bits else int(bit_flags)) elif bit_flags is None: if has_flip_bits: raise TypeError( "Keyword argument 'flip_bits' must be set to 'None' when " "input 'bit_flags' is None." ) return None elif isinstance(bit_flags, six.string_types): if has_flip_bits: raise TypeError( "Keyword argument 'flip_bits' is not permitted for " "comma-separated string lists of bit flags. 
Prepend '~' to " "the string to indicate bit-flipping." ) bit_flags = str(bit_flags).strip() if bit_flags.upper() in ['', 'NONE', 'INDEF']: return None # check whether bitwise-NOT is present and if it is, check that it is # in the first position: bitflip_pos = bit_flags.find('~') if bitflip_pos == 0: flip_bits = True bit_flags = bit_flags[1:].lstrip() else: if bitflip_pos > 0: raise ValueError("Bitwise-NOT must precede bit flag list.") flip_bits = False # basic check for correct use of parenthesis: while True: nlpar = bit_flags.count('(') nrpar = bit_flags.count(')') if nlpar == 0 and nrpar == 0: break if nlpar != nrpar: raise ValueError("Unbalanced parantheses in bit flag list.") lpar_pos = bit_flags.find('(') rpar_pos = bit_flags.rfind(')') if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1): raise ValueError("Incorrect syntax (incorrect use of " "parenthesis) in bit flag list.") bit_flags = bit_flags[1:-1].strip() if ',' in bit_flags: bit_flags = bit_flags.split(',') elif '+' in bit_flags: bit_flags = bit_flags.split('+') else: if bit_flags == '': raise ValueError( "Empty bit flag lists not allowed when either bitwise-NOT " "or parenthesis are present." ) bit_flags = [bit_flags] allow_non_flags = len(bit_flags) == 1 elif hasattr(bit_flags, '__iter__'): if not all([_is_int(flag) for flag in bit_flags]): raise TypeError("Each bit flag in a list must be an integer.") else: raise TypeError("Unsupported type for argument 'bit_flags'.") bitset = set(map(int, bit_flags)) if len(bitset) != len(bit_flags): warnings.warn("Duplicate bit flags will be ignored") bitmask = 0 for v in bitset: if not is_bit_flag(v) and not allow_non_flags: raise ValueError("Input list contains invalid (not powers of two) " "bit flags") bitmask += v if flip_bits: bitmask = ~bitmask return bitmask def bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, good_mask_value=True, dtype=np.bool_): """ bitfield_to_boolean_mask(bitfield, ignore_flags=None, flip_bits=None, \ good_mask_value=True, dtype=numpy.bool\_) Converts an array of bit fields to a boolean (or integer) mask array according to a bitmask constructed from the supplied bit flags (see ``ignore_flags`` parameter). This function is particularly useful to convert data quality arrays to boolean masks with selective filtering of DQ flags. Parameters ---------- bitfield : numpy.ndarray An array of bit flags. By default, values different from zero are interpreted as "bad" values and values equal to zero are considered as "good" values. However, see ``ignore_flags`` parameter on how to selectively ignore some bits in the ``bitfield`` array data. ignore_flags : int, str, list, None (Default = 0) An integer bitmask, a Python list of bit flags, a comma- or '+'-separated string list of integer bit flags that indicate what bits in the input ``bitfield`` should be *ignored* (i.e., zeroed), or `None`. | Setting ``ignore_flags`` to `None` effectively will make `bitfield_to_boolean_mask` interpret all ``bitfield`` elements as "good" regardless of their value. | When ``ignore_flags`` argument is an integer bitmask, it will be combined using bitwise-NOT and bitwise-AND with each element of the input ``bitfield`` array (``~ignore_flags & bitfield``). If the resultant bitfield element is non-zero, that element will be interpreted as a "bad" in the output boolean mask and it will be interpreted as "good" otherwise. 
``flip_bits`` parameter may be used to flip the bits (``bitwise-NOT``) of the bitmask thus effectively changing the meaning of the ``ignore_flags`` parameter from "ignore" to "use only" these flags. .. note:: Setting ``ignore_flags`` to 0 effectively will assume that all non-zero elements in the input ``bitfield`` array are to be interpreted as "bad". | When ``ignore_flags`` argument is an Python list of integer bit flags, these flags are added together to create an integer bitmask. Each item in the list must be a flag, i.e., an integer that is an integer power of 2. In order to flip the bits of the resultant bitmask, use ``flip_bits`` parameter. | Alternatively, ``ignore_flags`` may be a string of comma- or '+'-separated list of integer bit flags that should be added together to create an integer bitmask. For example, both ``'4,8'`` and ``'4+8'`` are equivalent and indicate that bit flags 4 and 8 in the input ``bitfield`` array should be ignored when generating boolean mask. .. note:: ``'None'``, ``'INDEF'``, and empty (or all white space) strings are special values of string ``ignore_flags`` that are interpreted as `None`. .. note:: Each item in the list must be a flag, i.e., an integer that is an integer power of 2. In addition, for convenience, an arbitrary **single** integer is allowed and it will be interpretted as an integer bitmask. For example, instead of ``'4,8'`` one could simply provide string ``'12'``. .. note:: When ``ignore_flags`` is a `str` and when it is prepended with '~', then the meaning of ``ignore_flags`` parameters will be reversed: now it will be interpreted as a list of bit flags to be *used* (or *not ignored*) when deciding which elements of the input ``bitfield`` array are "bad". Following this convention, an ``ignore_flags`` string value of ``'~0'`` would be equivalent to setting ``ignore_flags=None``. .. warning:: Because prepending '~' to a string ``ignore_flags`` is equivalent to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used with string ``ignore_flags`` and it must be set to `None`. flip_bits : bool, None (Default = None) Specifies whether or not to invert the bits of the bitmask either supplied directly through ``ignore_flags`` parameter or built from the bit flags passed through ``ignore_flags`` (only when bit flags are passed as Python lists of integer bit flags). Occasionally, it may be useful to *consider only specific bit flags* in the ``bitfield`` array when creating a boolean mask as opposite to *ignoring* specific bit flags as ``ignore_flags`` behaves by default. This can be achieved by inverting/flipping the bits of the bitmask created from ``ignore_flags`` flags which effectively changes the meaning of the ``ignore_flags`` parameter from "ignore" to "use only" these flags. Setting ``flip_bits`` to `None` means that no bit flipping will be performed. Bit flipping for string lists of bit flags must be specified by prepending '~' to string bit flag lists (see documentation for ``ignore_flags`` for more details). .. warning:: This parameter can be set to either `True` or `False` **ONLY** when ``ignore_flags`` is either an integer bitmask or a Python list of integer bit flags. When ``ignore_flags`` is either `None` or a string list of flags, ``flip_bits`` **MUST** be set to `None`. 
good_mask_value : int, bool (Default = True) This parameter is used to derive the values that will be assigned to the elements in the output boolean mask array that correspond to the "good" bit fields (that are 0 after zeroing bits specified by ``ignore_flags``) in the input ``bitfield`` array. When ``good_mask_value`` is non-zero or `True` then values in the output boolean mask array corresponding to "good" bit fields in ``bitfield`` will be `True` (if ``dtype`` is `numpy.bool_`) or 1 (if ``dtype`` is of numerical type) and values of corresponding to "bad" flags will be `False` (or 0). When ``good_mask_value`` is zero or `False` then the values in the output boolean mask array corresponding to "good" bit fields in ``bitfield`` will be `False` (if ``dtype`` is `numpy.bool_`) or 0 (if ``dtype`` is of numerical type) and values of corresponding to "bad" flags will be `True` (or 1). dtype : data-type (Default = numpy.bool\_) The desired data-type for the output binary mask array. Returns ------- mask : numpy.ndarray Returns an array of the same dimensionality as the input ``bitfield`` array whose elements can have two possible values, e.g., `True` or `False` (or 1 or 0 for integer ``dtype``) according to values of to the input ``bitfield`` elements, ``ignore_flags`` parameter, and the ``good_mask_value`` parameter. Examples -------- >>> from stsci.tools import bitmask >>> import numpy as np >>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]]) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=int) array([[1, 1, 0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 1, 0, 0, 1]]) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=bool) array([[ True, True, False, False, True, False, False, True], [False, False, True, True, True, False, False, True]], dtype=bool) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, good_mask_value=0, dtype=int) array([[0, 0, 1, 0, 0, 1, 1, 0], [1, 0, 0, 0, 0, 1, 0, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=~6, good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, flip_bits=True, good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags='~(2+4)', good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=[2, 4], flip_bits=True, good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) """ bitfield = np.asarray(bitfield) if not np.issubdtype(bitfield.dtype, np.integer): raise TypeError("Input bitfield array must be of integer type.") ignore_mask = interpret_bit_flags(ignore_flags, flip_bits=flip_bits) if ignore_mask is None: if good_mask_value: mask = np.ones_like(bitfield, dtype=dtype) else: mask = np.zeros_like(bitfield, dtype=dtype) return mask # filter out bits beyond the maximum supported by the data type: ignore_mask = ignore_mask & SUPPORTED_FLAGS # invert the "ignore" mask: ignore_mask = np.bitwise_not(ignore_mask, dtype=bitfield.dtype, casting='unsafe') mask = np.empty_like(bitfield, dtype=np.bool_) np.bitwise_and(bitfield, ignore_mask, out=mask, casting='unsafe') if good_mask_value: np.logical_not(mask, out=mask) return mask.astype(dtype=dtype, subok=False, copy=False) @deprecated(since='3.4.6', message='', name='interpret_bits_value', alternative='interpret_bit_flags') def interpret_bits_value(val): 
""" Converts input bits value from string to a single integer value or None. If a comma- or '+'-separated set of values are provided, they are summed. .. note:: In order to flip the bits of the final result (after summation), for input of `str` type, prepend '~' to the input string. '~' must be prepended to the *entire string* and not to each bit flag! Parameters ---------- val : int, str, None An integer bit mask or flag, `None`, or a comma- or '+'-separated string list of integer bit values. If `val` is a `str` and if it is prepended with '~', then the output bit mask will have its bits flipped (compared to simple sum of input val). Returns ------- bitmask : int or None Returns and integer bit mask formed from the input bit value or `None` if input `val` parameter is `None` or an empty string. If input string value was prepended with '~', then returned value will have its bits flipped (inverse mask). Examples -------- >>> "{0:016b}".format(0xFFFF & interpret_bits_value(28) ) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bits_value('4,8,16') ) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bits_value('~4,8,16') ) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bits_value('~(4+8+16)') ) '1111111111100011' """ if isinstance(val, int) or val is None: return val else: val = str(val).strip() if val.startswith('~'): flip_bits = True val = val[1:].lstrip() else: flip_bits = False if val.startswith('('): if val.endswith(')'): val = val[1:-1].strip() else: raise ValueError('Unbalanced parantheses or incorrect syntax.') if ',' in val: valspl = val.split(',') bitmask = 0 for v in valspl: bitmask += int(v) elif '+' in val: valspl = val.split('+') bitmask = 0 for v in valspl: bitmask += int(v) elif val.upper() in ['', 'NONE', 'INDEF']: return None else: bitmask = int(val) if flip_bits: bitmask = ~bitmask return bitmask @deprecated(since='3.4.6', message='', name='bitmask2mask', alternative='bitfield_to_boolean_mask') def bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=np.bool_): """ bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.bool\_) Interprets an array of bit flags and converts it to a "binary" mask array. This function is particularly useful to convert data quality arrays to binary masks. Parameters ---------- bitmask : numpy.ndarray An array of bit flags. Values different from zero are interpreted as "bad" values and values equal to zero are considered as "good" values. However, see `ignore_bits` parameter on how to ignore some bits in the `bitmask` array. ignore_bits : int, str, None An integer bit mask, `None`, or a comma- or '+'-separated string list of integer bit values that indicate what bits in the input `bitmask` should be *ignored* (i.e., zeroed). If `ignore_bits` is a `str` and if it is prepended with '~', then the meaning of `ignore_bits` parameters will be reversed: now it will be interpreted as a list of bits to be *used* (or *not ignored*) when deciding what elements of the input `bitmask` array are "bad". The `ignore_bits` parameter is the integer sum of all of the bit values from the input `bitmask` array that should be considered "good" when creating the output binary mask. For example, if values in the `bitmask` array can be combinations of 1, 2, 4, and 8 flags and one wants to consider that values having *only* bit flags 2 and/or 4 as being "good", then `ignore_bits` should be set to 2+4=6. 
Then a `bitmask` element having values 2,4, or 6 will be considered "good", while an element with a value, e.g., 1+2=3, 4+8=12, etc. will be interpreted as "bad". Alternatively, one can enter a comma- or '+'-separated list of integer bit flags that should be added to obtain the final "good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to setting `ignore_bits` to 12. See :py:func:`interpret_bits_value` for examples. | Setting `ignore_bits` to `None` effectively will interpret all `bitmask` elements as "good" regardless of their value. | Setting `ignore_bits` to 0 effectively will assume that all non-zero elements in the input `bitmask` array are to be interpreted as "bad". | In order to reverse the meaning of the `ignore_bits` parameter from indicating bits in the values of `bitmask` elements that should be ignored when deciding which elements are "good" (these are the elements that are zero after ignoring `ignore_bits`), to indicating the bits should be used exclusively in deciding whether a `bitmask` element is "good", prepend '~' to the string value. For example, in order to use **only** (or **exclusively**) flags 4 and 8 (2nd and 3rd bits) in the values of the input `bitmask` array when deciding whether or not that element is "good", set `ignore_bits` to ``~4+8``, or ``~4,8``. To obtain the same effect with an `int` input value (except for 0), enter -(4+8+1)=-9. Following this convention, an `ignore_bits` string value of ``'~0'`` would be equivalent to setting ``ignore_bits=None``. good_mask_value : int, bool (Default = 1) This parameter is used to derive the values that will be assigned to the elements in the output `mask` array that correspond to the "good" flags (that are 0 after zeroing bits specified by `ignore_bits`) in the input `bitmask` array. When `good_mask_value` is non-zero or `True` then values in the output mask array corresponding to "good" bit flags in `bitmask` will be 1 (or `True` if `dtype` is `bool`) and values corresponding to "bad" flags will be 0. When `good_mask_value` is zero or `False` then values in the output mask array corresponding to "good" bit flags in `bitmask` will be 0 (or `False` if `dtype` is `bool`) and values corresponding to "bad" flags will be 1. dtype : data-type (Default = numpy.bool\_) The desired data-type for the output binary mask array. Returns ------- mask : numpy.ndarray Returns an array whose elements can have two possible values, e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according to the values of the input `bitmask` elements, `ignore_bits` parameter, and the `good_mask_value` parameter.
Examples -------- >>> from stsci.tools import bitmask >>> import numpy as np >>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]]) >>> bitmask.bitmask2mask(dqbits, ignore_bits=0, dtype=int) array([[1, 1, 0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 1, 0, 0, 1]]) >>> bitmask.bitmask2mask(dqbits, ignore_bits=0, dtype=bool) array([[ True, True, False, False, True, False, False, True], [False, False, True, True, True, False, False, True]], dtype=bool) >>> bitmask.bitmask2mask(dqbits, ignore_bits=6, good_mask_value=0, dtype=int) array([[0, 0, 1, 0, 0, 1, 1, 0], [1, 0, 0, 0, 0, 1, 0, 0]]) >>> bitmask.bitmask2mask(dqbits, ignore_bits=~6, good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitmask2mask(dqbits, ignore_bits='~(2+4)', good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) """ if not np.issubdtype(bitmask.dtype, np.integer): raise TypeError("Input 'bitmask' array must be of integer type.") ignore_bits = interpret_bits_value(ignore_bits) if ignore_bits is None: if good_mask_value: mask = np.ones_like(bitmask, dtype=dtype) else: mask = np.zeros_like(bitmask, dtype=dtype) return mask ignore_bits = ~bitmask.dtype.type(ignore_bits) mask = np.empty_like(bitmask, dtype=np.bool_) np.bitwise_and(bitmask, ignore_bits, out=mask, casting='unsafe') if good_mask_value: np.logical_not(mask, out=mask) return mask.astype(dtype=dtype, subok=False, copy=False) stsci.tools-3.4.12/lib/stsci/tools/capable.py0000644001120100020070000001551313241163620022564 0ustar jhunkSTSCI\science00000000000000""" Learn basic capabilities here (e.g. can we display graphics?). This is meant to be fast and light, having no complicated dependencies, so that any module can fearlessly import this without adverse affects or performance concerns. $Id$ """ from __future__ import division # confidence high import os, sys PY3K = sys.version_info[0] > 2 descrip = "basic capabilities file, last edited: 28 Dec 2017" def is_darwin_and_x(): """ Convenience function. Returns True if is an X11-linked Python/tkinter build on OSX. This is intended to be quick and easy without further imports. As a result, this relies on the assumption that on OSX, PyObjC is installed (only) in the Framework builds of Python. """ if not sys.platform == 'darwin': return False return which_darwin_linkage() == "x11" def which_darwin_linkage(force_otool_check=False): """ Convenience function. Returns one of ('x11', 'aqua') in answer to the question of whether this is an X11-linked Python/tkinter, or a natively built (framework, Aqua) one. This is only for OSX. This relies on the assumption that on OSX, PyObjC is installed in the Framework builds of Python. If it doesn't find PyObjC, this inspects the actual tkinter library binary via otool. One driving requirement here is to try to make the determination quickly and quietly without actually importing/loading any GUI libraries. We even want to avoid importing tkinter if we can. """ # sanity check assert sys.platform=='darwin', 'Incorrect usage, not on OSX' # If not forced to run otool, then make some quick and dirty # simple checks/assumptions, which do not add to startup time and do not # attempt to initialize any graphics. if not force_otool_check: # There will (for now) only ever be an aqua-linked Python/tkinter # when using Ureka on darwin, so this is an easy short-circuit check. if 'UR_DIR' in os.environ: return "aqua" # There will *usually* be PyObjC modules on sys.path on the natively- # linked Python. 
This is assumed to be always correct on Python 2.x, as # of 2012. This is kludgy but quick and effective. sp = ",".join(sys.path) sp = sp.lower().strip(',') if '/pyobjc' in sp or 'pyobjc,' in sp or 'pyobjc/' in sp or sp.endswith('pyobjc'): return "aqua" # Try one more thing - look for the physical PyObjC install dir under site-packages # The assumption above using sys.path does not seem to be correct as of the # combination of Python2.7.9/PyObjC3.0.4/2015. sitepacksloc = os.path.split(os.__file__)[0]+'/site-packages/objc' if os.path.exists(sitepacksloc): return "aqua" # OK, no trace of PyObjC found - need to fall through to the forced otool check. # Use otool shell command if PY3K: import tkinter as TKNTR else: import Tkinter as TKNTR import subprocess try: tk_dyn_lib = TKNTR._tkinter.__file__ except AttributeError: # happens on Ureka if 'UR_DIR' in os.environ: return 'aqua' else: return 'unknown' libs = subprocess.check_output(('/usr/bin/otool', '-L', tk_dyn_lib)).decode('ascii') if libs.find('/libX11.') >= 0: return "x11" else: return "aqua" def get_dc_owner(raises, mask_if_self): """ Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "".""" try: from pwd import getpwuid owner_uid = os.stat('/dev/console').st_uid self_uid = os.getuid() if mask_if_self and owner_uid == self_uid: return "" owner_name = getpwuid(owner_uid).pw_name return owner_name except Exception as e: if raises: raise e else: return str(e) OF_GRAPHICS = True if 'PYRAF_NO_DISPLAY' in os.environ or 'PYTOOLS_NO_DISPLAY' in os.environ: OF_GRAPHICS = False if OF_GRAPHICS and sys.platform == 'darwin': # # On OSX, there is an AppKit error where Python itself will abort if # tkinter operations (e.g. tkinter._test() ...) are attempted when running # from a remote terminal. In these situations, it is not even safe to put # the code in a try/except block, since the AppKit error seems to happen # *asynchronously* within ObjectiveC code. See PyRAF ticket #149. # # SO, let's try a quick simple test here (only on OSX) to find out if we # are the "console user". If we are not, then we don't even want to attempt # any windows/graphics calls. See "console user" here: # http://developer.apple.com/library/mac/#technotes/tn2083/_index.html # If we are the console user, we own /dev/console and can read from it. # When no one is logged in, /dev/console is owned by "root". When user "bob" # is logged in locally/physically, /dev/console is owned by "bob". # However, if "bob" restarts the X server while logged in, /dev/console # may be owned by "sysadmin" - so we check for that. # if 'PYRAF_YES_DISPLAY' not in os.environ: # the use of PYRAF_YES_DISPLAY is a temporary override while we # debug why a user might have no read-acces to /dev/console dc_owner = get_dc_owner(False, False) OF_GRAPHICS = dc_owner == 'sysadmin' or os.access("/dev/console", os.R_OK) # Add a double-check for remote X11 users. We *think* this is a smaller # set of cases, so we do it last minute here: if not OF_GRAPHICS: # On OSX, but logged in remotely. Normally (with native build) this # means there are no graphics. But, what if they're calling an # X11-linked Python? Then we should allow graphics to be attempted. 
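# --- Illustrative aside (editor's sketch, not part of capable.py): the
# otool test in which_darwin_linkage() reduces to listing the shared
# libraries a binary is linked against. A minimal standalone form,
# assuming an OSX host where /usr/bin/otool exists:
import subprocess
def _demo_linked_against_x11(binary_path):
    # parse `otool -L` output and look for an X11 client library
    out = subprocess.check_output(('/usr/bin/otool', '-L',
                                   binary_path)).decode('ascii')
    return out.find('/libX11.') >= 0
# --- end aside; the module-level graphics check continues below ---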
OF_GRAPHICS = is_darwin_and_x() # OF_GRAPHICS will be True here in only two cases (2nd should be rare): # An OSX Python build linked with X11, or # An OSX Python build linked natively where PyObjC was left out # After all that, we may have decided that we want graphics. Now # that we know it is ok to try to import tkinter, we can test if it # is there. If it is not, we are not capable of graphics. if OF_GRAPHICS : try : if PY3K: import tkinter as TKNTR else: import Tkinter as TKNTR except ImportError : TKINTER_IMPORT_FAILED = 1 OF_GRAPHICS = False # Using tkFileDialog from PyRAF (and maybe in straight TEAL) is crashing python # itself on OSX only. Allow on Linux. Mac: use this until PyRAF #171 fixed. OF_TKFD_IN_EPAR = True if sys.platform == 'darwin' and OF_GRAPHICS and \ not is_darwin_and_x(): # if framework ver OF_TKFD_IN_EPAR = 'TEAL_TRY_TKFD' in list(os.environ.keys()) stsci.tools-3.4.12/lib/stsci/tools/cfgpars.py0000644001120100020070000016556713112074216022640 0ustar jhunkSTSCI\science00000000000000""" Contains the ConfigObjPars class and any related functionality. $Id$ """ from __future__ import absolute_import, division, print_function # confidence high import copy, glob, os, stat, sys if sys.version_info[0] > 2: string_types = str else: string_types = basestring # ConfigObj modules from . import configobj, validate # Local modules from . import basicpar, eparoption, irafutils, taskpars, vtor_checks # Globals and useful functions APP_NAME = 'TEAL' TASK_NAME_KEY = '_task_name_' class DuplicateKeyError(Exception): pass class NoCfgFileError(Exception): pass def getAppDir(): """ Return our application dir. Create it if it doesn't exist. """ # Be sure the resource dir exists theDir = os.path.expanduser('~/.')+APP_NAME.lower() if not os.path.exists(theDir): try: os.mkdir(theDir) except OSError: print('Could not create "'+theDir+'" to save GUI settings.') theDir = "./"+APP_NAME.lower() return theDir def getObjectFromTaskArg(theTask, strict, setAllToDefaults): """ Take the arg (usually called theTask), which can be either a subclass of ConfigObjPars, or a string package name, or a .cfg filename - no matter what it is - take it and return a ConfigObjPars object. strict - bool - warning severity, passed to the ConfigObjPars() ctor setAllToDefaults - bool - if theTask is a pkg name, force all to defaults """ # Already in the form we need (instance of us or of subclass) if isinstance(theTask, ConfigObjPars): if setAllToDefaults: raise RuntimeError('Called getObjectFromTaskArg with existing'+\ ' object AND setAllToDefaults - is unexpected use case.') # If it is an existing object, make sure it's internal param list is # up to date with it's ConfigObj dict, since the user may have manually # edited the dict before calling us. theTask.syncParamList(False) # use strict somehow? # Note - some validation is done here in IrafPar creation, but it is # not the same validation done by the ConfigObj s/w (no check funcs). # Do we want to do that too here? 
return theTask # For example, a .cfg file if os.path.isfile(str(theTask)): try: return ConfigObjPars(theTask, strict=strict, setAllToDefaults=setAllToDefaults) except KeyError: # this might just be caused by a file sitting in the local cwd with # the same exact name as the package we want to import, let's see if theTask.find('.') > 0: # it has an extension, like '.cfg' raise # this really was an error # else we drop down to the next step - try it as a pkg name # Else it must be a Python package name to load if isinstance(theTask, str) and setAllToDefaults: # NOTE how we pass the task name string in setAllToDefaults return ConfigObjPars('', setAllToDefaults=theTask, strict=strict) else: return getParsObjForPyPkg(theTask, strict) def getEmbeddedKeyVal(cfgFileName, kwdName, dflt=None): """ Read a config file and pull out the value of a given keyword. """ # Assume this is a ConfigObj file. Use that s/w to quickly read it and # put it in dict format. Assume kwd is at top level (not in a section). # The input may also be a .cfgspc file. # # Only use ConfigObj here as a tool to generate a dict from a file - do # not use the returned object as a ConfigObj per se. As such, we can call # with "simple" format, ie. no cfgspc, no val'n, and "list_values"=False. try: junkObj = configobj.ConfigObj(cfgFileName, list_values=False) except: if kwdName == TASK_NAME_KEY: raise KeyError('Can not parse as a parameter config file: '+ \ '\n\t'+os.path.realpath(cfgFileName)) else: raise KeyError('Unfound key "'+kwdName+'" while parsing: '+ \ '\n\t'+os.path.realpath(cfgFileName)) if kwdName in junkObj: retval = junkObj[kwdName] del junkObj return retval # Not found if dflt is not None: del junkObj return dflt else: if kwdName == TASK_NAME_KEY: raise KeyError('Can not parse as a parameter config file: '+ \ '\n\t'+os.path.realpath(cfgFileName)) else: raise KeyError('Unfound key "'+kwdName+'" while parsing: '+ \ '\n\t'+os.path.realpath(cfgFileName)) def findCfgFileForPkg(pkgName, theExt, pkgObj=None, taskName=None): """ Locate the configuration files for/from/within a given python package. pkgName is a string python package name. This is used unless pkgObj is given, in which case pkgName is taken from pkgObj.__name__. theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is given as taskName, otherwise one is determined using the pkgName. Returns a tuple of (package-object, cfg-file-name). """ # arg check ext = theExt if ext[0] != '.': ext = '.'+theExt # Do the import, if needed pkgsToTry = {} if pkgObj: pkgsToTry[pkgObj.__name__] = pkgObj else: # First try something simple like a regular or dotted import try: fl = [] if pkgName.find('.') > 0: fl = [ pkgName[:pkgName.rfind('.')], ] pkgsToTry[str(pkgName)] = __import__(str(pkgName), fromlist=fl) except: throwIt = True # One last case to try is something like "csc_kill" from # "acstools.csc_kill", but this convenience capability will only be # allowed if the parent pkg (acstools) has already been imported. if isinstance(pkgName, string_types) and pkgName.find('.') < 0: matches = [x for x in sys.modules.keys() \ if x.endswith("."+pkgName)] if len(matches)>0: throwIt = False for mmm in matches: pkgsToTry[mmm] = sys.modules[mmm] if throwIt: raise NoCfgFileError("Unfound package or "+ext+" file via: "+\ "import "+str(pkgName)) # Now that we have the package object (or a few of them to try), for each # one find the .cfg or .cfgspc file, and return # Return as soon as ANY match is found. 
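# --- Illustrative aside (editor's sketch, not part of this function):
# getEmbeddedKeyVal() above is essentially a one-key ConfigObj read. A
# minimal standalone equivalent, using an in-memory list of lines in
# place of a real .cfg file ('mytask' is a made-up value):
_demo_cfg = configobj.ConfigObj(['_task_name_ = mytask'], list_values=False)
assert _demo_cfg.get(TASK_NAME_KEY) == 'mytask'
# --- end aside; the search loop over the candidate packages follows ---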
for aPkgName in pkgsToTry: aPkg = pkgsToTry[aPkgName] path = os.path.dirname(aPkg.__file__) if len(path) < 1: path = '.' flist = irafutils.rglob(path, "*"+ext) if len(flist) < 1: continue # Go through these and find the first one for the assumed or given task # name. The task name for 'BigBlackBox.drizzle' would be 'drizzle'. if taskName is None: taskName = aPkgName.split(".")[-1] flist.sort() for f in flist: # A .cfg file gets checked for _task_name_=val, but a .cfgspc file # will have a string check function signature as the val. if ext == '.cfg': itsTask = getEmbeddedKeyVal(f, TASK_NAME_KEY, '') else: # .cfgspc sigStr = getEmbeddedKeyVal(f, TASK_NAME_KEY, '') # .cfgspc file MUST have an entry for TASK_NAME_KEY w/ a default itsTask = vtor_checks.sigStrToKwArgsDict(sigStr)['default'] if itsTask == taskName: # We've found the correct file in an installation area. Return # the package object and the found file. return aPkg, f # What, are you still here? raise NoCfgFileError('No valid '+ext+' files found in package: "'+ \ str(pkgName)+'" for task: "'+str(taskName)+'"') def findAllCfgTasksUnderDir(aDir): """ Finds all installed tasks by examining any .cfg files found on disk at and under the given directory, as an installation might be. This returns a dict of { file name : task name } """ retval = {} for f in irafutils.rglob(aDir, '*.cfg'): retval[f] = getEmbeddedKeyVal(f, TASK_NAME_KEY, '') return retval def getCfgFilesInDirForTask(aDir, aTask, recurse=False): """ This is a specialized function which is meant only to keep the same code from needlessly being much repeated throughout this application. This must be kept as fast and as light as possible. This checks a given directory for .cfg files matching a given task. If recurse is True, it will check subdirectories. If aTask is None, it returns all files and ignores aTask. """ if recurse: flist = irafutils.rglob(aDir, '*.cfg') else: flist = glob.glob(aDir+os.sep+'*.cfg') if aTask: retval = [] for f in flist: try: if aTask == getEmbeddedKeyVal(f, TASK_NAME_KEY, ''): retval.append(f) except Exception as e: print('Warning: '+str(e)) return retval else: return flist def getParsObjForPyPkg(pkgName, strict): """ Locate the appropriate ConfigObjPars (or subclass) within the given package. NOTE this begins the same way as getUsrCfgFilesForPyPkg(). Look for .cfg file matches in these places, in this order: 1 - any named .cfg file in current directory matching given task 2 - if there exists a ~/.teal/<taskname>.cfg file 3 - any named .cfg file in SOME*ENV*VAR directory matching given task 4 - the installed default .cfg file (with the given package) """ # Get the python package and its .cfg file - need this no matter what installedPkg, installedFile = findCfgFileForPkg(pkgName, '.cfg') theFile = None tname = getEmbeddedKeyVal(installedFile, TASK_NAME_KEY) # See if the user has any of their own .cfg files in the cwd for this task if theFile is None: flist = getCfgFilesInDirForTask(os.getcwd(), tname) if len(flist) > 0: if len(flist) == 1: # can skip file times sort theFile = flist[0] else: # There are a few different choices. In the absence of # requirements to the contrary, just take the latest. Set up a # list of tuples of (mtime, fname) so we can sort by mtime.
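# --- Illustrative aside (editor's sketch): tuples sort on their first
# element, so decorating each file name with its modification time and
# sorting leaves the newest file last. The selection below is equivalent
# to this one-liner over the same flist:
_demo_newest = sorted((os.stat(f)[stat.ST_MTIME], f) for f in flist)[-1][1]
# --- end aside; the actual decorate-sort-undecorate code follows ---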
ftups = [ (os.stat(f)[stat.ST_MTIME], f) for f in flist] ftups.sort() theFile = ftups[-1][1] # See if the user has any of their own app-dir .cfg files for this task if theFile is None: flist = getCfgFilesInDirForTask(getAppDir(), tname) # verifies tname flist = [f for f in flist if os.path.basename(f) == tname+'.cfg'] if len(flist) > 0: theFile = flist[0] assert len(flist) == 1, str(flist) # should never happen # Add code to check an env. var defined area? (speak to users first) # Did we find one yet? If not, use the installed version useInstVer = False if theFile is None: theFile = installedFile useInstVer = True # Create a stand-in instance from this file. Force a read-only situation # if we are dealing with the installed, (expected to be) unwritable file. return ConfigObjPars(theFile, associatedPkg=installedPkg, forceReadOnly=useInstVer, strict=strict) def getUsrCfgFilesForPyPkg(pkgName): """ See if the user has one of their own local .cfg files for this task, such as might be created automatically during the save of a read-only package, and return their names. """ # Get the python package and it's .cfg file thePkg, theFile = findCfgFileForPkg(pkgName, '.cfg') # See if the user has any of their own local .cfg files for this task tname = getEmbeddedKeyVal(theFile, TASK_NAME_KEY) flist = getCfgFilesInDirForTask(getAppDir(), tname) return flist def checkSetReadOnly(fname, raiseOnErr = False): """ See if we have write-privileges to this file. If we do, and we are not supposed to, then fix that case. """ if os.access(fname, os.W_OK): # We can write to this but it is supposed to be read-only. Fix it. # Take away usr-write, leave group and other alone, though it # may be simpler to just force/set it to: r--r--r-- or r-------- irafutils.setWritePrivs(fname, False, ignoreErrors= not raiseOnErr) def flattenDictTree(aDict): """ Takes a dict of vals and dicts (so, a tree) as input, and returns a flat dict (only one level) as output. All key-vals are moved to the top level. Sub-section dict names (keys) are ignored/dropped. If there are name collisions, an error is raised. """ retval = {} for k in aDict: val = aDict[k] if isinstance(val, dict): # This val is a dict, get its data (recursively) into a flat dict subDict = flattenDictTree(val) # Merge its dict of data into ours, watching for NO collisions rvKeySet = set(retval.keys()) sdKeySet = set(subDict.keys()) intr = rvKeySet.intersection(sdKeySet) if len(intr) > 0: raise DuplicateKeyError("Flattened dict already has "+ \ "key(s): "+str(list(intr))+" - cannot flatten this.") else: retval.update(subDict) else: if k in retval: raise DuplicateKeyError("Flattened dict already has key: "+\ k+" - cannot flatten this.") else: retval[k] = val return retval def countKey(theDict, name): """ Return the number of times the given par exists in this dict-tree, since the same key name may be used in different sections/sub-sections. """ retval = 0 for key in theDict: val = theDict[key] if isinstance(val, dict): retval += countKey(val, name) # recurse else: if key == name: retval += 1 # can't break, even tho we found a hit, other items on # this level will not be named "name", but child dicts # may have further counts return retval def findFirstPar(theDict, name, _depth=0): """ Find the given par. Return tuple: (its own (sub-)dict, its value). Returns the first match found, without checking whether the given key name is unique or whether it is used in multiple sections. 
""" for key in theDict: val = theDict[key] # print _depth*' ', key, str(val)[:40] if isinstance(val, dict): retval = findFirstPar(val, name, _depth=_depth+1) # recurse if retval is not None: return retval # else keep looking else: if key == name: return theDict, theDict[name] # else keep looking # if we get here then we have searched this whole (sub)-section and its # descendants, and found no matches. only raise if we are at the top. if _depth == 0: raise KeyError(name) else: return None def findScopedPar(theDict, scope, name): """ Find the given par. Return tuple: (its own (sub-)dict, its value). """ # Do not search (like findFirstPar), but go right to the correct # sub-section, and pick it up. Assume it is there as stated. if len(scope): theDict = theDict[scope] # ! only goes one level deep - enhance ! return theDict, theDict[name] # KeyError if unfound def setPar(theDict, name, value): """ Sets a par's value without having to give its scope/section. """ section, previousVal = findFirstPar(theDict, name) # "section" is the actual object, not a copy section[name] = value def mergeConfigObj(configObj, inputDict): """ Merge the inputDict values into an existing given configObj instance. The inputDict is a "flat" dict - it has no sections/sub-sections. The configObj may have sub-sections nested to any depth. This will raise a DuplicateKeyError if one of the inputDict keys is used more than once in configObj (e.g. within two different sub-sections). """ # Expanded upon Warren's version in astrodrizzle # Verify that all inputDict keys in configObj are unique within configObj for key in inputDict: if countKey(configObj, key) > 1: raise DuplicateKeyError(key) # Now update configObj with each inputDict item for key in inputDict: setPar(configObj, key, inputDict[key]) def integrityTestAllPkgCfgFiles(pkgObj, output=True): """ Given a package OBJECT, inspect it and find all installed .cfg file- using tasks under it. Then them one at a time via integrityTestTaskCfgFile, and report any/all errors. """ assert type(pkgObj) == type(os), \ "Expected module arg, got: "+str(type(pkgObj)) taskDict = findAllCfgTasksUnderDir(os.path.dirname(pkgObj.__file__)) # taskDict is { cfgFileName : taskName } errors = [] for fname in taskDict: taskName = taskDict[fname] try: if taskName: if output: print('In '+pkgObj.__name__+', checking task: '+ taskName+', file: '+fname) integrityTestTaskCfgFile(taskName, fname) except Exception as e: errors.append(str(e)) assert len(errors) == 0, 'Errors found while integrity testing .cfg '+ \ 'file(s) found under "'+pkgObj.__name__+'":\n'+ \ ('\n'.join(errors)) def integrityTestTaskCfgFile(taskName, cfgFileName=None): """ For a given task, inspect the given .cfg file (or simply find/use its installed .cfg file), and check those values against the defaults found in the installed .cfgspc file. They should be the same. If the file name is not given, the installed one is found and used. """ from . import teal # don't import above, to avoid circular import (may need to mv) if not cfgFileName: ignored, cfgFileName = findCfgFileForPkg(taskName, '.cfg') diffDict = teal.diffFromDefaults(cfgFileName, report=False) if len(diffDict) < 1: return # no error msg = 'The following par:value pairs from "'+cfgFileName+ \ '" are not the correct defaults: '+str(diffDict) raise RuntimeError(msg) class ConfigObjPars(taskpars.TaskPars, configobj.ConfigObj): """ This represents a task's dict of ConfigObj parameters. 
""" def __init__(self, cfgFileName, forUseWithEpar=True, setAllToDefaults=False, strict=True, associatedPkg=None, forceReadOnly=False): """ cfgFileName - string path/name of .cfg file forUseWithEpar - bool - will this be used in EPAR? setAllToDefaults - string is pkg name to import strict - bool - level of error/warning severity associatedPkg - loaded package object forceReadOnly - bool - make the .cfg file read-only """ self._forUseWithEpar = forUseWithEpar self._rcDir = getAppDir() self._allTriggers = None # all known triggers in this object self._allDepdcs = None # all known dependencies in this object self._allExecutes = None # all known codes-to-execute in this object self._neverWrite = [] # all keys which are NOT written out to .cfg self._debugLogger = None self._debugYetToPost = [] self.__assocPkg = associatedPkg # The __paramList pointer remains the same for the life of this object self.__paramList = [] # Set up ConfigObj stuff assert setAllToDefaults or os.path.isfile(cfgFileName), \ "Config file not found: "+cfgFileName self.__taskName = '' if setAllToDefaults: # they may not have given us a real file name here since they # just want defaults (in .cfgspc) so don't be too picky about # finding and reading the file. if isinstance(setAllToDefaults, str): # here they have very clearly said to load only the defaults # using the given name as the package name - below we will # have it imported in _findAssociatedConfigSpecFile() self.__taskName = setAllToDefaults setAllToDefaults = True cfgFileName = '' # ignore any given .cfg file, don't need one else: possible = os.path.splitext(os.path.basename(cfgFileName))[0] if os.path.isfile(cfgFileName): self.__taskName = getEmbeddedKeyVal(cfgFileName, TASK_NAME_KEY, possible) else: self.__taskName = possible else: # this is the real deal, expect a real file name self.__taskName = getEmbeddedKeyVal(cfgFileName, TASK_NAME_KEY) if forceReadOnly: checkSetReadOnly(cfgFileName) # Find the associated .cfgspc file (first make sure we weren't # given one by mistake) if not cfgFileName.endswith('.cfg') and \ self.__taskName.find('(default=') >= 0: # Handle case where they gave us a .cfgspc by mistake (no .cfg) # (basically reset a few things) cfgSpecPath = os.path.realpath(cfgFileName) setAllToDefaults = True cfgFileName = '' sigStr = getEmbeddedKeyVal(cfgSpecPath, TASK_NAME_KEY, '') self.__taskName = vtor_checks.sigStrToKwArgsDict(sigStr)['default'] else: cfgSpecPath = self._findAssociatedConfigSpecFile(cfgFileName) assert os.path.exists(cfgSpecPath), \ "Matching configspec not found! Expected: "+cfgSpecPath self.debug('ConfigObjPars: .cfg='+str(cfgFileName)+ \ ', .cfgspc='+str(cfgSpecPath)+ \ ', defaults='+str(setAllToDefaults)+', strict='+str(strict)) # Run the ConfigObj ctor. The result of this (if !setAllToDefaults) # is the exact copy of the input file as a dict (ConfigObj). If the # infile had extra pars or missing pars, they are still that way here. if setAllToDefaults: configobj.ConfigObj.__init__(self, configspec=cfgSpecPath) else: configobj.ConfigObj.__init__(self, os.path.abspath(cfgFileName), configspec=cfgSpecPath) # Before we validate (and fill in missing pars), find any lost pars # via this (somewhat kludgy) method suggested by ConfigObj folks. missing = '' # assume no .cfg file if strict and (not setAllToDefaults): # don't even populate this if not strict missing = findTheLost(os.path.abspath(cfgFileName), cfgSpecPath) # Validate it here. We can't skip this step even if we are just # setting all to defaults, since this sets the values. 
        # NOTE - this fills in values for any missing pars !  AND, if our
        # .cfgspc sets defaults vals, then missing pars are not an error...
        self._vtor = validate.Validator(vtor_checks.FUNC_DICT)
        # 'ans' will be True, False, or a dict (anything but True is bad)
        ans = self.validate(self._vtor, preserve_errors=True,
                            copy=setAllToDefaults)
        # Note: before the call to validate(), the list returned from
        # self.keys() is in the order found in self.filename.  If that file
        # was missing items that are in the .cfgspc, they will now show up
        # in self.keys(), but not necessarily in the same order as the .cfgspc
        hasTypeErr = ans != True
        extra = self.listTheExtras(True)

        # DEAL WITH ERRORS (in this way)
        #
        # wrong par type:
        #     strict -> severe error*
        #     not -> severe error
        # extra par(s) found:
        #     strict -> severe error
        #     not -> warn*
        # missing par(s):
        #     strict -> warn
        #     not - be silent
        #
        # *severe - if in GUI, pop up error & stop (e.g. file load), else raise
        # *warn - if in GUI, pop up warning, else print it to screen
        if extra or missing or hasTypeErr:
            flatStr = ''
            if ans == False:
                flatStr = "All values are invalid!"
            if ans != True and ans != False:
                flatStr = flattened2str(configobj.flatten_errors(self, ans))
            if missing:
                flatStr += "\n\n"+missing
            if extra:
                flatStr += "\n\n"+extra
            msg = "Validation warnings for: "
            if hasTypeErr or (strict and extra):
                msg = "Validation errors for: "
            msg = msg+os.path.realpath(cfgFileName)+\
                  "\n\n"+flatStr.strip('\n')
            if hasTypeErr or (strict and extra):
                raise RuntimeError(msg)
            else:
                # just inform them, but don't throw anything
                print(msg.replace('\n\n','\n'))

        # get the initial param list out of the ConfigObj dict
        self.syncParamList(True)

        # take note of all trigger logic
        self.debug(self.triggerLogicToStr())

        # see if we are using a package with its own run() function
        self._runFunc = None
        self._helpFunc = None
        if self.__assocPkg is not None:
            if hasattr(self.__assocPkg, 'run'):
                self._runFunc = self.__assocPkg.run
            if hasattr(self.__assocPkg, 'getHelpAsString'):
                self._helpFunc = self.__assocPkg.getHelpAsString

    def setDebugLogger(self, obj):
        # set the object we can use to post debugging info
        self._debugLogger = obj
        # now that we have one, post anything we have saved up (and clear list)
        if obj and len(self._debugYetToPost) > 0:
            for msg in self._debugYetToPost:
                self._debugLogger.debug(msg)
        self._debugYetToPost = []

    def debug(self, msg):
        if self._debugLogger:
            self._debugLogger.debug(msg)
        else:
            # else just hold onto it until we do have a logger -during the
            # init phase we may not yet have a logger, yet have stuff to log
            self._debugYetToPost.append(msg) # add to our little cache

    def getDefaultSaveFilename(self, stub=False):
        """ Return name of file where we are expected to be saved if no
        files for this task have ever been saved, and the user wishes to
        save.  If stub is True, the result will be
        <dir>/<taskname>_stub.cfg instead of <dir>/<taskname>.cfg. """
        if stub:
            return self._rcDir+os.sep+self.__taskName+'_stub.cfg'
        else:
            return self._rcDir+os.sep+self.__taskName+'.cfg'

    def syncParamList(self, firstTime, preserve_order=True):
        """ Set or reset the internal param list from the dict's contents. """
        # See the note in setParam about this design.

        # Get latest par values from dict.  Make sure we do not
        # change the id of the __paramList pointer here.
        new_list = self._getParamsFromConfigDict(self, initialPass=firstTime)
                                                 # dumpCfgspcTo=sys.stdout)
        # Have to add this odd last one for the sake of the GUI (still?)
        if self._forUseWithEpar:
            new_list.append(basicpar.IrafParS(['$nargs','s','h','N']))

        if len(self.__paramList) > 0 and preserve_order:
            # Here we have the most up-to-date data from the actual data
            # model, the ConfigObj dict, and we need to use it to fill in
            # our param list.  BUT, we need to preserve the order our list
            # has had up until now (by unique parameter name).
            namesInOrder = [p.fullName() for p in self.__paramList]
            assert len(namesInOrder) == len(new_list), \
                   'Mismatch in num pars, had: '+str(len(namesInOrder))+ \
                   ', now we have: '+str(len(new_list))+', '+ \
                   str([p.fullName() for p in new_list])
            self.__paramList[:] = [] # clear list, keep same pointer
            # create a flat dict view of new_list, for ease of use in next step
            new_list_dict = {} # can do in one step in v2.7
            for par in new_list:
                new_list_dict[par.fullName()] = par
            # populate
            for fn in namesInOrder:
                self.__paramList.append(new_list_dict[fn])
        else:
            # Here we just take the data in whatever order it came.
            self.__paramList[:] = new_list # keep same list pointer

    def getName(self): return self.__taskName

    def getPkgname(self): return '' # subclasses override w/ a sensible value

    def getParList(self, docopy=False):
        """ Return a list of parameter objects.  docopy is ignored as the
        returned value is not a copy. """
        return self.__paramList

    def getDefaultParList(self):
        """ Return a par list just like ours, but with all default values. """
        # The code below (create a new set-to-dflts obj) is correct, but it
        # adds a tenth of a second to startup.  Clicking "Defaults" in the
        # GUI does not call this.  But this can be used to set the order seen.

        # But first check for rare case of no cfg file name
        if self.filename is None:
            # this is a .cfgspc-only kind of object so far
            self.filename = self.getDefaultSaveFilename(stub=True)
            return copy.deepcopy(self.__paramList)

        tmpObj = ConfigObjPars(self.filename, associatedPkg=self.__assocPkg,
                               setAllToDefaults=True, strict=False)
        return tmpObj.getParList()

    def getFilename(self):
        if self.filename in (None, ''):
            return self.getDefaultSaveFilename()
        else:
            return self.filename

    def getAssocPkg(self): return self.__assocPkg

    def canExecute(self): return self._runFunc is not None

    def isSameTaskAs(self, aCfgObjPrs):
        """ Return True if the passed in object is for the same task as
        we are. """
        return aCfgObjPrs.getName() == self.getName()

#   def strictUpdate(self, aDict):
#       """ Override the current values with those in the given dict.  This
#           is like dict's update, except it doesn't allow new keys and it
#           verifies the values (it does?!) """
#       if aDict is None:
#           return
#       for k in aDict:
#           v = aDict[k]
#           print("Skipping override key = "+k+", val = "+str(v))

    def setParam(self, name, val, scope='', check=1, idxHint=None):
        """ Find the ConfigObj entry.  Update the __paramList. """
        theDict, oldVal = findScopedPar(self, scope, name)

        # Set the value, even if invalid.  It needs to be set before
        # the validation step (next).
        theDict[name] = val

        # If need be, check the proposed value.  Ideally, we'd like to
        # (somehow elegantly) only check this one item.  For now, the best
        # shortcut is to only validate this section.
        if check:
            ans=self.validate(self._vtor, preserve_errors=True,
                              section=theDict)
            if ans != True:
                flatStr = "All values are invalid!"
                if ans != False:
                    flatStr = flattened2str(configobj.flatten_errors(self, ans))
                raise RuntimeError("Validation error: "+flatStr)

        # Note - this design needs work.  Right now there are two copies
        # of the data:  the ConfigObj dict, and the __paramList ...
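        # (Illustrative call, with hypothetical names:
        #      cop.setParam('bitvalue', 96, scope='STEP2', idxHint=i)
        #  where i is that par's index within getParList().)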
        # We rely on the idxHint arg so we don't have to search the
        # __paramList every time this is called, which could really slow
        # things down.
        assert idxHint is not None, "ConfigObjPars relies on a valid idxHint"
        assert name == self.__paramList[idxHint].name, \
               'Error in setParam, name: "'+name+'" != name at idxHint: "'+\
               self.__paramList[idxHint].name+'", idxHint: '+str(idxHint)
        self.__paramList[idxHint].set(val)

    def saveParList(self, *args, **kw):
        """Write parameter data to filename (string or filehandle)"""
        if 'filename' in kw:
            filename = kw['filename']
        else:
            filename = None
        if not filename:
            filename = self.getFilename()
        if not filename:
            raise ValueError("No filename specified to save parameters")

        if hasattr(filename,'write'):
            fh = filename
            absFileName = os.path.abspath(fh.name)
        else:
            absFileName = os.path.expanduser(filename)
            absDir = os.path.dirname(absFileName)
            if len(absDir) and not os.path.isdir(absDir):
                os.makedirs(absDir)
            fh = open(absFileName,'w')
        numpars = len(self.__paramList)
        if self._forUseWithEpar: numpars -= 1
        if not self.final_comment: self.final_comment = [''] # force \n at EOF
        # Empty the ConfigObj version of section.defaults since that is based
        # on an assumption incorrect for us, and override with our own list.
        # THIS IS A BIT OF MONKEY-PATCHING!  WATCH FUTURE VERSION CHANGES!
        # See Trac ticket #762.
        while len(self.defaults):
            self.defaults.pop(-1) # empty it, keeping ref
        for key in self._neverWrite:
            self.defaults.append(key)
        # Note also that we are only overwriting the top/main section's
        # "defaults" list, but EVERY [sub-]section has such an attribute...

        # Now write to file, delegating work to ConfigObj (note that ConfigObj
        # write() skips any items listed by name in the self.defaults list)
        self.write(fh)
        fh.close()
        retval = str(numpars) + " parameters written to " + absFileName
        self.filename = absFileName # reset our own ConfigObj filename attr
        self.debug('Keys not written: '+str(self.defaults))
        return retval

    def run(self, *args, **kw):
        """ This may be overridden by a subclass. """
        if self._runFunc is not None:
            # remove the two args sent by EditParDialog which we do not use
            if 'mode' in kw: kw.pop('mode')
            if '_save' in kw: kw.pop('_save')
            return self._runFunc(self, *args, **kw)
        else:
            raise taskpars.NoExecError('No way to run task "'+self.__taskName+\
                '". You must either override the "run" method in your '+ \
                'ConfigObjPars subclass, or you must supply a "run" '+ \
                'function in your package.')

    def triggerLogicToStr(self):
        """ Print all the trigger logic to a string and return it. """
        try:
            import json
        except ImportError:
            return "Cannot dump triggers/dependencies/executes (need json)"
        retval = "TRIGGERS:\n"+json.dumps(self._allTriggers, indent=3)
        retval += "\nDEPENDENCIES:\n"+json.dumps(self._allDepdcs, indent=3)
        retval += "\nTO EXECUTE:\n"+json.dumps(self._allExecutes, indent=3)
        retval += "\n"
        return retval

    def getHelpAsString(self):
        """ This may be overridden by a subclass. """
        if self._helpFunc is not None:
            return self._helpFunc()
        else:
            return 'No help string found for task "'+self.__taskName+ \
            '". \n\nThe developer must either override the '+\
            'getHelpAsString() method in their ConfigObjPars \n'+ \
            'subclass, or they must supply such a function in their package.'

    def _findAssociatedConfigSpecFile(self, cfgFileName):
        """ Given a config file, find its associated config-spec file, and
        return the full pathname of the file.
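        The lookup tried below is, in order: the current directory, the
        .cfg file's own directory, the user's application directory, any
        associated package, and finally the package named by the task.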
""" # Handle simplest 2 cases first: co-located or local .cfgspc file retval = "."+os.sep+self.__taskName+".cfgspc" if os.path.isfile(retval): return retval retval = os.path.dirname(cfgFileName)+os.sep+self.__taskName+".cfgspc" if os.path.isfile(retval): return retval # Also try the resource dir retval = self.getDefaultSaveFilename()+'spc' # .cfgspc if os.path.isfile(retval): return retval # Now try and see if there is a matching .cfgspc file in/under an # associated package, if one is defined. if self.__assocPkg is not None: x, theFile = findCfgFileForPkg(None, '.cfgspc', pkgObj = self.__assocPkg, taskName = self.__taskName) return theFile # Finally try to import the task name and see if there is a .cfgspc # file in that directory x, theFile = findCfgFileForPkg(self.__taskName, '.cfgspc', taskName = self.__taskName) if os.path.exists(theFile): return theFile # unfound raise NoCfgFileError('Unfound config-spec file for task: "'+ \ self.__taskName+'"') def _getParamsFromConfigDict(self, cfgObj, scopePrefix='', initialPass=False, dumpCfgspcTo=None): """ Walk the given ConfigObj dict pulling out IRAF-like parameters into a list. Since this operates on a dict this can be called recursively. This is also our chance to find and pull out triggers and such dependencies. """ # init retval = [] if initialPass and len(scopePrefix) < 1: self._posArgs = [] # positional args [2-tuples]: (index,scopedName) # FOR SECURITY: the following 3 chunks of data, # _allTriggers, _allDepdcs, _allExecutes, # are collected ONLY from the .cfgspc file self._allTriggers = {} self._allDepdcs = {} self._allExecutes = {} # start walking ("tell yer story walkin, buddy") # NOTE: this relies on the "in" operator returning keys in the # order that they exist in the dict (which depends on ConfigObj keeping # the order they were found in the original file) for key in cfgObj: val = cfgObj[key] # Do we need to skip this - if not a par, like a rule or something toBeHidden = isHiddenName(key) if toBeHidden: if key not in self._neverWrite and key != TASK_NAME_KEY: self._neverWrite.append(key) # yes TASK_NAME_KEY is hidden, but it IS output to the .cfg # a section if isinstance(val, dict): if not toBeHidden: if len(list(val.keys()))>0 and len(retval)>0: # Here is where we sneak in the section comment # This is so incredibly kludgy (as the code was), it # MUST be revamped eventually! This is for the epar GUI. prevPar = retval[-1] # Use the key (or its comment?) as the section header prevPar.set(prevPar.get('p_prompt')+'\n\n'+key, field='p_prompt', check=0) if dumpCfgspcTo: dumpCfgspcTo.write('\n['+key+']\n') # a logical grouping (append its params) pfx = scopePrefix+'.'+key pfx = pfx.strip('.') retval = retval + self._getParamsFromConfigDict(val, pfx, initialPass, dumpCfgspcTo) # recurse else: # a param fields = [] choicesOrMin = None fields.append(key) # name dtype = 's' cspc = None if cfgObj.configspec: cspc = cfgObj.configspec.get(key) # None if not found chk_func_name = '' chk_args_dict = {} if cspc: chk_func_name = cspc[:cspc.find('(')] chk_args_dict = vtor_checks.sigStrToKwArgsDict(cspc) if chk_func_name.find('option') >= 0: dtype = 's' # convert the choices string to a list (to weed out kwds) x = cspc[cspc.find('(')+1:-1] # just the options() args # cspc e.g.: option_kw("poly5","nearest","linear", default="poly5", comment="Interpolant (poly5,nearest,linear)") x = x.split(',') # tokenize # but! 
                    #  comment value may have commas in it, find it
                    #  using its equal sign, rm all after it
                    has_eq = [i for i in x if i.find('=')>=0]
                    if len(has_eq) > 0:
                        x = x[: x.index(has_eq[0]) ]
                    # rm spaces, extra quotes; rm kywd arg pairs
                    x = [i.strip("' ") for i in x if i.find('=')<0]
                    choicesOrMin = '|'+'|'.join(x)+'|' # IRAF format for enums
                elif chk_func_name.find('boolean') >= 0: dtype = 'b'
                elif chk_func_name.find('float_or_') >= 0: dtype = 'r'
                elif chk_func_name.find('float') >= 0: dtype = 'R'
                elif chk_func_name.find('integer_or_') >= 0: dtype = 'i'
                elif chk_func_name.find('integer') >= 0: dtype = 'I'
                elif chk_func_name.find('action') >= 0: dtype = 'z'
                fields.append(dtype)

                fields.append('a')

                if type(val)==bool:
                    if val: fields.append('yes')
                    else:   fields.append('no')
                else:
                    fields.append(val)
                fields.append(choicesOrMin)
                fields.append(None)

                # Primarily use description from .cfgspc file (0).  But, allow
                # overrides from .cfg file (1) if different.
                dscrp0 = chk_args_dict.get('comment','').strip() # ok if missing
                dscrp1 = cfgObj.inline_comments[key]
                if dscrp1 is None: dscrp1 = ''
                while len(dscrp1) > 0 and dscrp1[0] in (' ','#'):
                    dscrp1 = dscrp1[1:] # .cfg file comments start with '#'
                dscrp1 = dscrp1.strip()
                # Now, decide what to do/say about the descriptions
                if len(dscrp1) > 0:
                    dscrp = dscrp0
                    if dscrp0 != dscrp1: # allow override if different
                        dscrp = dscrp1+eparoption.DSCRPTN_FLAG # flag it
                        if initialPass:
                            if dscrp0 == '' and cspc is None:
                                # this is a case where this par isn't in the
                                # .cfgspc; ignore, it is caught/error later
                                pass
                            else:
                                self.debug('Description of "'+key+ \
                                    '" overridden, from: '+repr(dscrp0)+\
                                    ' to: '+repr(dscrp1))
                    fields.append(dscrp)
                else:
                    # set the field for the GUI
                    fields.append(dscrp0)
                    # ALSO set it in the dict so it is written to file later
                    cfgObj.inline_comments[key] = '# '+dscrp0

                # This little section, while never intended to be used during
                # normal operation, could save a lot of manual work.
                if dumpCfgspcTo:
                    junk = cspc
                    junk = key+' = '+junk.strip()
                    if junk.find(' comment=')<0:
                        junk = junk[:-1]+", comment="+ \
                               repr(irafutils.stripQuotes(dscrp1.strip()))+")"
                    dumpCfgspcTo.write(junk+'\n')

                # Create the par
                if not toBeHidden or chk_func_name.find('action')==0:
                    par = basicpar.parFactory(fields, True)
                    par.setScope(scopePrefix)
                    retval.append(par)
                # else this is a hidden key

                # The next few items require a fully scoped name
                absKeyName = scopePrefix+'.'+key # assumed to be unique
                # Check for pars marked to be positional args
                if initialPass:
                    pos = chk_args_dict.get('pos')
                    if pos:
                        # we'll sort them later, on demand
                        self._posArgs.append( (int(pos), scopePrefix, key) )

                # Check for triggers and/or dependencies
                if initialPass:
                    # What triggers what? (that's why there's an 's' in the kwd)
                    # try "trigger" (old)
                    if chk_args_dict.get('trigger'):
                        print("WARNING: outdated version of .cfgspc!! for "+
                              self.__taskName+", 'trigger' unused for "+
                              absKeyName)
                    # try "triggers"
                    trgs = chk_args_dict.get('triggers')
                    if trgs and len(trgs)>0:
                        # eg. _allTriggers['STEP2.xy'] == ('_rule1_','_rule3_')
                        assert absKeyName not in self._allTriggers, \
                               'More than 1 of these in .cfgspc?: '+absKeyName
                        # we force this to always be a sequence
                        if isinstance(trgs, (list,tuple)):
                            self._allTriggers[absKeyName] = trgs
                        else:
                            self._allTriggers[absKeyName] = (trgs,)
                    # try "executes"
                    excs = chk_args_dict.get('executes')
                    if excs and len(excs)>0:
                        # eg.
                        #     _allExecutes['STEP2.xy'] == ('_rule1_','_rule3_')
                        assert absKeyName not in self._allExecutes, \
                               'More than 1 of these in .cfgspc?: '+absKeyName
                        # we force this to always be a sequence
                        if isinstance(excs, (list,tuple)):
                            self._allExecutes[absKeyName] = excs
                        else:
                            self._allExecutes[absKeyName] = (excs,)

                    # Dependencies? (besides these used here, may someday
                    # add: 'range_from', 'warn_if', etc.)
                    depName = None
                    if not depName:
                        depType = 'active_if'
                        depName = chk_args_dict.get(depType) # e.g. =='_rule1_'
                    if not depName:
                        depType = 'inactive_if'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'is_set_by'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'set_yes_if'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'set_no_if'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'is_disabled_by'
                        depName = chk_args_dict.get(depType)
                    # NOTE - the above few lines stop at the first dependency
                    # found (depName) for a given par.  If, in the future, a
                    # given par can have >1 dependency then we need to revamp!!
                    if depName:
                        # Add to _allDepdcs dict: (val is dict of pars:types)
                        #
                        # e.g. _allDepdcs['_rule1_'] == \
                        #        {'STEP3.ra':      'active_if',
                        #         'STEP3.dec':     'active_if',
                        #         'STEP3.azimuth': 'inactive_if'}
                        if depName in self._allDepdcs:
                            thisRulesDict = self._allDepdcs[depName]
                            assert not absKeyName in thisRulesDict, \
                                'Can\'t yet handle multiple actions for the '+ \
                                'same par and the same rule.  For "'+depName+ \
                                '" dict was: '+str(thisRulesDict)+ \
                                ' while trying to add to it: '+\
                                str({absKeyName:depType})
                            thisRulesDict[absKeyName] = depType
                        else:
                            self._allDepdcs[depName] = {absKeyName:depType}
                    # else no dependencies found for this chk_args_dict

        return retval

    def getTriggerStrings(self, parScope, parName):
        """ For a given item (scope + name), return all strings (in a
        tuple) that it is meant to trigger, if any exist.  Returns None
        if none. """
        # The data structure of _allTriggers was chosen for how easily/quickly
        # this particular access can be made here.
        fullName = parScope+'.'+parName
        return self._allTriggers.get(fullName) # returns None if unfound

    def getParsWhoDependOn(self, ruleName):
        """ Find any parameters which depend on the given trigger name.
        Returns None or a dict of {scopedName: dependencyName} from
        _allDepdcs. """
        # The data structure of _allDepdcs was chosen for how easily/quickly
        # this particular access can be made here.
        return self._allDepdcs.get(ruleName)

    def getExecuteStrings(self, parScope, parName):
        """ For a given item (scope + name), return all strings (in a
        tuple) that it is meant to execute, if any exist.  Returns None
        if none. """
        # The data structure of _allExecutes was chosen for how easily/quickly
        # this particular access can be made here.
        fullName = parScope+'.'+parName
        return self._allExecutes.get(fullName) # returns None if unfound

    def getPosArgs(self):
        """ Return a list, in order, of any parameters marked with "pos=N" in
        the .cfgspc file. """
        if len(self._posArgs) < 1: return []
        # The first item in the tuple is the index, so we now sort by it
        self._posArgs.sort()
        # Build a return list
        retval = []
        for idx, scope, name in self._posArgs:
            theDict, val = findScopedPar(self, scope, name)
            retval.append(val)
        return retval

    def getKwdArgs(self, flatten = False):
        """ Return a dict of all normal dict parameters - that is, all
        parameters NOT marked with "pos=N" in the .cfgspc file.  This will
        also exclude all hidden parameters (metadata, rules, etc). """
        # Start with a full deep-copy.  What complicates this method is the
        # idea of sub-sections.
        # This dict can have dicts as values, and so on.
        dcopy = self.dict() # ConfigObj docs say this is a deep-copy

        # First go through the dict removing all positional args
        for idx,scope,name in self._posArgs:
            theDict, val = findScopedPar(dcopy, scope, name)
            # 'theDict' may be dcopy, or it may be a dict under it
            theDict.pop(name)

        # Then go through the dict removing all hidden items ('_item_name_')
        for k in list(dcopy.keys()):
            if isHiddenName(k):
                dcopy.pop(k)

        # Done with the nominal operation
        if not flatten:
            return dcopy

        # They have asked us to flatten the structure - to bring all parameters
        # up to the top level, even if they are in sub-sections.  So we look
        # for values that are dicts.  We will throw something if we end up
        # with name collisions at the top level as a result of this.
        return flattenDictTree(dcopy)

    def canPerformValidation(self):
        """ Override this so we can do our own validation.  tryValue() will
        be called as a result. """
        return True

    def knowAsNative(self):
        """ Override so we can keep native types in the internal dict. """
        return True

    def tryValue(self, name, val, scope=''):
        """ For the given item name (and scope), we are being asked to try
        the given value to see if it would pass validation.  We are not
        to set it, but just try it.  We return a tuple:
        If it fails, we return: (False, the last known valid value).
        On success, we return: (True, None). """
        # SIMILARITY BETWEEN THIS AND setParam() SHOULD BE CONSOLIDATED!

        # Set the value, even if invalid.  It needs to be set before
        # the validation step (next).
        theDict, oldVal = findScopedPar(self, scope, name)
        if oldVal == val: return (True, None) # assume oldVal is valid
        theDict[name] = val

        # Check the proposed value.  Ideally, we'd like to
        # (somehow elegantly) only check this one item.  For now, the best
        # shortcut is to only validate this section.
        ans=self.validate(self._vtor, preserve_errors=True, section=theDict)

        # No matter what ans is, immediately return the item to its original
        # value since we are only checking the value here - not setting.
        theDict[name] = oldVal

        # Now see what the validation check said
        errStr = ''
        if ans != True:
            flatStr = "All values are invalid!"
            if ans != False:
                flatStr = flattened2str(configobj.flatten_errors(self, ans))
            errStr = "Validation error: "+flatStr # for now this info is unused

        # Done
        if len(errStr): return (False, oldVal) # was an error
        else:           return (True, None)    # val is OK

    def listTheExtras(self, deleteAlso):
        """ Use ConfigObj's get_extra_values() call to find any extra/unknown
        parameters we may have loaded.  Return a string similar to
        findTheLost.  If deleteAlso is True, this will also delete any
        extra/unknown items. """
        # get list of extras
        extras = configobj.get_extra_values(self)
        # extras is in format: [(sections, key), (sections, key), ]
        # but we need:  [(sections, key, result), ...] - set all results to
        # a bool just to make it the right shape.  BUT, since we are in
        # here anyway, make that bool mean something - hide info in it about
        # whether that extra item is a section (1) or just a single par (0)
        #
        # simplified, this is:  expanded = [ (x+(abool,)) for x in extras]
        expanded = [ (x+ \
                       ( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \
                     ) for x in extras]
        retval = ''
        if expanded:
            retval = flattened2str(expanded, extra=1)
        # but before we return, delete them (from ourself!) if requested to
        if deleteAlso:
            for tup_to_del in extras:
                target = self
                # descend the tree to the dict where this item is located.
                # (this works because target is not a copy (because the dict
                #  type is mutable))
                location = tup_to_del[0]
                for subdict in location:
                    target = target[subdict]
                # delete it
                target.pop(tup_to_del[1])

        return retval


# ---------------------------- helper functions --------------------------------


def findTheLost(config_file, configspec_file, skipHidden=True):
    """ Find any lost/missing parameters in this cfg file, compared to what
    the .cfgspc says should be there.  This method is recommended by the
    ConfigObj docs.  Returns a string listing the item errors. """
    # do some sanity checking, but don't (yet) make this a serious error
    if not os.path.exists(config_file):
        print("ERROR: Config file not found: "+config_file)
        return ''
    if not os.path.exists(configspec_file):
        print("ERROR: Configspec file not found: "+configspec_file)
        return ''
    tmpObj = configobj.ConfigObj(config_file, configspec=configspec_file)
    simval = configobj.SimpleVal()
    test = tmpObj.validate(simval)
    if test == True:
        return ''
    # If we get here, there is a dict returned of {key1: bool, key2: bool}
    # which matches the shape of the config obj.  We need to walk it to
    # find the Falses, since they are the missing pars.
    missing = []
    flattened = configobj.flatten_errors(tmpObj, test)
    # But, before we move on, skip/eliminate any 'hidden' items from our list,
    # since hidden items are really supposed to be missing from the .cfg file.
    if len(flattened) > 0 and skipHidden:
        keepers = []
        for tup in flattened:
            keep = True
            # hidden section
            if len(tup[0])>0 and isHiddenName(tup[0][-1]):
                keep = False
            # hidden par (in a section, or at the top level)
            elif tup[1] is not None and isHiddenName(tup[1]):
                keep = False
            if keep:
                keepers.append(tup)
        flattened = keepers
    flatStr = flattened2str(flattened, missing=True)
    return flatStr


def isHiddenName(astr):
    """ Return True if this string name denotes a hidden par or section """
    if astr is not None and len(astr) > 2 and astr.startswith('_') and \
       astr.endswith('_'):
        return True
    else:
        return False


def flattened2str(flattened, missing=False, extra=False):
    """ Return a pretty-printed multi-line string version of the output of
    flatten_errors.  Know that flattened comes in the form of a list of
    keys that failed.  Each member of the list is a tuple::

        ([list of sections...], key, result)

    so we turn that into a string.  Set missing to True if all the input
    problems are from missing items.  Set extra to True if all the input
    problems are from extra items. """
    if flattened is None or len(flattened) < 1:
        return ''
    retval = ''
    for sections, key, result in flattened:
        # Name the section and item, to start the message line
        if sections is None or len(sections) == 0:
            retval += '\t"'+key+'"'
        elif len(sections) == 1:
            if key is None:
                # a whole section is missing at the top-level; see if hidden
                junk = sections[0]
                if isHiddenName(junk):
                    continue # this missing or extra section is not an error
                else:
                    retval += '\tSection "'+sections[0]+'"'
            else:
                retval += '\t"'+sections[0]+'.'+key+'"'
        else: # len > 1
            joined = '.'.join(sections)
            joined = '"'+joined+'"'
            if key is None:
                retval += '\tSection '+joined
            else:
                retval += '\t"'+key+'" from '+joined
        # End the msg line with "what seems to be the trouble" with this one
        if missing and result==False:
            retval += ' is missing.'
        elif extra:
            if result:
                retval += ' is an unexpected section. Is your file out of date?'
            else:
                retval += ' is an unexpected parameter. Is your file out of date?'
        elif isinstance(result, bool):
            retval += ' has an invalid value'
        else:
            retval += ' is invalid, '+result.message
        retval += '\n\n'
    return retval.rstrip()
stsci.tools-3.4.12/lib/stsci/tools/check_files.py0000644001120100020070000003615213112074217023436 0ustar jhunkSTSCI\science00000000000000from __future__ import division, print_function # confidence high

from stsci.tools import parseinput, fileutil
from astropy.io import fits
import os


def checkFiles(filelist,ivmlist = None):
    """
    - Converts waiver FITS science and data quality files to MEF format
    - Converts GEIS science and data quality files to MEF format
    - Checks for stis association tables and splits them into single imsets
    - Removes files with EXPTIME=0 and the corresponding ivm files
    - Removes files with NGOODPIX == 0 (to exclude saturated images)
    - Removes files with missing PA_V3 keyword

    The list of science files should match the list of ivm files at the end.
    """
    newfilelist, ivmlist = checkFITSFormat(filelist, ivmlist)

    # check for STIS association files. This must be done before
    # the other checks in order to handle correctly stis
    # assoc files
    #if fits.getval(newfilelist[0], 'INSTRUME') == 'STIS':
    newfilelist, ivmlist = checkStisFiles(newfilelist, ivmlist)
    if newfilelist == []:
        return [], []
    removed_expt_files = check_exptime(newfilelist)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_expt_files)
    if newfilelist == []:
        return [], []
    removed_ngood_files = checkNGOODPIX(newfilelist)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_ngood_files)
    if newfilelist == []:
        return [], []

    removed_pav3_files = checkPA_V3(newfilelist)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_pav3_files)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist,[])
    if newfilelist == []:
        return [], []

    return newfilelist, ivmlist


def checkFITSFormat(filelist, ivmlist=None):
    """
    This code will check whether or not files are GEIS or WAIVER FITS and
    convert them to MEF if found. It also keeps the IVMLIST consistent with
    the input filelist, in the case that some inputs get dropped during
    the check/conversion.
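    A minimal sketch of a call (the file name is hypothetical)::

        >>> newfilelist, ivmlist = checkFITSFormat(['j8bt06nyq_flt.fits'])

    MEF input passes through unchanged; any GEIS or waiver FITS inputs are
    dropped from the returned list and replaced by their converted MEF files.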
""" if ivmlist is None: ivmlist = [None for l in filelist] sci_ivm = list(zip(filelist, ivmlist)) removed_files, translated_names, newivmlist = convert2fits(sci_ivm) newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files) if newfilelist == [] and translated_names == []: return [], [] elif translated_names != []: newfilelist.extend(translated_names) ivmlist.extend(newivmlist) return newfilelist, ivmlist def checkStisFiles(filelist, ivmlist=None): newflist = [] newilist = [] removed_files = [] assoc_files = [] assoc_ilist = [] if len(filelist) != len(ivmlist): errormsg = "Input file list and ivm list have different lenghts\n" errormsg += "Quitting ...\n" raise ValueError(errormsg) for t in zip(filelist, ivmlist): if fits.getval(t[0], 'INSTRUME') != 'STIS': newflist.append(t[0]) newilist.append(t[1]) continue if isSTISSpectroscopic(t[0]): removed_files.append(t[0]) continue sci_count = stisObsCount(t[0]) if sci_count >1: newfilenames = splitStis(t[0], sci_count) assoc_files.extend(newfilenames) removed_files.append(t[0]) if (isinstance(t[1], tuple) and t[1][0] is not None) or \ (not isinstance(t[1], tuple) and t[1] is not None): print('Does not handle STIS IVM files and STIS association files\n') else: asn_ivmlist = list(zip(sci_count * [None], newfilenames)) assoc_ilist.extend(asn_ivmlist) elif sci_count == 1: newflist.append(t[0]) newilist.append(t[1]) else: errormsg = "No valid 'SCI extension in STIS file\n" raise ValueError(errormsg) stisExt2PrimKw([t[0]]) newflist.extend(assoc_files) newilist.extend(assoc_ilist) return newflist, newilist def check_exptime(filelist): """ Removes files with EXPTIME==0 from filelist. """ removed_files = [] for f in filelist: try: exptime = fileutil.getHeader(f+'[sci,1]')['EXPTIME'] except KeyError: removed_files.append(f) print("Warning: There are files without keyword EXPTIME") continue if exptime <= 0: removed_files.append(f) print("Warning: There are files with zero exposure time: keyword EXPTIME = 0.0") if removed_files != []: print("Warning: Removing the following files from input list") for f in removed_files: print('\t',f) return removed_files def checkNGOODPIX(filelist): """ Only for ACS, WFC3 and STIS, check NGOODPIX If all pixels are 'bad' on all chips, exclude this image from further processing. Similar checks requiring comparing 'driz_sep_bits' against WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be done separately (and later). """ removed_files = [] supported_instruments = ['ACS','STIS','WFC3'] for inputfile in filelist: if fileutil.getKeyword(inputfile,'instrume') in supported_instruments: file = fits.open(inputfile) ngood = 0 for extn in file: if 'EXTNAME' in extn.header and extn.header['EXTNAME'] == 'SCI': ngood += extn.header['NGOODPIX'] file.close() if (ngood == 0): removed_files.append(inputfile) if removed_files != []: print("Warning: Files without valid pixels detected: keyword NGOODPIX = 0.0") print("Warning: Removing the following files from input list") for f in removed_files: print('\t',f) return removed_files def update_input(filelist, ivmlist=None, removed_files=None): """ Removes files flagged to be removed from the input filelist. Removes the corresponding ivm files if present. 
""" newfilelist = [] if removed_files == []: return filelist, ivmlist else: sci_ivm = list(zip(filelist, ivmlist)) for f in removed_files: result = [sci_ivm.remove(t) for t in sci_ivm if t[0] == f ] ivmlist = [el[1] for el in sci_ivm] newfilelist = [el[0] for el in sci_ivm] return newfilelist, ivmlist def stisObsCount(input): """ Input: A stis multiextension file Output: Number of stis science extensions in input """ count = 0 f = fits.open(input) for ext in f: if 'extname' in ext.header: if (ext.header['extname'].upper() == 'SCI'): count += 1 f.close() return count def splitStis(stisfile, sci_count): """ :Purpose: Split a STIS association file into multiple imset MEF files. Split the corresponding spt file if present into single spt files. If an spt file can't be split or is missing a Warning is printed. Returns ------- names: list a list with the names of the new flt files. """ newfiles = [] f = fits.open(stisfile) hdu0 = f[0].copy() for count in range(1,sci_count+1): fitsobj = fits.HDUList() fitsobj.append(hdu0) hdu = f[('sci',count)].copy() fitsobj.append(hdu) rootname = hdu.header['EXPNAME'] newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits') try: # Verify error array exists if f[('err', count)].data is None: raise ValueError # Verify dq array exists if f[('dq', count)].data is None: raise ValueError # Copy the err extension hdu = f[('err',count)].copy() fitsobj.append(hdu) # Copy the dq extension hdu = f[('dq',count)].copy() fitsobj.append(hdu) fitsobj[1].header['EXTVER'] = 1 fitsobj[2].header['EXTVER'] = 1 fitsobj[3].header['EXTVER'] = 1 except ValueError: print('\nWarning:') print('Extension version %d of the input file %s does not' %(count, stisfile)) print('contain all required image extensions. Each must contain') print('populates SCI, ERR and DQ arrays.') continue # Determine if the file you wish to create already exists on the disk. # If the file does exist, replace it. if (os.path.exists(newfilename)): os.remove(newfilename) print(" Replacing "+newfilename+"...") # Write out the new file fitsobj.writeto(newfilename) newfiles.append(newfilename) f.close() sptfilename = fileutil.buildNewRootname(stisfile, extn='_spt.fits') try: sptfile = fits.open(sptfilename) except IOError: print('SPT file not found %s \n' % sptfilename) return newfiles if sptfile: hdu0 = sptfile[0].copy() try: for count in range(1,sci_count+1): fitsobj = fits.HDUList() fitsobj.append(hdu0) hdu = sptfile[count].copy() fitsobj.append(hdu) rootname = hdu.header['EXPNAME'] newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits') fitsobj[1].header['EXTVER'] = 1 if (os.path.exists(newfilename)): os.remove(newfilename) print(" Replacing "+newfilename+"...") # Write out the new file fitsobj.writeto(newfilename) except: print("Warning: Unable to split spt file %s " % sptfilename) sptfile.close() return newfiles def stisExt2PrimKw(stisfiles): """ Several kw which are usuall yin the primary header are in the extension header for STIS. They are copied to the primary header for convenience. 
    List of kw:
    'DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'
    """
    kw_list = ['DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME']

    for sfile in stisfiles:
        d = {}
        for k in kw_list:
            d[k] = fits.getval(sfile, k, ext=1)

        for item in d.items():
            fits.setval(sfile, item[0], value=item[1],
                        comment='Copied from extension header')


def isSTISSpectroscopic(fname):

    if fits.getval(fname, 'OBSTYPE') == 'SPECTROSCOPIC':
        print("Warning: STIS spectroscopic files detected")
        print("Warning: Removing %s from input list" % fname)
        return True
    else:
        return False


def checkPA_V3(fnames):
    removed_files = []
    for f in fnames:
        try:
            pav3 = fits.getval(f, 'PA_V3')
        except KeyError:
            rootname = fits.getval(f, 'ROOTNAME')
            sptfile = rootname+'_spt.fits'
            if fileutil.findFile(sptfile):
                try:
                    pav3 = fits.getval(sptfile, 'PA_V3')
                except KeyError:
                    print("Warning: Files without keyword PA_V3 detected")
                    removed_files.append(f)
                fits.setval(f, 'PA_V3', value=pav3)
            else:
                print("Warning: Files without keyword PA_V3 detected")
                removed_files.append(f)

    if removed_files != []:
        print("Warning: Removing the following files from input list")
        for f in removed_files:
            print('\t',f)

    return removed_files


def convert2fits(sci_ivm):
    """
    Checks if a file is in WAIVER or GEIS format and converts it to MEF
    """
    removed_files = []
    translated_names = []
    newivmlist = []

    for file in sci_ivm:
        #find out what the input is
        # if science file is not found on disk, add it to removed_files for removal
        try:
            imgfits,imgtype = fileutil.isFits(file[0])
        except IOError:
            print("Warning: File %s could not be found" %file[0])
            print("Warning: Removing file %s from input list" %file[0])
            removed_files.append(file[0])
            continue

        # Check for existence of waiver FITS input, and quit if found.
        # Or should we print a warning and continue but not use that file
        if imgfits and imgtype == 'waiver':
            newfilename = waiver2mef(file[0], convert_dq=True)
            if newfilename is None:
                print("Removing file %s from input list - could not convert WAIVER format to MEF\n" %file[0])
                removed_files.append(file[0])
            else:
                removed_files.append(file[0])
                translated_names.append(newfilename)
                newivmlist.append(file[1])

        # If a GEIS image is provided as input, create a new MEF file with
        # a name generated using 'buildFITSName()'
        # Convert the corresponding data quality file if present
        if not imgfits:
            newfilename = geis2mef(file[0], convert_dq=True)
            if newfilename is None:
                print("Removing file %s from input list - could not convert GEIS format to MEF\n" %file[0])
                removed_files.append(file[0])
            else:
                removed_files.append(file[0])
                translated_names.append(newfilename)
                newivmlist.append(file[1])

    return removed_files, translated_names, newivmlist


def waiver2mef(sciname, newname=None, convert_dq=True):
    """
    Converts a waiver FITS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.
    Returns the new name of the science image.
    """

    def convert(file):
        newfilename = fileutil.buildNewRootname(file, extn='_c0h.fits')
        try:
            newimage = fileutil.openImage(file,writefits=True,
                                          fitsname=newfilename,clobber=True)
            del newimage
            return newfilename
        except IOError:
            print('Warning: File %s could not be found' % file)
            return None

    newsciname = convert(sciname)
    if convert_dq:
        dq_name = convert(fileutil.buildNewRootname(sciname, extn='_c1h.fits'))

    return newsciname


def geis2mef(sciname, convert_dq=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.
    Returns the new name of the science image.
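    A sketch of a call (the GEIS file name is hypothetical)::

        >>> newname = geis2mef('w0vh0101v.c0h', convert_dq=True)

    The MEF version is written to disk and its new name is returned.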
""" def convert(file): newfilename = fileutil.buildFITSName(file) try: newimage = fileutil.openImage(file,writefits=True, fitsname=newfilename, clobber=True) del newimage return newfilename except IOError: print('Warning: File %s could not be found' % file) return None newsciname = convert(sciname) if convert_dq: dq_name = convert(sciname.split('.')[0] + '.c1h') return newsciname def countInput(input): files = parseinput.parseinput(input) count = len(files[0]) for f in files[0]: if fileutil.isFits(f)[0]: try: ins = fits.getval(f, 'INSTRUME') except: # allow odd fits files; do not stop the count ins = None if ins == 'STIS': count += (stisObsCount(f)-1) return count stsci.tools-3.4.12/lib/stsci/tools/clipboard_helper.py0000644001120100020070000000552513017116245024477 0ustar jhunkSTSCI\science00000000000000""" Usually copying to and from the clipboard in an app is handled automatically and correctly on a given platform, when the user applies the right keystrokes or mouse events for that platform. In some corner cases this might not be true, so this module exists to help facilitate any needed copying or pasting. For now, this is tkinter based, but it is imported lazily. $Id$ """ from __future__ import division, print_function # confidence high import sys from . import irafutils _theRoot = None _lastSel = '' # our own copy of the last selected text (for PRIMARY) # Install our own PRIMARY request handler. def ch_handler(offset=0, length=-1, **kw): """ Handle standard PRIMARY clipboard access. Note that offset and length are passed as strings. This differs from CLIPBOARD. """ global _lastSel offset = int(offset) length = int(length) if length < 0: length = len(_lastSel) return _lastSel[offset:offset+length] # X11 apps (e.g. xterm) seem to use PRIMARY for select=copy and midmouse=paste # Other X11 apps seem to use CLIPBOARD for ctl-c=copy and ?ctl-v?=paste # OS X seems to use CLIPBOARD for everything, which is Cmd-C and Cmd-V # Described here: http://wiki.tcl.tk/1217 "Primary Transfer vs. the Clipboard" # See also: http://www.tcl.tk/man/tcl8.5/TkCmd/selection.htm # and: http://www.tcl.tk/man/tcl8.5/TkCmd/clipboard.htm def put(text, cbname): """ Put the given string into the given clipboard. """ global _lastSel _checkTkInit() if cbname == 'CLIPBOARD': _theRoot.clipboard_clear() if text: # for clipboard_append, kwds can be -displayof, -format, or -type _theRoot.clipboard_append(text) return if cbname == 'PRIMARY': _lastSel = text _theRoot.selection_handle(ch_handler, selection='PRIMARY') # we need to claim/own it so that ch_handler is used _theRoot.selection_own(selection='PRIMARY') # could add command arg for a func to be called when we lose ownership return raise RuntimeError("Unexpected clipboard name: "+str(cbname)) def get(cbname): """ Get the contents of the given clipboard. """ _checkTkInit() if cbname == 'PRIMARY': try: return _theRoot.selection_get(selection='PRIMARY') except: return None if cbname == 'CLIPBOARD': try: return _theRoot.selection_get(selection='CLIPBOARD') except: return None raise RuntimeError("Unexpected clipboard name: "+str(cbname)) def dump(): _checkTkInit() print ('primary = '+str(get('PRIMARY'))) print ('clipboard = '+str(get('CLIPBOARD'))) print ('owner = '+str(_theRoot.selection_own_get())) def _checkTkInit(): """ Make sure the tkinter root is defined. 
    global _theRoot
    _theRoot = irafutils.init_tk_default_root()
stsci.tools-3.4.12/lib/stsci/tools/compmixin.py0000644001120100020070000001671113112074217023201 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python
#
"""
This module is from Lennart Regebro's ComparableMixin class, available at:

http://regebro.wordpress.com/2010/12/13/
python-implementing-rich-comparison-the-correct-way/

The idea is to prevent you from having to define lt,le,eq,ne,etc...
This may no longer be necessary after the functools total_ordering
decorator (Python v2.7) is available on all Python versions supported
by our software.

For simple comparisons, all that is necessary is to derive your class
from ComparableMixin and override the _cmpkey() method.

For more complex comparisons (where type-checking needs to occur and
comparisons to other types are allowed), simply override _compare() instead
of _cmpkey().

BEWARE that comparing different types has different results in Python 2.x
versus Python 3.x:

Python 2.7
>>> 'a' < 2
False

Python 3.2.1
>>> 'a' < 2
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: unorderable types: str() < int()
"""
from __future__ import print_function

import sys
if sys.version_info[0] < 3:
    string_types = basestring
else:
    string_types = str


class ComparableMixin(object):
    def _compare(self, other, method):
        try:
            return method(self._cmpkey(), other._cmpkey())
        except (AttributeError, TypeError):
            # _cmpkey not implemented, or return different type,
            # so I can't compare with "other".
            return NotImplemented

    def __lt__(self, other):
        return self._compare(other, lambda s,o: s < o)

    def __le__(self, other):
        return self._compare(other, lambda s,o: s <= o)

    def __eq__(self, other):
        return self._compare(other, lambda s,o: s == o)

    def __ge__(self, other):
        return self._compare(other, lambda s,o: s >= o)

    def __gt__(self, other):
        return self._compare(other, lambda s,o: s > o)

    def __ne__(self, other):
        return self._compare(other, lambda s,o: s != o)


class ComparableIntBaseMixin(ComparableMixin):
    """ For those classes which, at heart, are comparable to integers. """
    def _compare(self, other, method):
        if isinstance(other, self.__class__): # two objects of same class
            return method(self._cmpkey(), other._cmpkey())
        else:
            return method(int(self._cmpkey()), int(other))


class ComparableFloatBaseMixin(ComparableMixin):
    """ For those classes which, at heart, are comparable to floats. """
    def _compare(self, other, method):
        if isinstance(other, self.__class__): # two objects of same class
            return method(self._cmpkey(), other._cmpkey())
        else:
            return method(float(self._cmpkey()), float(other))

# -----------------------------------------------------------------------------

# this class is only used for testing this module!
class SimpleStrUnitTest(ComparableMixin):
    def __init__(self, v):
        self.val = str(v) # all input turned to string
    def __str__(self):
        return str(self.val)
    def _cmpkey(self):
        return self.val


# this class is only used for testing this module!
class AnyTypeUnitTest(ComparableMixin):
    def __init__(self, v):
        self.val = v # leave all input typed as is
    def __str__(self):
        return str(self.val)

    # define this instead of _cmpkey - handle ALL sorts of scenarios,
    # except intentionally don't compare self strings (strlen>1) with integers
    # so we have a case which fails in our test below
    def _compare(self, other, method):
        if isinstance(other, self.__class__):
            return self._compare(other.val, method) # recurse, get 2 logic below
        if isinstance(other, string_types):
            return method(str(self.val), other)
        elif other is None and self.val is None:
            return method(0, 0)
        elif other is None:
            return method(str(self.val), '') # coerce to str compare
        elif isinstance(other, int):
            # handle ONLY case where self.val is a single char or an int
            if isinstance(self.val, string_types) and len(self.val)==1:
                return method(ord(self.val), other)
            else:
                return method(int(self.val), other) # assume we are int-like
        try:
            return method(self.val, other)
        except (AttributeError, TypeError):
            return NotImplemented

# -----------------------------------------------------------------------------

def test():
    a = SimpleStrUnitTest('a')
    b = SimpleStrUnitTest('b')
    c = SimpleStrUnitTest('c')
    two = SimpleStrUnitTest(2)

    # compare two SimpleStrUnitTest objects
    assert str(a>b) == "False"
    assert str(a<b) == "True"
    assert str(a<=b) == "True"
    assert str(a>=two) == "True"
    assert str(b==two) == "False"
    assert str([str(jj) for jj in sorted([b,a,two,c])])=="['2', 'a', 'b', 'c']"
    print('Success in first set')

    x = AnyTypeUnitTest('x')
    y = AnyTypeUnitTest('yyy')
    z = AnyTypeUnitTest(0)
    nn = AnyTypeUnitTest(None)

    # compare two AnyTypeUnitTest objects
    assert str(x>y) == "False"
    assert str(x<y) == "True"
    assert str(x<=y) == "True"
    assert str(x>z) == "True"
    assert str(x!=z) == "True"
    assert str(z!=z) == "False"
    assert str(z==z) == "True"
    assert str(y<nn) == "False"
    assert str(y>=nn) == "True"
    assert str(y==nn) == "False"
    assert str(nn==nn) == "True"
    assert str([str(jj) for jj in sorted([y,x,nn,z])]) == "['None', '0', 'x', 'yyy']"
    print('Success in second set')

    # compare AnyTypeUnitTest objects to built-in types
    assert str(x<0) == "False"
    assert str(x<=0) == "False"
    assert str(x>0) == "True"
    assert str(x!=0) == "True"
    assert str(x==0) == "False"
    assert str(x<None) == "False"
    assert str(x<=None) == "False"
    assert str(x>None) == "True"
    assert str(x!=None) == "True"
    assert str(x==None) == "False"
    assert str(x<"abc") == "False"
    assert str(x<="abc") == "False"
    assert str(x>"abc") == "True"
    assert str(x!="abc") == "True"
    assert str(x=="abc") == "False"
    assert str(y<None) == "False"
    assert str(y<=None) == "False"
    assert str(y>None) == "True"
    assert str(y!=None) == "True"
    assert str(y==None) == "False"
    assert str(y<"abc") == "False"
    assert str(y<="abc") == "False"
    assert str(y>"abc") == "True"
    assert str(y!="abc") == "True"
    assert str(y=="abc") == "False"
    print('Success in third set')

    # all of the above should work without errors; now raise some
    print('yyy == 0 ?')
    try:
        y == z # AnyTypeUnitTest intentionally doesn't compare strlen>1 to ints
        assert 0, 'Exception expected but not found'
    except ValueError:
        print('   ... exception handled')

    print('sorted([0, yyy]) ?')
    try:
        sorted([z,y])
        assert 0, 'Exception expected but not found'
    except ValueError:
        print('   ... exception handled')

    print('Test successful')

# -----------------------------------------------------------------------------

if __name__=='__main__': # in case something else imports this file
    test()
stsci.tools-3.4.12/lib/stsci/tools/configobj.py0000644001120100020070000025541313006721301023135 0ustar jhunkSTSCI\science00000000000000# configobj.py
# A config file reader/writer that supports nested sections in config files.
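# (An illustrative sketch of its use - the file name here is hypothetical:
#
#      from stsci.tools import configobj
#      cfg = configobj.ConfigObj('mytask.cfg')
#      val = cfg['section']['subsection']['key']   # nested sections are dicts
# )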
# Copyright (C) 2005-2010 Michael Foord, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # nico AT tekNico DOT net # ConfigObj 4 # http://www.voidspace.org.uk/python/configobj.html # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. from __future__ import absolute_import, division, generators import os import re import sys from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE # To conditionally use version dependent code PY3K = sys.version_info[0] > 2 if PY3K: string_types = str else: string_types = basestring # imported lazily to avoid startup performance hit if it isn't used compiler = None # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" # Sentinel for use in getattr calls to replace hasattr MISSING = object() __version__ = '4.7.2' try: any except NameError: def any(iterable): for entry in iterable: if entry: return True return False __all__ = ( '__version__', 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', 'flatten_errors', 'get_extra_values' ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } def getObj(s): global compiler if compiler is None: import compiler s = "a=" + s p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class UnknownType(Exception): pass class Builder(object): def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return list(map(self.build, o.getChildren())) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} if PY3K: i = 
map(self.build, o.getChildren())
            for el in i:
                d[el] = next(i)
        else:
            i = iter(map(self.build, o.getChildren()))
            for el in i:
                d[el] = i.next()
        return d

    def build_Tuple(self, o):
        return tuple(self.build_List(o))

    def build_Name(self, o):
        if o.name == 'None':
            return None
        if o.name == 'True':
            return True
        if o.name == 'False':
            return False

        # An undefined Name
        raise UnknownType('Undefined Name')

    def build_Add(self, o):
        real, imag = list(map(self.build_Const, o.getChildren()))
        try:
            real = float(real)
        except TypeError:
            raise UnknownType('Add')
        if not isinstance(imag, complex) or imag.real != 0.0:
            raise UnknownType('Add')
        return real+imag

    def build_Getattr(self, o):
        parent = self.build(o.expr)
        return getattr(parent, o.attrname)

    def build_UnarySub(self, o):
        return -self.build_Const(o.getChildren()[0])

    def build_UnaryAdd(self, o):
        return self.build_Const(o.getChildren()[0])


_builder = Builder()


def unrepr(s):
    if not s:
        return s
    return _builder.build(getObj(s))


class ConfigObjError(SyntaxError):
    """
    This is the base class for all errors that ConfigObj raises.
    It is a subclass of SyntaxError.
    """
    def __init__(self, message='', line_number=None, line=''):
        self.line = line
        self.line_number = line_number
        SyntaxError.__init__(self, message)


class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting that doesn't match.
    """


class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written.
    It is neither a valid ``key = value`` line,
    nor a valid section marker line.
    """


class ReloadError(IOError):
    """
    A 'reload' operation failed.
    This exception is a subclass of ``IOError``.
    """
    def __init__(self):
        IOError.__init__(self, 'reload failed, filename is not set.')


class DuplicateError(ConfigObjError):
    """
    The keyword or section specified already exists.
    """


class ConfigspecError(ConfigObjError):
    """
    An error occurred whilst parsing a configspec.
    """


class InterpolationError(ConfigObjError):
    """Base class for the two interpolation errors."""


class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation."""

    def __init__(self, option):
        InterpolationError.__init__(
            self,
            'interpolation loop detected in value "%s".' % option)


class RepeatSectionError(ConfigObjError):
    """
    This error indicates additional sections in a section with a
    ``__many__`` (repeated) section.
    """


class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing."""
    def __init__(self, option):
        msg = 'missing option "%s" in interpolation.' % option
        InterpolationError.__init__(self, msg)


class UnreprError(ConfigObjError):
    """An error parsing in unrepr mode."""


class InterpolationEngine(object):
    """
    A helper class to help perform string interpolation.

    This class is an abstract base class; its descendants perform
    the actual work.
    """

    # compiled regexp to use in self.interpolate()
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    _cookie = '%'

    def __init__(self, section):
        # the Section instance that "owns" this engine
        self.section = section

    def interpolate(self, key, value):
        # short-cut
        if not self._cookie in value:
            return value

        def recursive_interpolate(key, value, section, backtrail):
            """The function that does the actual work.

            ``value``: the string we're trying to interpolate.
            ``section``: the section in which that string was found
            ``backtrail``: a dict to keep track of where we've been,
            to detect and prevent infinite recursion loops

            This is similar to a depth-first-search algorithm.
            """
            # Have we been here already?
if (key, section.name) in backtrail: # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None and not isinstance(val, Section): break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None and not isinstance(val, Section): break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). 
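
        For example, with the configparser style, an illustrative return
        value for a match on ``"%(foo)s"`` would be the 3-tuple
        ``('foo', <value of foo>, <section containing foo>)``.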
""" raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _cookie = '%' _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _cookie = '$' _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P\$) | # Two $ signs (?P[_a-z][_a-z0-9]*) | # $name format {(?P[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } def __newobj__(cls, *args): # Hack for pickle return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. """ def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.items(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # the configspec self.configspec = None # for defaults self.defaults = [] self.default_values = {} self.extra_values = [] self._created = False def _interpolate(self, key, value): try: # do we already have an interpolation engine? 
engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name == True: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use default name = DEFAULT_INTERPOLATION name = name.lower() # so that "Template", "template", etc. all work class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation: if isinstance(val, str): return self._interpolate(key, val) if isinstance(val, list): def _check(entry): if isinstance(entry, string_types): return self._interpolate(key, entry) return entry new = [_check(entry) for entry in val] if new != val: return new return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, string_types): raise ValueError('The key "%s" is not a string.' % key) # add the comment if key not in self.comments: self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if key not in self: self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, dict) and not unrepr: # First create the new depth level, # then create the section if key not in self: self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if key not in self: self.scalars.append(key) if not self.main.stringify: if isinstance(value, string_types): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, string_types): raise TypeError('Value is not a string "%s".' % entry) else: raise TypeError('Value is not a string "%s".' % value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, default=MISSING): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. 
If key is not found, d is returned if given, otherwise KeyError is raised' """ try: val = self[key] except KeyError: if default is MISSING: raise val = default else: del self[key] return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None self.defaults = [] self.extra_values = [] def setdefault(self, key, default=None): """A version of setdefault that sets sequence if appropriate.""" try: return self[key] except KeyError: self[key] = default return self[key] def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return list(zip((self.scalars + self.sections), list(self.values()))) def keys(self): """D.keys() -> list of D's keys""" return (self.scalars + self.sections) def values(self): """D.values() -> list of D's values""" return [self[key] for key in (self.scalars + self.sections)] def iteritems(self): """D.iteritems() -> an iterator over the (key, value) items of D""" return iter(list(self.items())) def iterkeys(self): """D.iterkeys() -> an iterator over the keys of D""" return iter((self.scalars + self.sections)) __iter__ = iterkeys def itervalues(self): """D.itervalues() -> an iterator over the values of D""" return iter(list(self.values())) def __repr__(self): """x.__repr__() <==> repr(x)""" def _getval(key): try: return self[key] except MissingInterpolationOption: return dict.__getitem__(self, key) return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) for key in (self.scalars + self.sections)]) __str__ = __repr__ __str__.__doc__ = "x.__str__() <==> str(x)" # Extra methods - not in a normal dictionary def dict(self): """ Return a deepcopy of self as a dictionary. All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. >>> n = a.dict() >>> n == a 1 >>> n is a 0 """ newdict = {} for entry in self: this_entry = self[entry] if isinstance(this_entry, Section): this_entry = this_entry.dict() elif isinstance(this_entry, list): # create a copy rather than a reference this_entry = list(this_entry) elif isinstance(this_entry, tuple): # create a copy rather than a reference this_entry = tuple(this_entry) newdict[entry] = this_entry return newdict def merge(self, indict): """ A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}) """ for key, val in list(indict.items()): if (key in self and isinstance(self[key], dict) and isinstance(val, dict)): self[key].merge(val) else: self[key] = val def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments. 
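
        A minimal illustrative example::

            c = ConfigObj()
            c['key1'] = 'value'
            c.rename('key1', 'key2')
            # c is now ConfigObj({'key2': 'value'}), with 'key2' occupying
            # the position 'key1' had in ``scalars``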
""" if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) # val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs): """ Walk every member and call a function on the keyword and value. Return a dictionary of the return values If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}) """ out = {} # scalars first for i in range(len(self.scalars)): entry = self.scalars[i] try: val = function(self, entry, **keywargs) # bound again in case name has changed entry = self.scalars[i] out[entry] = val except Exception: if raise_errors: raise else: entry = self.scalars[i] out[entry] = False # then sections for i in range(len(self.sections)): entry = self.sections[i] if call_on_sections: try: function(self, entry, **keywargs) except Exception: if raise_errors: raise else: entry = self.sections[i] out[entry] = False # bound again in case name has changed entry = self.sections[i] # previous result is discarded out[entry] = self[entry].walk( function, raise_errors=raise_errors, call_on_sections=call_on_sections, **keywargs) return out def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. 
>>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, string_types): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val) def as_int(self, key): """ A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2' """ return int(self[key]) def as_float(self, key): """ A convenience method which coerces the specified value to a float. If the value is an invalid literal for ``float``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_float('a') Traceback (most recent call last): ValueError: invalid literal for float(): fish >>> a['b'] = '1' >>> a.as_float('b') 1.0 >>> a['b'] = '3.2' >>> a.as_float('b') 3.2000000000000002 """ return float(self[key]) def as_list(self, key): """ A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1] """ result = self[key] if isinstance(result, (tuple, list)): return list(result) return [result] def restore_default(self, key): """ Restore (and return) default value for the specified key. This method will only work for a ConfigObj that was created with a configspec and has been validated. If there is no default value for this key, ``KeyError`` is raised. """ default = self.default_values[key] dict.__setitem__(self, key, default) if key not in self.defaults: self.defaults.append(key) return default def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults() class ConfigObj(Section): """An object to read, create, and write config files.""" _keyword = re.compile(r'''^ # line start (\s*) # indentation ( # keyword (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"=].*?) # no quotes ) \s*=\s* # divider (.*) # value (including list values and comments) $ # line end ''', re.VERBOSE) _sectionmarker = re.compile(r'''^ (\s*) # 1: indentation ((?:\[\s*)+) # 2: section marker open ( # 3: section name open (?:"\s*\S.*?\s*")| # at least one non-space with double quotes (?:'\s*\S.*?\s*')| # at least one non-space with single quotes (?:[^'"\s].*?) # at least one non-space unquoted ) # section name close ((?:\s*\])+) # 4: section marker close \s*(\#.*)? 
# 5: optional comment
        $''',
        re.VERBOSE)

    # this regexp pulls list values out as a single string
    # or single values and comments
    # FIXME: this regex adds a '' to the end of comma terminated lists
    #   workaround in ``_handle_value``
    _valueexp = re.compile(r'''^
        (?:
            (?:
                (
                    (?:
                        (?:
                            (?:".*?")|              # double quotes
                            (?:'.*?')|              # single quotes
                            (?:[^'",\#][^,\#]*?)    # unquoted
                        )
                        \s*,\s*                     # comma
                    )*      # match all list items ending in a comma (if any)
                )
                (
                    (?:".*?")|                      # double quotes
                    (?:'.*?')|                      # single quotes
                    (?:[^'",\#\s][^,]*?)|           # unquoted
                    (?:(?<!,))                      # Empty value
                )?          # last item in a list - or string value
            )|
            (,)             # alternatively a single comma - empty list
        )
        \s*(\#.*)?          # optional comment
        $''',
        re.VERBOSE)

    # use findall to get the members of a list value
    _listvalueexp = re.compile(r'''
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'",\#]?.*?)    # unquoted
        )
        \s*,\s*                 # comma
        ''',
        re.VERBOSE)

    # this regexp is used for the value
    # when lists are switched off
    _nolistvalue = re.compile(r'''^
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"\#].*?)|     # unquoted
            (?:)                # Empty value
        )
        \s*(\#.*)?              # optional comment
        $''',
        re.VERBOSE)

    # regexes for finding triple quoted values on one line
    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')

    _triple_quote = {
        "'''": (_single_line_single, _multi_line_single),
        '"""': (_single_line_double, _multi_line_double),
    }

    # Used by the ``istrue`` Section method
    _bools = {
        'yes': True, 'no': False,
        'on': True, 'off': False,
        '1': True, '0': False,
        'true': True, 'false': False,
    }

    def __init__(self, infile=None, options=None, configspec=None,
                 encoding=None, interpolation=True, raise_errors=False,
                 list_values=True, create_empty=False, file_error=False,
                 stringify=True, indent_type=None, default_encoding=None,
                 unrepr=False, write_empty_values=False, _inspec=False):
        """
        Parse a config file or create a config file object.

        ``infile`` may be a filename, a file like object, a list of lines
        or a dictionary.
        """
        self._inspec = _inspec
        # init the superclass
        Section.__init__(self, self, 0, self)

        infile = infile or []

        _options = {'configspec': configspec,
                    'encoding': encoding, 'interpolation': interpolation,
                    'raise_errors': raise_errors, 'list_values': list_values,
                    'create_empty': create_empty, 'file_error': file_error,
                    'stringify': stringify, 'indent_type': indent_type,
                    'default_encoding': default_encoding, 'unrepr': unrepr,
                    'write_empty_values': write_empty_values}

        if options is None:
            options = _options
        else:
            import warnings
            warnings.warn('Passing in an options dictionary to ConfigObj() '
                          'is deprecated. Use **options instead.',
                          DeprecationWarning, stacklevel=2)

            # TODO: check the values too.
            for entry in options:
                if entry not in OPTION_DEFAULTS:
                    raise TypeError('Unrecognised option "%s".' % entry)
            for entry, value in list(OPTION_DEFAULTS.items()):
                if entry not in options:
                    options[entry] = value
                keyword_value = _options[entry]
                if value != keyword_value:
                    options[entry] = keyword_value

        # XXXX this ignores an explicit list_values = True in combination
        # with _inspec. The user should *never* do that anyway, but still...
        if _inspec:
            options['list_values'] = False

        self._initialise(options)
        configspec = options['configspec']
        self._original_configspec = configspec
        self._load(infile, configspec)

    def _load(self, infile, configspec):
        if isinstance(infile, string_types):
            self.filename = infile
            if os.path.isfile(infile):
                h = open(infile, 'rb')
                infile = h.read() or []
                h.close()
            elif self.file_error:
                # raise an error if the file doesn't exist
                raise IOError('Config file not found: "%s".' % self.filename)
            else:
                # file doesn't already exist
                if self.create_empty:
                    # this is a good test that the filename specified
                    # isn't impossible - like on a non-existent device
                    h = open(infile, 'w')
                    h.write('')
                    h.close()
                infile = []

        elif isinstance(infile, (list, tuple)):
            infile = list(infile)

        elif isinstance(infile, dict):
            # initialise self
            # the Section class handles creating subsections
            if isinstance(infile, ConfigObj):
                # get a copy of our ConfigObj
                def set_section(in_section, this_section):
                    for entry in in_section.scalars:
                        this_section[entry] = in_section[entry]
                    for section in in_section.sections:
                        this_section[section] = {}
                        set_section(in_section[section],
                                    this_section[section])
                set_section(infile, self)
            else:
                for entry in infile:
                    self[entry] = infile[entry]
            del self._errors

            if configspec is not None:
                self._handle_configspec(configspec)
            else:
                self.configspec = None
            return

        elif getattr(infile, 'read', MISSING) is not MISSING:
            # This supports file like objects
            infile = infile.read() or []
            # needs splitting into lines - but needs doing *after* decoding
            # in case it's not an 8 bit encoding
        else:
            raise TypeError('infile must be a filename, file like object, '
                            'or list of lines.')

        if infile:
            # don't do it for the empty ConfigObj
            infile = self._handle_bom(infile)
            # infile is now *always* a list
            #
            # Set the newlines attribute (first line ending it finds)
            # and strip trailing '\n' or '\r' from lines
            for line in infile:
                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
                    continue
                for end in ('\r\n', '\n', '\r'):
                    if line.endswith(end):
                        self.newlines = end
                        break
                break

            infile = [line.rstrip('\r\n') for line in infile]

        self._parse(infile)
        # if we had any errors, now is the time to raise them
        if self._errors:
            info = "at line %s." % self._errors[0].line_number
            if len(self._errors) > 1:
                msg = "Parsing failed with several errors.\nFirst error %s" % info
                error = ConfigObjError(msg)
            else:
                error = self._errors[0]
            # set the errors attribute; it's a list of tuples:
            # (error_type, message, line_number)
            error.errors = self._errors
            # set the config attribute
            error.config = self
            raise error
        # delete private attributes
        del self._errors

        if configspec is None:
            self.configspec = None
        else:
            self._handle_configspec(configspec)

    def _initialise(self, options=None):
        if options is None:
            options = OPTION_DEFAULTS

        # initialise a few variables
        self.filename = None
        self._errors = []
        self.raise_errors = options['raise_errors']
        self.interpolation = options['interpolation']
        self.list_values = options['list_values']
        self.create_empty = options['create_empty']
        self.file_error = options['file_error']
        self.stringify = options['stringify']
        self.indent_type = options['indent_type']
        self.encoding = options['encoding']
        self.default_encoding = options['default_encoding']
        self.BOM = False
        self.newlines = None
        self.write_empty_values = options['write_empty_values']
        self.unrepr = options['unrepr']

        self.initial_comment = []
        self.final_comment = []
        self.configspec = None

        if self._inspec:
            self.list_values = False

        # Clear section attributes as well
        Section._initialise(self)

    def __repr__(self):
        def _getval(key):
            try:
                return self[key]
            except MissingInterpolationOption:
                return dict.__getitem__(self, key)
        return ('ConfigObj({%s})' %
                ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
                           for key in (self.scalars + self.sections)]))

    def _handle_bom(self, infile):
        """
        Handle any BOM, and decode if necessary.

        If an encoding is specified, that *must* be used - but the BOM should
        still be removed (and the BOM attribute set).

        (If the encoding is wrongly specified, then a BOM for an alternative
        encoding won't be discovered or removed.)

        If an encoding is not specified, UTF8 or UTF16 BOM will be detected
        and removed. The BOM attribute will be set. UTF16 will be decoded to
        unicode.

        NOTE: This method must not be called with an empty ``infile``.

        Specifying the *wrong* encoding is likely to cause a
        ``UnicodeDecodeError``.

        ``infile`` must always be returned as a list of lines, but may be
        passed in as a single string.
        """
        if ((self.encoding is not None) and
                (self.encoding.lower() not in BOM_LIST)):
            # No need to check for a BOM
            # the encoding specified doesn't have one
            # just decode
            return self._decode(infile, self.encoding)

        if isinstance(infile, (list, tuple)):
            line = infile[0]
        else:
            line = infile
        if self.encoding is not None:
            # encoding explicitly supplied
            # And it could have an associated BOM
            # TODO: if encoding is just UTF16 - we ought to check for both
            # TODO: big endian and little endian versions.
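            # e.g. (illustrative) encoding='utf16' maps to 'utf_16' in
            # BOM_LIST below, and both the big and little endian byte-order
            # marks are tried before falling back to a plain decode.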
enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in list(BOMS.items()): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): ### BOM discovered ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in list(BOMS.items()): if not isinstance(BOM, str) or not line.startswith(BOM): continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF8 - don't decode if isinstance(infile, string_types): return infile.splitlines(True) else: return infile # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no encoding specified, just return if isinstance(infile, string_types): # infile read from a file will be a single string return infile.splitlines(True) return infile def _a_to_u(self, aString): """Decode ASCII strings to unicode if a self.encoding is specified.""" if self.encoding: return aString.decode('ascii') else: return aString def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. if is a string, it also needs converting to a list. """ if isinstance(infile, string_types): # can't be unicode # NOTE: Could raise a ``UnicodeDecodeError`` return infile.decode(encoding).splitlines(True) for i, line in enumerate(infile): # NOTE: The isinstance test here handles mixed lists of unicode/string # NOTE: But the decode will break on any non-string values # NOTE: Or could raise a ``UnicodeDecodeError`` if PY3K: if not isinstance(line, str): infile[i] = line.decode(encoding) else: if not isinstance(line, unicode): infile[i] = line.decode(encoding) return infile def _decode_element(self, line): """Decode element to unicode if necessary.""" if not self.encoding: return line if isinstance(line, str) and self.default_encoding: return line.decode(self.default_encoding) return line def _str(self, value): """ Used by ``stringify`` within validate, to turn non-string values into strings. """ if not isinstance(value, string_types): return str(value) else: return value def _parse(self, infile): """Actually parse the config file.""" temp_list_values = self.list_values if self.unrepr: self.list_values = False comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False while cur_index < maxline: if reset_comment: comment_list = [] cur_index += 1 line = infile[cur_index] sline = line.strip() # do we have anything on the line ? 
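            # blank lines and whole-line comments are buffered in
            # ``comment_list`` so they can be re-attached to the next key or
            # section marker that gets parsed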
if not sline or sline.startswith('#'): reset_comment = False comment_list.append(line) continue if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) if mat is not None: # is a section line (indent, sect_open, sect_name, sect_close, comment) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): self._handle_error("Cannot compute the section depth at line %s.", NestingError, infile, cur_index) continue if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: self._handle_error("Cannot compute nesting level at line %s.", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: # the new section is a sibling of the current section parent = this_section.parent elif cur_depth == this_section.depth + 1: # the new section is a child the current section parent = this_section else: self._handle_error("Section too nested at line %s.", NestingError, infile, cur_index) sect_name = self._unquote(sect_name) if sect_name in parent: self._handle_error('Duplicate section name at line %s.', DuplicateError, infile, cur_index) continue # create the new section this_section = Section( parent, cur_depth, self, name=sect_name) parent[sect_name] = this_section parent.inline_comments[sect_name] = comment parent.comments[sect_name] = comment_list continue # # it's not a section marker, # so it should be a valid ``key = value`` line mat = self._keyword.match(line) if mat is None: # it neither matched as a keyword # or a section marker self._handle_error( 'Invalid line at line "%s".', ParseError, infile, cur_index) else: # is a keyword value # value will include any inline comment (indent, key, value) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent # check for a multiline value if value[:3] in ['"""', "'''"]: try: value, comment, cur_index = self._multiline( value, infile, cur_index, maxline) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception as e: if type(e) == UnknownType: msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception as e: if isinstance(e, UnknownType): msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: # extract comment and lists try: (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue # key = self._unquote(key) if key in this_section: self._handle_error( 'Duplicate keyword name at line %s.', DuplicateError, infile, cur_index) continue # add the key. 
# we set unrepr because if we have got this far we will never # be creating a new section this_section.__setitem__(key, value, unrepr=True) this_section.inline_comments[key] = comment this_section.comments[key] = comment_list continue # if self.indent_type is None: # no indentation used, set the type accordingly self.indent_type = '' # preserve the final comment if not self and not self.initial_comment: self.initial_comment = comment_list elif not reset_comment: self.final_comment = comment_list self.list_values = temp_list_values def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError() def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ line = infile[cur_index] cur_index += 1 message = text % cur_index error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here raise error # store the error # reraise when parsing has finished self._errors.append(error) def _unquote(self, value): """Return an unquoted version of a value""" if not value: # should only happen during parsing of lists raise SyntaxError if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. """ if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, string_types): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' 
% value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) if value.find('"""') == -1: quot = tdquot else: quot = tsquot return quot def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment) def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) == -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index) def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. 
``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, _inspec=True) except ConfigObjError as e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError as e: raise IOError('Reading configspec failed: %s' % e) self.configspec = configspec def _set_configspec(self, section, copy): """ Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} section[entry]._created = True if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry] def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" # NOTE: the calls to self._quote here handles non-StringType values. if not self.unrepr: val = self._decode_element(self._quote(this_entry)) else: val = repr(this_entry) return '%s%s%s%s%s' % (indent_string, self._decode_element(self._quote(entry, multiline=False)), self._a_to_u(' = '), val, self._decode_element(comment)) def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment)) def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment) # Public methods def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() >>> a.filename = filename >>> a == ConfigObj('test.ini', raise_errors=True) 1 >>> import os >>> os.remove('test.ini') """ if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') if section is None: int_val = self.interpolation self.interpolation = False section = self for line in self.initial_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: # don't write out default values continue for comment_line in section.comments[entry]: comment_line = self._decode_element(comment_line.lstrip()) if comment_line and not comment_line.startswith(cs): comment_line = csp + comment_line out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) if isinstance(this_entry, dict): # a section out.append(self._write_marker( indent_string, this_entry.depth, entry, comment)) 
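                # recurse: render the subsection's own lines and append them
                # after its section marker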
out.extend(self.write(section=this_entry)) else: out.append(self._write_line( indent_string, entry, this_entry, comment)) if section is self: for line in self.final_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) self.interpolation = int_val if section is not self: return out if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode # NOTE: This will *screw* UTF16, each line will start with the BOM if self.encoding: out = [l.encode(self.encoding) for l in out] if (self.BOM and ((self.encoding is None) or (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): # Add the UTF8 BOM if not out: out.append('') out[0] = BOM_UTF8 + out[0] return out # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w' and sys.platform == 'win32' and newline == '\r\n'): # Windows specific hack to avoid writing '\r\r\n' newline = '\n' output = self._a_to_u(newline).join(out) if self.encoding: output = output.encode(self.encoding) if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): # Add the UTF8 BOM output = BOM_UTF8 + output if not output.endswith(newline): output += newline if outfile is not None: outfile.write(output) else: # !!! write mode was 'wb' but that fails in PY3K and we dont need h = open(self.filename, 'w') h.write(output) h.close() def validate(self, validator, preserve_errors=False, copy=False, section=None): """ Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. 
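
        A typical round trip looks like this (an illustrative sketch -
        ``app.ini``, ``app.spec`` and the handling of failures are made up
        for the example)::

            from validate import Validator

            config = ConfigObj('app.ini', configspec='app.spec')
            result = config.validate(Validator(), preserve_errors=True)
            for sections, key, error in flatten_errors(config, result):
                print(sections, key, error)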
""" if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the validate module # Which makes importing configobj faster from .validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self if copy: section.initial_comment = section.configspec.initial_comment section.final_comment = section.configspec.final_comment section.encoding = section.configspec.encoding section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type # # section.default_values.clear() #?? configspec = section.configspec self._set_configspec(section, copy) def validate_entry(entry, spec, val, missing, ret_true, ret_false): section.default_values.pop(entry, None) try: section.default_values[entry] = validator.get_default_value(configspec[entry]) except (KeyError, AttributeError, validator.baseErrorClass): # No default, bad default or validator has no 'get_default_value' # (e.g. SimpleVal) pass try: check = validator.check(spec, val, missing=missing ) except validator.baseErrorClass as e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false # out = {} ret_true = True ret_false = True unvalidated = [k for k in section.scalars if k not in configspec] incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names continue if (not entry in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and entry not in section.scalars: # copy comments section.comments[entry] = ( configspec.comments.get(entry, [])) section.inline_comments[entry] = ( configspec.inline_comments.get(entry, '')) # else: missing = False val = section[entry] ret_true, ret_false = validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] if many is not None: for entry in unvalidated: val = section[entry] ret_true, ret_false = validate_entry(entry, many, val, False, ret_true, ret_false) unvalidated = [] for entry in incorrect_scalars: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Value %r was provided as a section' % entry out[entry] = validator.baseErrorClass(msg) for entry in incorrect_sections: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Section %r was provided as a single value' % entry out[entry] = validator.baseErrorClass(msg) # Missing sections will have been created as empty ones when the # 
configspec was read. for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if section[entry].configspec is None: unvalidated.append(entry) continue if copy: section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if check == False: ret_true = False elif check == True: ret_false = False else: ret_true = False section.extra_values = unvalidated if preserve_errors and not section._created: # If the section wasn't created (i.e. it wasn't missing) # then we can't return False, we need to preserve errors ret_false = False # if ret_false and preserve_errors and out: # If we are preserving errors, but all # the failures are from missing sections / values # then we can return False. Otherwise there is a # real failure that we need to preserve. ret_false = not any(out.values()) if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, string_types): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. 
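
        For example (illustrative, with made-up names), a missing ``port``
        value inside a ``[server]`` section would appear as::

            (['server'], 'port', False)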
If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. """ if levels is None: # first time called levels = [] results = [] if res == True: return results if res == False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return results for (key, val) in res.items(): if val == True: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] out.extend([(_prepend, name) for name in conf.extra_values]) for name in conf.sections: if name not in conf.extra_values: out.extend(get_extra_values(conf[name], _prepend + (name,))) return out """*A programming language is a medium of expression.* - Paul Graham""" stsci.tools-3.4.12/lib/stsci/tools/convertgeis.py0000644001120100020070000004067113241163620023530 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python # $Id: readgeis.py 10520 2010-10-11 16:39:49Z hack $ """ convertgeis: Read GEIS file and convert it to a waivered-FITS file. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE Usage: convertgeis.py [options] GEISname FITSname GEISname is the input GEIS file in GEIS format, and FITSname is the output file in FITS format. GEISname can be a directory name. In this case, it will try to use all `*.??h` files as input file names. If FITSname is omitted or is a directory name, this task will try to construct the output names from the input names, i.e.: abc.xyh will have an output name of abc_xyf.fits :Options: -h print the help (this text) -n do NOT over-write any pre-existing output file :Example: If used in Pythons script, a user can, e. g.:: >>> import convertgeis >>> hdulist = convertgeis.convert(GEISFileName) (do whatever with hdulist) >>> hdulist.writeto(FITSFileName) The most basic usage from the command line:: convertgeis.py test1.hhh test1_c0f.fits This command will convert the input GEIS file test1.hhh to a waivered-FITS file test1_c0f.fits. From the command line:: convertgeis.py . this will convert all `*.??h` files in the current directory to waivered-FITS files (of corresponding names) and write them in the current directory. Another example of usage from the command line:: convertgeis.py "u*" "*" this will convert all `u*.??h` files in the current directory to waivered-FITS files (of corresponding names) and write them in the current directory. 
Note that when using wild cards, it is necessary to put them in quotes.

"""

# Developed by Science Software Branch, STScI, USA.
# This version needs pyfits 0.9.6.3 or later
# and numpy version 1.0.4 or later

from __future__ import division, print_function # confidence high

__version__ = "1.0 (25 Feb, 2011), \xa9 AURA"

import os, sys
from astropy.io import fits
import numpy
import array

if sys.version_info[0] > 2:
    from functools import reduce

# definitions used to convert GEIS record into numpy objects
geis_fmt = {'REAL': 'f', 'DOUBLE': 'f', 'INTEGER': 'i', 'LOGICAL': 'i',
            'CHARACTER': 'S'}

# definitions used to convert data into numpy array for use in fits.Column
cols_fmt = {'REAL': 'float', 'DOUBLE': 'float', 'INTEGER': 'int',
            'LOGICAL': 'S', 'CHARACTER': 'S'}

# definitions used to define print format for fits.Column
cols_pfmt = {'REAL': 'E', 'DOUBLE': 'D', 'INTEGER': 'J', 'LOGICAL': 'A',
             'CHARACTER': 'A'}

# Keywords which require special unit conversion
# keywords which are output as long-floats without using exponential formatting
kw_DOUBLE = ['CRVAL1', 'CRVAL2', 'FPKTTIME', 'LPKTTIME']


def stsci2(hdulist, filename):
    """For STScI GEIS files, need to do extra steps."""
    # Write output file name to the primary header
    instrument = hdulist[0].header.get('INSTRUME', '')
    if instrument in ("WFPC2", "FOC"):
        hdulist[0].header['FILENAME'] = filename


def convert(input):
    """The input GEIS file "input" will be read and an HDUList object will
    be returned that matches the waiver-FITS format written out by 'stwfits'
    in IRAF.

    The user can use the writeto method to write the HDUList object to
    a FITS file.
    """
    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise ValueError("Illegal input GEIS file name %s" % input)

    data_file = input[:-1] + 'd'

    _os = sys.platform
    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or \
       _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen + 1
    else:
        raise ValueError("Platform %s is not supported (yet)." % _os)

    end_card = 'END' + ' ' * (cardLen - 3)

    # open input file
    im = open(input)

    # Generate the primary HDU
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    # Determine starting point for adding Group Parameter Block keywords
    # to Primary header
    phdr_indx = phdr.index('PSIZE')

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS' + str(j)] for j in range(1, _naxis0 + 1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x, y: x * y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension table
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    cols = []       # column definitions used for extension table
    cols_dict = {}  # provides name access to Column defs

    _range = range(1, pcount + 1)
    key = [phdr['PTYPE' + str(j)] for j in _range]
    comm = [phdr.cards['PTYPE' + str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE' + str(j) for j in _range] + \
            ['PDTYPE' + str(j) for j in _range] + \
            ['PSIZE' + str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount + 1):
        ptype = key[i - 1]
        pdtype = phdr['PDTYPE' + str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star + 1:]

        # collect boolean keywords since they need special attention later
        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        # identify keywords which require conversion to special units
        if ptype in kw_DOUBLE:
            _type = 'DOUBLE'

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype, fmt))

        # Set up definitions for use in creating the group-parameter
        # block table
        nrpt = ''
        nbits = str(int(_bytes) * 8)
        if 'CHAR' in _type:
            nrpt = _bytes
            nbits = _bytes

        afmt = cols_fmt[_type] + nbits
        if 'LOGICAL' in _type:
            afmt = cols_fmt[_type]
        cfmt = cols_pfmt[_type] + nrpt
        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
        cols_dict[ptype] = fits.Column(name=ptype, format=cfmt,
                                       array=numpy.zeros(gcount, dtype=afmt))
        cols.append(cols_dict[ptype])  # This keeps the columns in order

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.BITPIX2DTYPE[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)
    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr) - 1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]

    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND', value=True,
             comment="FITS dataset may contain extensions",
             after=_after)

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
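    # The GEIS data file is a flat sequence of ``gcount`` groups: each group
    # is ``data_size`` bytes of pixel data followed by ``_psize // 8`` bytes
    # of group parameters, so group k starts at byte offset k * group_size.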
f1 = open(data_file, mode='rb') dat = f1.read() errormsg = "" # Define data array for all groups arr_shape = _naxis[:] arr_shape[0] = gcount arr_stack = numpy.zeros(arr_shape,dtype=_code) loc = 0 for k in range(gcount): ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code) ext_dat = ext_dat.reshape(_shape) if _uint16: ext_dat += _bzero # Check to see whether there are any NaN's or infs which might indicate # a byte-swapping problem, such as being written out on little-endian # and being read in on big-endian or vice-versa. if _code.find('float') >= 0 and \ (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))): errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had floating point data values =\n" errormsg += "= of NaN and/or Inf. =\n" errormsg += "===================================\n" elif _code.find('int') >= 0: # Check INT data for max values ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat) if ext_dat_exp.max() == int(_bitpix) - 1: # Potential problems with byteswapping errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had integer data values =\n" errormsg += "= with maximum bitvalues. =\n" errormsg += "===================================\n" arr_stack[k] = ext_dat #ext_hdu = fits.hdu.ImageHDU(data=ext_dat) rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats) loc += group_size # Add data from this GPB to table for i in range(1, pcount+1): val = rec[0][i-1] if i in bools: if val: val = 'T' else: val = 'F' cols[i-1].array[k] = val # Based on the first group, add GPB keywords to PRIMARY header if k == 0: # Create separate PyFITS Card objects for each entry in 'rec' # and update Primary HDU with these keywords after PSIZE for i in range(1, pcount+1): #val = rec.field(i-1)[0] val = rec[0][i-1] if val.dtype.kind == 'S': val = val.decode('ascii') if i in bools: if val: val = True else: val = False elif i in floats: # use fromstring, format in Card is deprecated in pyfits 0.9 _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1]) _card = fits.Card.fromstring(_str) else: _card = fits.Card(key=key[i-1], value=val, comment=comm[i-1]) phdr.insert(phdr_indx+i, _card) # deal with bscale/bzero if (_bscale != 1 or _bzero != 0): phdr['BSCALE'] = _bscale phdr['BZERO'] = _bzero #hdulist.append(ext_hdu) # Define new table based on Column definitions ext_table = fits.TableHDU.from_columns(cols) ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS') # Add column descriptions to header of table extension to match stwfits output for i in range(len(key)): ext_table.header.append(fits.Card(keyword=key[i], value=comm[i])) if errormsg != "": errormsg += "===================================\n" errormsg += "= This file may have been =\n" errormsg += "= written out on a platform =\n" errormsg += "= with a different byte-order. =\n" errormsg += "= =\n" errormsg += "= Please verify that the values =\n" errormsg += "= are correct or apply the =\n" errormsg += "= '.byteswap()' method. 
=\n" errormsg += "===================================\n" print(errormsg) f1.close() hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)]) hdulist.append(ext_table) stsci2(hdulist,input) return hdulist def parse_path(f1, f2): """Parse two input arguments and return two lists of file names""" import glob # if second argument is missing or is a wild card, point it # to the current directory f2 = f2.strip() if f2 == '' or f2 == '*': f2 = './' # if the first argument is a directory, use all GEIS files if os.path.isdir(f1): f1 = os.path.join(f1, '*.??h') list1 = glob.glob(f1) list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.'] # if the second argument is a directory, use file names in the # first argument to construct file names, i.e. # abc.xyh will be converted to abc_xyf.fits if os.path.isdir(f2): list2 = [] for file in list1: name = os.path.split(file)[-1] fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits' list2.append(os.path.join(f2, fitsname)) else: list2 = [s.strip() for s in f2.split(",")] if list1 == [] or list2 == []: err_msg = "" if list1 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f1) if list2 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f2) raise IOError(err_msg) else: return list1, list2 #------------------------------------------------------------------------------- # special initialization when this is the main program if __name__ == "__main__": import getopt try: optlist, args = getopt.getopt(sys.argv[1:], 'hn') except getopt.error as e: print(str(e)) print(__doc__) print("\t", __version__) # initialize default values help = 0 clobber = True # read options for opt, value in optlist: if opt == "-h": help = 1 if opt == "-n": clobber = False if len(args) == 0: help = 1 if (help): print(__doc__) print("\t", __version__) else: if len(args) == 1: args.append('') list1, list2 = parse_path (args[0], args[1]) npairs = min (len(list1), len(list2)) for i in range(npairs): if os.path.exists(list2[i]): if clobber: os.remove(list2[i]) else: print("Output file %s already exists, skip." % list2[i]) break try: hdulist = convert(list1[i]) hdulist.writeto(list2[i]) hdulist.close() print("%s -> %s" % (list1[i], list2[i])) except Exception as e: print("Conversion fails for %s: %s" % (list1[i], str(e))) break """ Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of AURA and its representatives may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ stsci.tools-3.4.12/lib/stsci/tools/convertlog.py0000644001120100020070000001312213071216365023357 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python """ convertlog: Read ASCII trailer file and convert it to a waivered-FITS file. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE Usage: convertlog.py [OPTIONS] trailer_filename :Options: -h print the help (this text) -v print version of task -w --width Width (in chars) for trailer file table column -o --output Name of output FITS trailer file If none is specified, it will convert input file from "rootname.tra" to "rootname_trl.fits" :Example: If used in Pythons script, a user can, e. g.:: >>> import convertlog >>> convertlog.convert(TRLFileName) The most basic usage from the command line:: convertlog test1.tra This command will convert the input ASCII trailer file test1.tra to a waivered-FITS file test1_trl.fits. """ # Developed by Science Software Branch, STScI, USA. from __future__ import division, print_function # confidence high __version__ = "1.0 (7 Jan, 2016), \xa9 AURA" import os import sys from astropy.io import fits import numpy as np import textwrap def convert(input, width=132, output=None, keep=False): """Input ASCII trailer file "input" will be read. The contents will then be written out to a FITS file in the same format as used by 'stwfits' from IRAF. 
Parameters =========== input : str Filename of input ASCII trailer file width : int Number of characters wide to use for defining output FITS column [Default: 132] output : str Filename to use for writing out converted FITS trailer file If None, input filename will be converted from *.tra -> *_trl.fits [Default: None] keep : bool Specifies whether or not to keep any previously written FITS files [Default: False] """ # open input trailer file trl = open(input) # process all lines lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text,width=width)]) # close ASCII trailer file now that we have processed all the lines trl.close() if output is None: # create fits file rootname,suffix = os.path.splitext(input) s = suffix[1:].replace('ra','rl') fitsname = "{}_{}{}fits".format(rootname,s,os.path.extsep) else: fitsname = output full_name = os.path.abspath(os.path.join(os.path.curdir,fitsname)) old_file = os.path.exists(full_name) if old_file: if keep: print("ERROR: Trailer file already written out as: {}".format(full_name)) raise IOError else: os.remove(full_name) # Build FITS table and write it out line_fmt = "{}A".format(width) tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE',format=line_fmt,array=lines)]) tbhdu.writeto(fitsname) print("Created output FITS filename for trailer:{} {}".format(os.linesep,full_name)) os.remove(input) def usage(): print(__doc__) def main(): import getopt try: optlist, args = getopt.getopt(sys.argv[1:], 'hvkw:o:') except getopt.error as e: print(str(e)) print(__doc__) print("\t", __version__) sys.exit(2) output = None width = 132 keep = False for o, a in optlist: if o in ("-h", "--help"): usage() sys.exit() elif o in ("-o", "--output"): output = a elif o in ("-w", "--width"): width = int(a) elif o in ("-k", "--keep"): keep = True else: assert False, "unhandled option" trl_file = args[0] try: print("Converting {}...".format(trl_file)) convert(trl_file, width=width, output=output,keep=keep) except: print("ERROR: Convertlog failed to convert: {}".format(trl_file)) sys.exit(2) #------------------------------------------------------------------------------- # special initialization when this is the main program if __name__ == "__main__": main() """ Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of AURA and its representatives may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ stsci.tools-3.4.12/lib/stsci/tools/convertwaiveredfits.py0000755001120100020070000005103413241163620025273 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python # $Id$ """ Convert a waivered FITS file to various other formats. Syntax for the command line:: convertwaiveredfits.py [-hm] [-o ,...] FILE ... Convert the waivered FITS files (wFITS) to various formats. The default conversion format is multi-extension FITS (MEF). Options:: -h, --help print this help message and exit -v, --verbose turn on verbose output -m, --multiExtensionConversion convert to multi-extension FITS format (Default) -o, --outputFileName comma separated list of output file specifications one per input FILE Default: input file specification with the last character of the base name changed to `h` in multi-extension FITS format For example, conversion of a WFPC2 waivered FITS file obtained from the MAST archive:: convertwaiveredfits u9zh010bm_c0f.fits This will convert the waivered FITS file ``u9zh010bm_c0f.fits`` to multi-extension FITS format and generate the output file ``u9zh010bm_c0h.fits``. Conversion of multiple FITS files can be done using:: convertwaiveredfits -o out1.fits,out2.fits u9zh010bm_c0f.fits u9zh010bm_c1f.fits This will convert the waivered FITS files ``u9zh010bm_c0f.fits`` and ``u9zh010bm_c1f.fits`` to multi-extension FITS format and generate the output files ``out1.fits`` and ``out2.fits``. Parameters ========== waiveredObject: obj input object representing a waivered FITS file; either a ``astropy.io.fits.HDUList`` object, a file object, or a file specification. outputFileName : string file specification for the output file. Default: `None` - do not generate an output file forceFileOutput: boolean force the generation of an output file when the ``outputFileName`` parameter is `None`; the output file specification will be the same as the input file specification with the last character of the base name replaced with the character ``h`` in multi-extension FITS format. Default: False convertTo: string target conversion type. Default: 'multiExtension' verbose: boolean provide verbose output. Default: `False` Returns ======= hduList : fits.HDUList ``astropy.io.fits`` multi-extension FITS object containing converted output Examples ======== >>> import convertwaiveredfits >>> hdulist = convertwaiveredfits.convertwaiveredfits( ... 'u9zh010bm_c0f.fits', forceFileOutput=True) This will convert the waivered FITS file ``u9zh010bm_c0f.fits`` to multi-extension FITS format and write the output to the file ``u9zh010bm_c0h.fits``; the returned ``HDUList`` is in multi-extension FITS format. >>> import convertwaiveredfits >>> with open('u9zh010bm_c0f.fits', mode='rb') as inFile: ... hdulist = convertwaiveredfits.convertwaiveredfits(inFile, 'out.fits') This will convert the waivered FITS file ``u9zh010bm_c0f.fits`` to multi-extension FITS format and write the output to the file ``out.fits``; the returned ``HDUList`` is in multi-extension FITS format. 
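Whichever calling form is used, the conversion creates one image extension
per row of the waivered file's table, so a quick sanity check on the
returned object is:

>>> hdulist[0].header['NEXTEND'] == len(hdulist) - 1
True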
>>> from astropy.io import fits >>> import convertwaiveredfits >>> with fits.open('u9zh010bm_c0f.fits') as inHdul: ... hdulist = convertwaiveredfits.convertwaiveredfits(inHdul) This will convert the waivered FITS file ``u9zh010bm_c0f.fits`` to multi-extension FITS format; no output file is generated; the returned ``HDUList`` is in multi-extension format. """ from __future__ import division, print_function # confidence high # # ----------------------------------------------------------------------------- # Import required modules # ----------------------------------------------------------------------------- # import os import sys import astropy from astropy.io import fits from distutils.version import LooseVersion if sys.version_info[0] < 3: string_types = basestring else: string_types = str ASTROPY_VER_GE13 = LooseVersion(astropy.__version__) >= LooseVersion('1.3') __version__ = "1.1 (15 June, 2015)" # # ----------------------------------------------------------------------------- # Function definitions # ----------------------------------------------------------------------------- # def _usage(): """ Print a usage message. Parameters: NONE Returns: None Exceptions: NONE """ print("usage: convertwaiveredfits.py [-hmv] [-o , ...] FILE ...") def _processCommandLineArgs(): """ Get the command line arguments Parameters: NONE Returns: files list of file specifications to be converted outputFileNames list of output file specifications (one per input file) Default: a list of None values (one per input file) conversionFormat string indicating the conversion format requested Default: "mulitextension" verbose flag indicating if verbose output is desired Default: False Exceptions: NONE """ import getopt try: opts, args = getopt.getopt(sys.argv[1:], "hvmo:", ["help", "verbose", "multiExtensionConversion", "outputFileName"]) except getopt.GetoptError as e: print(str(e)) _usage() sys.exit(1) conversionFormat = "" outputFileNames = [] verbose = False for o, a in opts: if o in ("-h", "--help"): _usage() print(" Convert the waivered FITS Files (FILEs) to various formats.") print(" The default conversion format is multi-extension FITS.") print(" Options:") print(" -h, --help display this help message and exit") print(" -v, --verbose provide verbose output") print(" -m, --multiExtensionConversion convert to multiExtension FITS format") print(" -o, --outputFileName comma separated list of output file") print(" specifications (one per input FILE)") sys.exit() if o in ("-v", "--verbose"): verbose = True if o in ("-m", "--multiExtensionConversion"): if conversionFormat != "": print("convertwaiveredfits.py: only one conversion format allowed") _usage() sys.exit(1) conversionFormat = "multiExtension" if o in ("-o", "--outputFileName"): outputFileNames = a.split(',') if conversionFormat == "": # # Set the default conversion format if none was provided # conversionFormat = "multiExtension" if not args: print("convertwaiveredfits.py: nothing to convert") _usage() sys.exit(1) else: files = args if outputFileNames: if len(files) != len(outputFileNames): print("convertwaiveredfits.py: number of output file names does not match") print(" the number of FILEs to convert") _usage() sys.exit(1) else: for i in range(0,len(files)): outputFileNames.append(None) return files,outputFileNames,conversionFormat,verbose def _verify(waiveredHdul): """ Verify that the input HDUList is for a waivered FITS file. 
Parameters: waiveredHdul HDUList object to be verified Returns: None Exceptions: ValueError Input HDUList is not for a waivered FITS file """ if len(waiveredHdul) == 2: # # There must be exactly 2 HDU's # if waiveredHdul[0].header['NAXIS'] > 0: # # The Primary HDU must have some data # if isinstance(waiveredHdul[1], fits.TableHDU): # # The Alternate HDU must be a TableHDU # if waiveredHdul[0].data.shape[0] == \ waiveredHdul[1].data.shape[0] or \ waiveredHdul[1].data.shape[0] == 1: # # The number of arrays in the Primary HDU must match # the number of rows in the TableHDU. This includes # the case where there is only a single array and row. # return # # Not a valid waivered Fits file # raise ValueError("Input object does not represent a valid waivered" + \ " FITS file") def toMultiExtensionFits(waiveredObject, multiExtensionFileName=None, forceFileOutput=False, verbose=False): """ Convert the input waivered FITS object to a multi-extension FITS HDUList object. Generate an output multi-extension FITS file if requested. Parameters: waiveredObject input object representing a waivered FITS file; either a astroyp.io.fits.HDUList object, a file object, or a file specification outputFileName file specification for the output file Default: None - do not generate an output file forceFileOutput force the generation of an output file when the outputFileName parameter is None; the output file specification will be the same as the input file specification with the last character of the base name replaced with the character 'h'. Default: False verbose provide verbose output Default: False Returns: mhdul an HDUList object in multi-extension FITS format. Exceptions: TypeError Input object is not a HDUList, a file object or a file name """ if isinstance(waiveredObject, fits.HDUList): whdul = waiveredObject inputObjectDescription = "HDUList object" else: try: whdul = fits.open(waiveredObject) if isinstance(waiveredObject, string_types): inputObjectDescription = "file " + waiveredObject else: inputObjectDescription = "file " + waiveredObject.name except TypeError: raise TypeError("Input object must be HDUList, file object, " + \ "or file name") _verify(whdul) undesiredPrimaryHeaderKeywords = ['ORIGIN','FITSDATE','FILENAME', 'ALLG-MAX','ALLG-MIN','ODATTYPE', 'SDASMGNU','OPSIZE','CTYPE2', 'CD2_2','CD2_1','CD1_2','CTYPE3', 'CD3_3','CD3_1','CD1_3','CD2_3', 'CD3_2'] # # Create the multi-extension primary header as a copy of the # wavered file primary header # mPHeader = whdul[0].header originalDataType = whdul[0].header.get('ODATTYPE','') # # Remove primary header cards with keywords matching the # list of undesired primary header keywords # for keyword in undesiredPrimaryHeaderKeywords: # # Be careful only to delete the first card that matches # the keyword, not all of the cards # if keyword in mPHeader: del mPHeader[mPHeader.index(keyword)] # # Get the columns from the secondary HDU table # wcols = whdul[1].columns # # Remove primary header cards with keywords matching the # column names in the secondary HDU table # for keyword in wcols.names: if keyword in mPHeader: del mPHeader[keyword] # # Create the PrimaryHDU # mPHdu = fits.PrimaryHDU(header=mPHeader) # # Add the EXTEND card # mPHdu.header.set('EXTEND', value=True, after='NAXIS') # # Add the NEXTEND card. There will be one extension # for each row in the wavered Fits file table HDU. 
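    # (For example, a waivered WFPC2 file whose primary HDU holds a
    # hypothetical (4, 800, 800) data cube alongside a 4-row table HDU
    # becomes a primary HDU plus 4 ImageHDUs here, with NEXTEND = 4.)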
# mPHdu.header['NEXTEND'] = (whdul[1].data.shape[0], 'Number of standard extensions') # # Create the multi-extension file HDUList from the primary header # mhdul = fits.HDUList([mPHdu]) # # Create the extension HDUs for the multi-extension file. There # will be one extension for each row in the wavered file's table. # instrument = mPHeader.get('INSTRUME', '') nrows = whdul[1].data.shape[0] for i in range(0,nrows): # # Create the basic HDU from the data # if nrows == 1: # # Handle case where there is only one row in the table # data = whdul[0].data else: data = whdul[0].data[i] mhdul.append(fits.ImageHDU(data)) # # Add cards to the header for each keyword in the column # names of the secondary HDU table from the wavered file # for keyword,format,unit in zip(wcols.names,wcols.formats,wcols.units): if unit == 'LOGICAL-': # # Handle logical values # if whdul[1].data.field(keyword)[i].strip() == 'T': d = True else: d = False elif format[0] == 'E': # # Handle floating point values # fmt = '%'+format[1:]+'G' d = eval(fmt % float(whdul[1].data.field(keyword)[i])) else: d = whdul[1].data.field(keyword)[i] kw_descr = "" if keyword in whdul[1].header: kw_descr = whdul[1].header[keyword] mhdul[i+1].header[keyword] = (d, kw_descr) # # If original data is unsigned short then scale the data. # if originalDataType == 'USHORT': mhdul[i+1].scale('int16','',bscale=1,bzero=32768) mhdul[i+1].header.set('BSCALE', value=1, before='BZERO') # # For WFPC2 and FOS instruments require additional header cards # if instrument in ('WFPC2','FOC'): # # Add EXTNAME card to header # mhdul[i+1].header['EXTNAME'] = (mPHeader.get('FILETYPE',''), 'extension name') # # Add EXTVER card to the header # mhdul[i+1]._extver = i+1 mhdul[i+1].header.set('EXTVER', value=i+1, comment='extension version number', after='EXTNAME') # # Add the EXPNAME card to the header # mhdul[i+1].header.set('EXPNAME', mPHeader.get('ROOTNAME', ''), '9 character exposure identifier', before='EXTVER') # # Add the INHERIT card to the header. # mhdul[i+1].header.set('INHERIT', True, 'inherit the primary header', after='EXTVER') # # Add the ROOTNAME card to the header # mhdul[i+1].header.set('ROOTNAME', mPHeader.get('ROOTNAME', ''), 'rootname of the observationset', after='INHERIT') if not multiExtensionFileName and forceFileOutput: base,ext = os.path.splitext(whdul[0]._file.name) multiExtensionFileName = base[:-1]+'h'+ext verboseString = "Input " + inputObjectDescription + \ " converted to multi-extension FITS format." if multiExtensionFileName: if instrument in ('WFPC2','FOC'): # # write the FILENAME card to the header for the WFPC2 and FOC # instruments # head,tail = os.path.split(multiExtensionFileName) mhdul[0].header.set('FILENAME', value=tail, after='NEXTEND') if ASTROPY_VER_GE13: mhdul.writeto(multiExtensionFileName, overwrite=True) else: mhdul.writeto(multiExtensionFileName, clobber=True) verboseString = verboseString[:-1] + " and written to " + \ multiExtensionFileName + "." if verbose: print(verboseString) return mhdul def convertwaiveredfits(waiveredObject, outputFileName=None, forceFileOutput=False, convertTo='multiExtension', verbose=False): """ Convert the input waivered FITS object to various formats. The default conversion format is multi-extension FITS. Generate an output file in the desired format if requested. 
Parameters: waiveredObject input object representing a waivered FITS file; either a astropy.io.fits.HDUList object, a file object, or a file specification outputFileName file specification for the output file Default: None - do not generate an output file forceFileOutput force the generation of an output file when the outputFileName parameter is None; the output file specification will be the same as the input file specification with the last character of the base name replaced with the character `h` in multi-extension FITS format. Default: False convertTo target conversion type Default: 'multiExtension' verbose provide verbose output Default: False Returns: hdul an HDUList object in the requested format. Exceptions: ValueError Conversion type is unknown """ if convertTo == 'multiExtension': func = toMultiExtensionFits else: raise ValueError('Conversion type ' + convertTo + ' unknown') return func(*(waiveredObject,outputFileName,forceFileOutput,verbose)) # # ***************************************************************************** # Main Program callable from the shell # ***************************************************************************** # def main() : files,outputFiles,conversionFormat,verbose = _processCommandLineArgs() for f,outputfile in zip(files,outputFiles): convertwaiveredfits(f,outputfile,True,conversionFormat,verbose) sys.exit() if __name__ == '__main__': main() """ Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of AURA and its representatives may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ stsci.tools-3.4.12/lib/stsci/tools/dialog.py0000644001120100020070000000375013017116245022436 0ustar jhunkSTSCI\science00000000000000#### # Class Dialog # # Purpose # Base class for many dialog box classes. 
#### """ $Id$ """ from __future__ import division # confidence high import sys PY3K = sys.version_info[0] > 2 if PY3K: from tkinter import * else: from Tkinter import * class Dialog: def __init__(self, master): self.master = master self.top = Toplevel(self.master) self.top.title(self.__class__.__name__) self.top.minsize(1, 1) self.myWaitVar = str(self.top) + 'EndDialogVar' def Show(self): self.SetupDialog() self.CenterDialog() self.top.deiconify() self.top.focus() def TerminateDialog(self, withValue): self.top.setvar(self.myWaitVar, withValue) self.top.withdraw() def DialogCleanup(self): self.top.destroy() self.master.focus() def SetupDialog(self): pass def CenterDialog(self): self.top.withdraw() self.top.update_idletasks() w = self.top.winfo_screenwidth() h = self.top.winfo_screenheight() reqw = self.top.winfo_reqwidth() reqh = self.top.winfo_reqheight() centerx = str((w-reqw)//2) centery = str((h-reqh)//2 - 100) geomStr = "+" + centerx + "+" + centery self.top.geometry(geomStr) #### # Class ModalDialog # # Purpose # Base class for many modal dialog box classes. #### class ModalDialog(Dialog): def __init__(self, master): Dialog__init__(self, master) def Show(self): self.SetupDialog() self.CenterDialog() try: self.top.grab_set() # make it modal except TclError: # This fails on Linux, but does it really HAVE to be modal if sys.platform.lower().find('linux') >= 0: pass else: raise self.top.focus() self.top.deiconify() self.top.waitvar(self.myWaitVar) return int(self.top.getvar(self.myWaitVar)) stsci.tools-3.4.12/lib/stsci/tools/editpar.py0000644001120100020070000021316013112074217022623 0ustar jhunkSTSCI\science00000000000000"""module 'editpar.py' -- main module for generating the EPAR task editor $Id$ Taken from pyraf/lib/epar.py, originally signed "M.D. De La Pena, 2000 Feb. 4" """ from __future__ import absolute_import, division, print_function # confidence high #System level modules import os, sys, tempfile, time from . import capable PY3K = sys.version_info[0] > 2 if capable.OF_GRAPHICS: if PY3K: from tkinter import _default_root from tkinter import * from tkinter.filedialog import asksaveasfilename from tkinter.messagebox import askokcancel, askyesno, showwarning else: from Tkinter import _default_root from Tkinter import * from tkFileDialog import asksaveasfilename from tkMessageBox import askokcancel, askyesno, showwarning # stsci.tools modules from .irafglobals import userWorkingHome from . import basicpar, eparoption, irafutils, taskpars # Constants MINVIEW = 500 MINPARAMS = 25 INPUTWIDTH = 10 VALUEWIDTH = 21 PROMPTWIDTH = 55 DFT_OPT_FILE = "epar.optionDB" TIP = "rollover" DBG = "debug" # The following action types are used within the GUI code. They define what # kind of GUI action actually caused a parameter's value to be adjusted. # This is meant to be like an enum. These values may appear in a task's # task.cfgspc file in a rule. In that file, the value 'always' may be used, in # addition to these values, to indicate a match to all possible action types. 
GROUP_ACTIONS = ('defaults','init','fopen','entry') # init -> startup of the GUI # defaults -> the user clicked the Defaults or Reset button # fopen -> the user loaded a config file # entry -> the user actually edited a parameter (via mouse or keyboard) # Use these values for startup geometry ***for now*** # PARENT is the main editor window PARENTX = 50 PARENTY = 50 # DCHILD[XY] are amounts each successive child shifts DCHILDX = 50 DCHILDY = 50 # CHILD[XY] is a PSET window CHILDX = PARENTX CHILDY = PARENTY # HELP[XY] is for the help as displayed in a window HELPX = 300 HELPY = 25 class UnfoundParamError(Exception): pass class EditParDialog(object): def __init__(self, theTask, parent=None, isChild=0, title="Parameter Editor", childList=None, resourceDir='.'): # Initialize status message stuff first thing self._leaveStatusMsgUntil = 0 self._msgHistory = [] # all msgs, of all kinds, since we opened self._statusMsgsToShow = [] # keep a *small* number of late msgs self.debug('Starting up the GUI!') # Call our (or a subclass's) _setTaskParsObj() method self._setTaskParsObj(theTask) # Now go back and ensure we have the full taskname; set up other items self._canceled = False self._executed = False self._guiName = title self.taskName = self._taskParsObj.getName() self.pkgName = self._taskParsObj.getPkgname() theParamList = self._taskParsObj.getParList(docopy=1) self._rcDir = resourceDir self.debug('TASK: '+self.taskName+', PKG: '+self.pkgName+ \ ', RC: '+self._rcDir) # setting _tmwm=1 is the slowest motion, 7 seems OK, 10 maybe too fast self._tmwm = int(os.getenv('TEAL_MOUSE_WHEEL_MULTIPLIER', 7)) # Get default parameter values for unlearn - watch return value # NOTE - this may edit/reorder the working paramList if not self._setupDefaultParamList(): return # Ignore the last parameter which is $nargs self.numParams = len(theParamList) - 1 # Set all default master GUI settings, then # allow subclasses to override them self._appName = "Par Editor" self._appHelpString = "No help yet created for this GUI editor" self._useSimpleAutoClose = False # certain buttons close GUI also self._showExecuteButton = True self._showSaveCloseOnExec = True self._saveAndCloseOnExec = True self._showFlaggingChoice = True self._flagNonDefaultVals = None # default not yet set self._showExtraHelpButton = False self._showHelpInBrowser = False self._knowTaskHelpIsHtml = False self._unpackagedTaskTitle = "Task" self._writeProtectOnSaveAs= True self._defaultsButtonTitle = "Defaults" self._optFile = DFT_OPT_FILE self._defSaveAsExt = '.cfg' # Colors self._frmeColor = None # frame of window self._taskColor = None # task label area self._bboxColor = None # button area self._entsColor = None # entries area self._flagColor = "red" # non-default values # give the subclass a chance to disagree self._overrideMasterSettings() # give the subclass a chance to disagree # any settings which depend on overrides if self._flagNonDefaultVals is None: self._flagNonDefaultVals = self._showFlaggingChoice # default # Create the root window as required, but hide it self.parent = parent if self.parent is None: global _default_root if _default_root is None: _default_root = irafutils.init_tk_default_root() # Track whether this is a parent or child window self.isChild = isChild # Set up a color for each of the backgrounds if self.isChild: # self._frmeColor = "LightSteelBlue" self.iconLabel = "EPAR Child" else: self.iconLabel = "EPAR Parent" # help windows do not exist yet self.eparHelpWin = None self.irafHelpWin = None self.logHistWin = None # no last 
focus widget
        self.lastFocusWidget = None

        # Generate the top epar window
        self.top = top = Toplevel(self.parent, bg=self._frmeColor, visual="best")
        self.top.withdraw() # hide it while we fill it up with stuff
        if len(self.pkgName):
            self.updateTitle(self.pkgName+"."+self.taskName)
        else:
            self.updateTitle(self.taskName)
        self.top.iconname(self.iconLabel)

        # Read in the tk options database file
        try:
            # User's current directory
            self.top.option_readfile(os.path.join(os.curdir, self._optFile))
        except TclError:
            try:
                # User's startup directory
                self.top.option_readfile(os.path.join(userWorkingHome,
                                                      self._optFile))
            except TclError:
                try:
                    # App default
                    self.top.option_readfile(os.path.join(self._rcDir,
                                                          self._optFile))
                except TclError:
                    if self._optFile != DFT_OPT_FILE:
                        pass
                    else:
                        raise

        # Create an empty list to hold child dialogs
        # *** Not a good way, REDESIGN with Mediator!
        # Also, build the parent menu bar
        if self.parent is None:
            self.top.childList = []
        elif childList is not None:
            # all children share a list
            self.top.childList = childList

        # Build the EPAR menu bar
        self.makeMenuBar(self.top)

        # Create a spacer
        Frame(self.top, bg=self._taskColor, height=10).pack(side=TOP, fill=X)

        # Print the package and task names
        self.printNames(self.top, self.taskName, self.pkgName)

        # Insert a spacer between the static text and the buttons
        Frame(self.top, bg=self._taskColor, height=15).pack(side=TOP, fill=X)

        # Set control buttons at the top of the frame
        self.buttonBox(self.top)

        # Insert a spacer between the static text and the buttons
        Frame(self.top, bg=self._entsColor, height=15).pack(side=TOP, fill=X)

        # Set up an information Frame at the bottom of the EPAR window
        # RESIZING is currently disabled.
        # Do this here so when resizing to a smaller size, the parameter
        # panel is reduced - not the information frame.
        self.top.status = Label(self.top, text="", relief=SUNKEN,
                                borderwidth=1, anchor=W, bg=self._frmeColor)
        self.top.status.pack(side=BOTTOM, fill=X, padx=0, pady=3, ipady=3)

        # Set up a Frame to hold a scrollable Canvas
        self.top.f = frame = Frame(self.top, relief=RIDGE, borderwidth=1,
                                   bg=self._entsColor)

        # Overlay a Canvas which will hold a Frame
        self.top.f.canvas = canvas = Canvas(self.top.f, width=100, height=100,
            takefocus=FALSE, bg=self._entsColor,
            highlightbackground=self._entsColor)
        # highlightcolor="black" # black must be the default, since it is blk

        # Always build the scrollbar, even if number of parameters is small,
        # to allow window to be resized.

        # Attach a vertical Scrollbar to the Frame/Canvas
        self.top.f.vscroll = Scrollbar(self.top.f, orient=VERTICAL, width=11,
            relief=SUNKEN, activerelief=RAISED, takefocus=FALSE,
            bg=self._entsColor)
        canvas['yscrollcommand'] = self.top.f.vscroll.set
        self.top.f.vscroll['command'] = canvas.yview

        # Pack the Scrollbar
        self.top.f.vscroll.pack(side=RIGHT, fill=Y)

        # enable Page Up/Down keys
        scroll = canvas.yview_scroll
        top.bind('<Next>', lambda event, fs=scroll: fs(1, "pages"))
        top.bind('<Prior>', lambda event, fs=scroll: fs(-1, "pages"))

        # make up, down arrows and return/shift-return do same as Tab, Shift-Tab
        top.bind('<Up>', self.focusPrev)
        top.bind('<MouseWheel>', self.mwl) # on OSX, rolled up or down
        top.bind('<Button-4>', self.mwl)   # on Linux, rolled up
        top.bind('<Button-5>', self.mwl)   # on Linux, rolled down
        top.bind('<Down>', self.focusNext)
        top.bind('<Shift-Return>', self.focusPrev)
        top.bind('<Return>', self.focusNext)
        try: # special shift-tab binding needed for (some? all?) linux systems
            top.bind('<KeyPress-ISO_Left_Tab>', self.focusPrev)
        except TclError:
            # Ignore exception here, the binding can't be relevant
            # if ISO_Left_Tab is unknown.
pass # Pack the Frame and Canvas canvas.pack(side=TOP, expand=TRUE, fill=BOTH) self.top.f.pack(side=TOP, expand=TRUE, fill=BOTH) # Define a Frame to contain the parameter information canvas.entries = Frame(canvas, bg=self._entsColor) # Generate the window to hold the Frame which sits on the Canvas cWindow = canvas.create_window(0, 0, anchor=NW, window=canvas.entries) # Insert a spacer between the Canvas and the information frame Frame(self.top, bg=self._entsColor, height=4).pack(side=TOP, fill=X) # The parent has the control, unless there are children # Fix the geometry of where the windows first appear on the screen if self.parent is None: #self.top.grab_set() # Position this dialog relative to the parent self.top.geometry("+%d+%d" % (PARENTX, PARENTY)) else: #self.parent.grab_release() #self.top.grab_set() # Declare the global variables so they can be updated global CHILDX global CHILDY # Position this dialog relative to the parent CHILDX = CHILDX + DCHILDX CHILDY = CHILDY + DCHILDY self.top.geometry("+%d+%d" % (CHILDX, CHILDY)) # # Now fill in the Canvas Window # # The makeEntries method creates the parameter entry Frame self.makeEntries(canvas.entries, self.top.status) # Force an update of the entry Frame canvas.entries.update() # Determine the size of the entry Frame width = canvas.entries.winfo_width() height = canvas.entries.winfo_height() # Reconfigure the Canvas size based on the Frame. if (self.numParams <= MINPARAMS): viewHeight = height else: # Set the minimum display viewHeight = MINVIEW # Scrollregion is based upon the full size of the entry Frame canvas.config(scrollregion=(0, 0, width, height)) # Smooth scroll self.yscrollincrement = 5 # changed Mar2010, had been 50 a long time canvas.config(yscrollincrement=self.yscrollincrement) # Set the actual viewable region for the Canvas canvas.config(width=width, height=viewHeight) # Force an update of the Canvas canvas.update() # Associate deletion of the main window to a Abort self.top.protocol("WM_DELETE_WINDOW", self.abort) # Trigger all widgets one time before starting in case they have # values which would run a trigger self.checkAllTriggers('init') # Set focus to first parameter self.setViewAtTop() # Finally show it self.top.update() self.top.deiconify() # Enable interactive resizing in height self.top.resizable(width=FALSE, height=TRUE) # Limit maximum window height width = self.top.winfo_width() height = self.top.winfo_height() + height - viewHeight self.top.maxsize(width=width, height=height) self.debug('showing '+self._appName+' main window') # run the mainloop if not self.isChild: self._preMainLoop() self.top.mainloop() self._postMainLoop() def _overrideMasterSettings(self): """ Hook for subclasses to override some attributes if wished. """ return def _preMainLoop(self): """ Hook for subclasses to override if wished. """ return def _postMainLoop(self): """ Hook for subclasses to override if wished. """ return def _showOpenButton(self): """ Should we show the "Open..." button? Subclasses override. """ return True def _setTaskParsObj(self, theTask): """ This method, meant to be overridden by subclasses, generates the _taskParsObj object. theTask can often be either a file name or a TaskPars subclass object. """ # Here we catch if this version is run by accident raise NotImplementedError("EditParDialog is not to be used directly") def _saveGuiSettings(self): """ Hook for subclasses to save off GUI settings somewhere. 
""" return # skip this by default def updateTitle(self, atitle): if atitle: self.top.title('%s: %s' % (self._guiName, atitle)) else: self.top.title('%s' % (self._guiName)) def checkAllTriggers(self, action): """ Go over all widgets and let them know they have been edited recently and they need to check for any trigger actions. This would be used right after all the widgets have their values set or forced (e.g. via setAllEntriesFromParList). """ for entry in self.entryNo: entry.widgetEdited(action=action, skipDups=False) def freshenFocus(self): """ Did something which requires a new look. Move scrollbar up. This often needs to be delayed a bit however, to let other events in the queue through first. """ self.top.update_idletasks() self.top.after(10, self.setViewAtTop) def setViewAtTop(self): self.entryNo[0].focus_set() self.top.f.canvas.xview_moveto(0.0) self.top.f.canvas.yview_moveto(0.0) def getTaskParsObj(self): """ Simple accessor. Return the _taskParsObj object. """ return self._taskParsObj def mwl(self, event): """Mouse Wheel - under tkinter we seem to need Tk v8.5+ for this """ if event.num == 4: # up on Linux self.top.f.canvas.yview_scroll(-1*self._tmwm, 'units') elif event.num == 5: # down on Linux self.top.f.canvas.yview_scroll(1*self._tmwm, 'units') else: # assume event.delta has the direction, but reversed sign self.top.f.canvas.yview_scroll(-(event.delta)*self._tmwm, 'units') # A bug appeared in Python 2.3 that caused tk_focusNext and # tk_focusPrev to fail. The follwoing two routines now will # trap this error and call "fixed" versions of these tk routines # instead in the event of such errors. def focusNext(self, event): """Set focus to next item in sequence""" try: event.widget.tk_focusNext().focus_set() except TypeError: # see tkinter equivalent code for tk_focusNext to see # commented original version name = event.widget.tk.call('tk_focusNext', event.widget._w) event.widget._nametowidget(str(name)).focus_set() def focusPrev(self, event): """Set focus to previous item in sequence""" try: event.widget.tk_focusPrev().focus_set() except TypeError: # see tkinter equivalent code for tk_focusPrev to see # commented original version name = event.widget.tk.call('tk_focusPrev', event.widget._w) event.widget._nametowidget(str(name)).focus_set() def doScroll(self, event): """Scroll the panel down to ensure widget with focus to be visible Tracks the last widget that doScroll was called for and ignores repeated calls. That handles the case where the focus moves not between parameter entries but to someplace outside the hierarchy. In that case the scrolling is not expected. Returns false if the scroll is ignored, else true. 
"""
        canvas = self.top.f.canvas
        widgetWithFocus = event.widget
        if widgetWithFocus is self.lastFocusWidget:
            return FALSE
        self.lastFocusWidget = widgetWithFocus
        if widgetWithFocus is None:
            return TRUE

        # determine distance of widget from top & bottom edges of canvas
        y1 = widgetWithFocus.winfo_rooty()
        y2 = y1 + widgetWithFocus.winfo_height()
        cy1 = canvas.winfo_rooty()
        cy2 = cy1 + canvas.winfo_height()
        yinc = self.yscrollincrement
        if y1 < cy1:
            # the widget is above the visible part of the canvas - scroll up
            sdist = int((y1 - cy1 - yinc + 1.) / yinc)
            canvas.yview_scroll(sdist, "units")
        elif cy2 < y2:
            # the widget is below the visible part of the canvas - scroll down
            sdist = int((y2 - cy2 + yinc - 1.) / yinc)
            canvas.yview_scroll(sdist, "units")
        return TRUE

    def makeEntries(self, master, statusBar):
        """Create the parameter entry widgets on the given master frame"""
        # Get model data, the list of pars
        theParamList = self._taskParsObj.getParList()

        # Determine the size of the longest input string
        inputLength = INPUTWIDTH
        for i in range(self.numParams):
            inputString = theParamList[i].name
            if len(inputString) > inputLength:
                inputLength = len(inputString)

        # Set up the field widths
        # Allow extra spaces for buffer and in case the longest parameter
        # has the hidden parameter indicator
        self.fieldWidths = {}
        self.fieldWidths['inputWidth'] = inputLength + 4
        self.fieldWidths['valueWidth'] = VALUEWIDTH
        self.fieldWidths['promptWidth'] = PROMPTWIDTH

        # Loop over the parameters to create the entries
        self.entryNo = [None] * self.numParams
        dfltsVerb = self._defaultsButtonTitle
        if dfltsVerb[-1] == 's': dfltsVerb = dfltsVerb[:-1]
        for i in range(self.numParams):
            scope = theParamList[i].scope
            eparOpt = self._nonStandardEparOptionFor(theParamList[i].type)
            cbo = self._defineEditedCallbackObjectFor(scope,
                                                      theParamList[i].name)
            hcbo = None
            if self._knowTaskHelpIsHtml:
                hcbo = self
            self.entryNo[i] = eparoption.eparOptionFactory(master, statusBar,
                theParamList[i], self.defaultParamList[i],
                self.doScroll, self.fieldWidths,
                plugIn=eparOpt, editedCallbackObj=cbo,
                helpCallbackObj=hcbo, mainGuiObj=self,
                defaultsVerb=dfltsVerb, bg=self._entsColor,
                indent = scope not in (None, '', '.'),
                flagging = self._flagNonDefaultVals,
                flaggedColor=self._flagColor)

    def _nonStandardEparOptionFor(self, paramTypeStr):
        """ Hook to allow subclasses to employ their own GUI option type.
            Return None or a class which derives from EparOption. """
        return None

    def _defineEditedCallbackObjectFor(self, parScope, parName):
        """ Hook to allow subclasses to set their own callback-containing
            object to be used when a given option/parameter is edited.
            See notes in EparOption. """
        return None

    def _isUnpackagedTask(self):
        """ Hook to allow subclasses to state that this is a rogue task, not
            affiliated with a specific package, affecting its display. """
        return self.pkgName is None or len(self.pkgName) < 1

    def _toggleSectionActiveState(self, sectionName, state, skipList):
        """ Make an entire section (minus skipList items) either active or
            inactive.  sectionName is the same as the param's scope. """
        # Get model data, the list of pars
        theParamList = self._taskParsObj.getParList()

        # Loop over their assoc.
entries for i in range(self.numParams): if theParamList[i].scope == sectionName: if skipList and theParamList[i].name in skipList: # self.entryNo[i].setActiveState(True) # these always active pass # if it started active, we don't need to reactivate it else: self.entryNo[i].setActiveState(state) # Method to print the package and task names and to set up the menu # button for the choice of the display for the task help page def printNames(self, top, taskName, pkgName): topbox = Frame(top, bg=self._taskColor) textbox = Frame(topbox, bg=self._taskColor) # helpbox = Frame(topbox, bg=self._taskColor) # Set up the information strings if self._isUnpackagedTask(): # label for a parameter list is just filename packString = " "+self._unpackagedTaskTitle+" = "+taskName Label(textbox, text=packString, bg=self._taskColor).pack(side=TOP, anchor=W) else: # labels for task packString = " Package = " + pkgName.upper() Label(textbox, text=packString, bg=self._taskColor).pack(side=TOP, anchor=W) taskString = " Task = " + taskName.upper() Label(textbox, text=taskString, bg=self._taskColor).pack(side=TOP, anchor=W) textbox.pack(side=LEFT, anchor=W) topbox.pack(side=TOP, expand=FALSE, fill=X) # Method to set up the parent menu bar def makeMenuBar(self, top): menubar = Frame(top, bd=1, relief=GROOVE, bg=self._frmeColor) # Generate the menus fileMenu = self.makeFileMenu(menubar) if self._showOpenButton(): openMenu = self.makeOpenMenu(menubar) # When redesigned, optionsMenu should only be on the parent #if not self.isChild: # optionsMenu = self.makeOptionsMenu(menubar) optionsMenu = self.makeOptionsMenu(menubar) helpMenu = self.makeHelpMenu(menubar) menubar.pack(fill=X) # Method to generate a "File" menu def makeFileMenu(self, menubar): fileButton = Menubutton(menubar, text='File', bg=self._frmeColor) fileButton.pack(side=LEFT, padx=2) fileButton.menu = Menu(fileButton, tearoff=0) # fileButton.menu.add_command(label="Open...", command=self.pfopen) if self._showExecuteButton: fileButton.menu.add_command(label="Execute", command=self.execute) if self.isChild: fileButton.menu.entryconfigure(0, state=DISABLED) saqlbl ="Save" if self._useSimpleAutoClose: saqlbl += " & Quit" fileButton.menu.add_command(label=saqlbl, command=self.saveAndClose) if not self.isChild: fileButton.menu.add_command(label="Save As...", command=self.saveAs) fileButton.menu.add_separator() fileButton.menu.add_command(label=self._defaultsButtonTitle, command=self.unlearn) fileButton.menu.add_separator() if not self._useSimpleAutoClose: fileButton.menu.add_command(label="Close", command=self.closeGui) fileButton.menu.add_command(label="Cancel", command=self.abort) # Associate the menu with the menu button fileButton["menu"] = fileButton.menu return fileButton def _updateOpen(self): # Get new data flist = self._getOpenChoices() # Delete old choices if self._numOpenMenuItems > 0: self._openMenu.delete(0, self._numOpenMenuItems-1) # Add all new choices self._numOpenMenuItems = len(flist) if self._numOpenMenuItems > 0: for ff in flist: if ff[-3:] == '...': self._openMenu.add_separator() self._numOpenMenuItems += 1 self._openMenu.add_radiobutton(label=ff, command=self.pfopen, variable=self._openMenuChoice, indicatoron=0) # value=ff) ... (same as label) self._openMenuChoice.set(0) # so nothing has check mark next to it else: showwarning(title="No Files To Open", message="No extra "+ \ 'parameter files found for task "'+self.taskName+'".') def _getOpenChoices(self): """ Get the current list of file name choices for the Open button. 
This is meant for subclasses to override. """
        return []

    # Method to generate an "Open" menu
    def makeOpenMenu(self, menubar):
        self._openMenuChoice = StringVar() # this is used till GUI closes
        self._numOpenMenuItems = 1 # see dummy
        openBtn = Menubutton(menubar, text='Open...', bg=self._frmeColor)
        openBtn.bind("<Enter>", self.printOpenInfo)
        openBtn.pack(side=LEFT, padx=2)
        openBtn.menu = Menu(openBtn, tearoff=0, postcommand=self._updateOpen)
        openBtn.menu.bind("<Enter>", self.printOpenInfo)
        openBtn.menu.add_radiobutton(label=' ', # dummy, no command
                                     variable=self._openMenuChoice)
                                     # value=fname ... (same as label)
        if self.isChild:
            openBtn.menu.entryconfigure(0, state=DISABLED)

        # Associate the menu with the menu button
        openBtn["menu"] = openBtn.menu

        # Keep a ref for ourselves
        self._openMenu = openBtn.menu
        return openBtn

    # Method to generate the "Options" menu for the parent EPAR only
    def makeOptionsMenu(self, menubar):
        # Set up the menu for the various choices they have
        self._helpChoice = StringVar()
        if self._showHelpInBrowser:
            self._helpChoice.set("BROWSER")
        else:
            self._helpChoice.set("WINDOW")

        if self._showSaveCloseOnExec:
            self._execChoice = IntVar()
            self._execChoice.set(int(self._saveAndCloseOnExec))

        optionButton = Menubutton(menubar, text="Options", bg=self._frmeColor)
        optionButton.pack(side=LEFT, padx=2)
        optionButton.menu = Menu(optionButton, tearoff=0)
        optionButton.menu.add_radiobutton(label="Display Task Help in a Window",
                                          value="WINDOW",
                                          command=self.setHelpType,
                                          variable=self._helpChoice)
        optionButton.menu.add_radiobutton(label="Display Task Help in a Browser",
                                          value="BROWSER",
                                          command=self.setHelpType,
                                          variable=self._helpChoice)

        if self._showExecuteButton and self._showSaveCloseOnExec:
            optionButton.menu.add_separator()
            optionButton.menu.add_checkbutton(label="Save and Close on Execute",
                                              command=self.setExecOpt,
                                              variable=self._execChoice)
        if self._showFlaggingChoice:
            self._flagChoice = IntVar()
            self._flagChoice.set(int(self._flagNonDefaultVals))
            optionButton.menu.add_separator()
            optionButton.menu.add_checkbutton(label="Flag Non-default Values",
                                              command=self.setFlagOpt,
                                              variable=self._flagChoice)

        # Associate the menu with the menu button
        optionButton["menu"] = optionButton.menu

        return optionButton

    def capTaskName(self):
        """ Return task name with first letter capitalized. """
        return self.taskName[:1].upper() + self.taskName[1:]

    def makeHelpMenu(self, menubar):
        button = Menubutton(menubar, text='Help', bg=self._frmeColor)
        button.bind("<Enter>", self.printHelpInfo)
        button.pack(side=RIGHT, padx=2)
        button.menu = Menu(button, tearoff=0)
        button.menu.bind("<Enter>", self.printHelpInfo)
        button.menu.add_command(label=self.capTaskName()+" Help",
                                command=self.showTaskHelp)
        button.menu.add_command(label=self._appName+" Help",
                                command=self.eparHelp)
        button.menu.add_separator()
        button.menu.add_command(label='Show '+self._appName+' Log',
                                command=self.showLogHist)
        button["menu"] = button.menu
        return button

    # Method to set up the action buttons
    # Create the buttons in an order for good navigation
    def buttonBox(self, top):
        box = Frame(top, bg=self._bboxColor, bd=1, relief=SUNKEN)

        # When the Button is exited, the information clears, and the
        # Button goes back to the nonactive color.
top.bind("", self.clearInfo) # Execute the task if self._showExecuteButton: buttonExecute = Button(box, text="Execute", bg=self._bboxColor, relief=RAISED, command=self.execute, highlightbackground=self._bboxColor) buttonExecute.pack(side=LEFT, padx=5, pady=7) buttonExecute.bind("", self.printExecuteInfo) if not self._useSimpleAutoClose: # separate this button from the others - it's unusual strut = Label(box, text="", bg=self._bboxColor) strut.pack(side=LEFT, padx=20) # EXECUTE button is disabled for child windows if self.isChild: buttonExecute.configure(state=DISABLED) # Save the parameter settings and exit from epar saqlbl ="Save" if self._useSimpleAutoClose: saqlbl += " & Quit" btn = Button(box, text=saqlbl, relief=RAISED, command=self.saveAndClose, bg=self._bboxColor, highlightbackground=self._bboxColor) btn.pack(side=LEFT, padx=5, pady=7) btn.bind("", self.printSaveQuitInfo) # Unlearn all the parameter settings (set back to the defaults) buttonUnlearn = Button(box, text=self._defaultsButtonTitle, relief=RAISED, command=self.unlearn, bg=self._bboxColor, highlightbackground=self._bboxColor) if self._showExtraHelpButton: buttonUnlearn.pack(side=LEFT, padx=5, pady=7) else: buttonUnlearn.pack(side=RIGHT, padx=5, pady=7) buttonUnlearn.bind("", self.printUnlearnInfo) # Buttons to close versus abort this edit session. if not self._useSimpleAutoClose: buttonClose = Button(box, text="Close", relief=RAISED, command=self.closeGui, bg=self._bboxColor, highlightbackground=self._bboxColor) buttonClose.pack(side=LEFT, padx=5, pady=7) buttonClose.bind("", self.printCloseInfo) buttonAbort = Button(box, text="Cancel", bg=self._bboxColor, relief=RAISED, command=self.abort, highlightbackground=self._bboxColor) buttonAbort.pack(side=LEFT, padx=5, pady=7) buttonAbort.bind("", self.printAbortInfo) # Generate the Help button if self._showExtraHelpButton: buttonHelp = Button(box, text=self.capTaskName()+" Help", relief=RAISED, command=self.showTaskHelp, bg=self._bboxColor, highlightbackground=self._bboxColor) buttonHelp.pack(side=RIGHT, padx=5, pady=7) buttonHelp.bind("", self.printHelpInfo) # Pack box.pack(fill=X, expand=FALSE) def setExecOpt(self, event=None): self._saveAndCloseOnExec = bool(self._execChoice.get()) def setFlagOpt(self, event=None): self._flagNonDefaultVals = bool(self._flagChoice.get()) for entry in self.entryNo: entry.setIsFlagging(self._flagNonDefaultVals, True) def setHelpType(self, event=None): """ Determine which method of displaying the help pages was chosen by the user. WINDOW displays in a task generated scrollable window. BROWSER invokes the task's HTML help pages and displays in a browser. 
""" self._showHelpInBrowser = bool(self._helpChoice.get() == "BROWSER") def eparHelp(self, event=None): self._showAnyHelp('epar') def showTaskHelp(self, event=None): self._showAnyHelp('task') def showParamHelp(self, parName): self._showAnyHelp('task', tag=parName) def showLogHist(self, event=None): self._showAnyHelp('log') # # Define flyover help text associated with the action buttons # def clearInfo(self, event): self.showStatus("") def printHelpInfo(self, event): self.showStatus("Display the help page", cat=TIP) def printUnlearnInfo(self, event): self.showStatus("Set all parameter values to their default settings", cat=TIP) def printSaveQuitInfo(self, event): if self._useSimpleAutoClose: self.showStatus("Save current entries and exit this edit session", cat=TIP) else: self.showStatus("Save the current entries to "+ \ self._taskParsObj.getFilename(), cat=TIP) def printOpenInfo(self, event): self.showStatus( "Load and edit parameter values from a user-specified file", cat=TIP) def printCloseInfo(self, event): self.showStatus("Close this edit session. Save first?", cat=TIP) def printAbortInfo(self, event): self.showStatus( "Abort this edit session, discarding any unsaved changes.",cat=TIP) def printExecuteInfo(self, event): if self._saveAndCloseOnExec: self.showStatus( "Execute the task, and save and exit this edit session", cat=TIP) else: self.showStatus("Execute the task; this window will remain open", cat=TIP) # Process invalid input values and invoke a query dialog def processBadEntries(self, badEntriesList, taskname, canCancel=True): badEntriesString = "Task " + taskname.upper() + " --\n" \ "Invalid values have been entered.\n\n" \ "Parameter Bad Value Reset Value\n" for i in range (len(badEntriesList)): badEntriesString = badEntriesString + \ "%15s %10s %10s\n" % (badEntriesList[i][0], \ badEntriesList[i][1], badEntriesList[i][2]) if canCancel: badEntriesString += '\n"OK" to continue using'+ \ ' the reset values, or "Cancel" to re-enter values?\n' else: badEntriesString += \ "\n All invalid values will return to their 'Reset Value'.\n" # Invoke the modal message dialog if canCancel: return askokcancel("Notice", badEntriesString) else: return showwarning("Notice", badEntriesString) def hasUnsavedChanges(self): """ Determine if there are any edits in the GUI that have not yet been saved (e.g. to a file). This needs to be overridden by a subclass. In the meantime, just default (on the safe side) to everything being ready-to-save. """ return True def closeGui(self, event=None): self.saveAndClose(askBeforeSave=True, forceClose=True) # SAVE/QUIT: save the parameter settings and exit epar def saveAndClose(self, event=None, askBeforeSave=False, forceClose=False): # First, see if we can/should skip the save doTheSave = True if askBeforeSave: if self.hasUnsavedChanges(): doTheSave = askyesno('Save?', 'Save before closing?') else: # no unsaved changes, so no need to save OR even to prompt doTheSave = False # no need to save OR prompt # first save the child parameters, aborting save if # invalid entries were encountered if doTheSave and self.checkSetSaveChildren(): return # Save all the entries and verify them, keeping track of the # invalid entries which have been reset to their original input values self.badEntriesList = None if doTheSave: self.badEntriesList = self.checkSetSaveEntries() # Note, there is a BUG here - if they hit Cancel, the save to # file has occurred anyway (they may not care) - need to refactor. 
# If there were invalid entries, prepare the message dialog if self.badEntriesList: ansOKCANCEL = self.processBadEntries(self.badEntriesList, self.taskName) if not ansOKCANCEL: return # If there were no invalid entries or the user says OK, continue... # Save any GUI settings we care about. This is a good time to do so # even if the window isn't closing, but especially if it is. self._saveGuiSettings() # Done saving. Only close the window if we are running in that mode. if not (self._useSimpleAutoClose or forceClose): return # Remove the main epar window self.top.focus_set() self.top.withdraw() # If not a child window, quit the entire session if not self.isChild: self.top.destroy() self.top.quit() # Declare the global variables so they can be updated global CHILDX global CHILDY # Reset to the start location CHILDX = PARENTX CHILDY = PARENTY # OPEN: load parameter settings from a user-specified file def pfopen(self, event=None): """ Load the parameter settings from a user-specified file. Any epar changes here should be coordinated with the corresponding tpar pfopen function. """ raise NotImplementedError("EditParDialog is not to be used directly") def _getSaveAsFilter(self): """ Return a string to be used as the filter arg to the save file dialog during Save-As. Override for more specific behavior. """ return "*.*" def _saveAsPreSave_Hook(self, fnameToBeUsed): """ Allow a subclass any specific checks right before the save. """ return None def _saveAsPostSave_Hook(self, fnameToBeUsed): """ Allow a subclass any specific checks right after the save. """ return None # SAVE AS: save the parameter settings to a user-specified file def saveAs(self, event=None): """ Save the parameter settings to a user-specified file. Any changes here must be coordinated with the corresponding tpar save_as function. """ self.debug('Clicked Save as...') # On Linux Pers..Dlg causes the cwd to change, so get a copy of current curdir = os.getcwd() # The user wishes to save to a different name writeProtChoice = self._writeProtectOnSaveAs if capable.OF_TKFD_IN_EPAR: # Prompt using native looking dialog fname = asksaveasfilename(parent=self.top, title='Save Parameter File As', defaultextension=self._defSaveAsExt, initialdir=os.path.dirname(self._getSaveAsFilter())) else: # Prompt. (could use tkinter's FileDialog, but this one is prettier) # initWProtState is only used in the 1st call of a session from . import filedlg fd = filedlg.PersistSaveFileDialog(self.top, "Save Parameter File As", self._getSaveAsFilter(), initWProtState=writeProtChoice) if fd.Show() != 1: fd.DialogCleanup() os.chdir(curdir) # in case file dlg moved us return fname = fd.GetFileName() writeProtChoice = fd.GetWriteProtectChoice() fd.DialogCleanup() if not fname: return # canceled # First check the child parameters, aborting save if # invalid entries were encountered if self.checkSetSaveChildren(): os.chdir(curdir) # in case file dlg moved us return # Run any subclass-specific steps right before the save self._saveAsPreSave_Hook(fname) # Verify all the entries (without save), keeping track of the invalid # entries which have been reset to their original input values self.badEntriesList = self.checkSetSaveEntries(doSave=False) # If there were invalid entries, prepare the message dialog if self.badEntriesList: ansOKCANCEL = self.processBadEntries(self.badEntriesList, self.taskName) if not ansOKCANCEL: os.chdir(curdir) # in case file dlg moved us return # If there were no invalid entries or the user says OK, finally # save to their stated file. 
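            # The Persist* dialogs remember the last-visited directory and the
            # write-protect choice across calls in this session (via class
            # attributes in filedlg), so the browser reopens where it left off.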
Since we have already processed the # bad entries, there should be none returned. mstr = "TASKMETA: task="+self.taskName+" package="+self.pkgName if self.checkSetSaveEntries(doSave=True, filename=fname, comment=mstr, set_ro=writeProtChoice, overwriteRO=True): os.chdir(curdir) # in case file dlg moved us raise Exception("Unexpected bad entries for: "+self.taskName) # Run any subclass-specific steps right after the save self._saveAsPostSave_Hook(fname) os.chdir(curdir) # in case file dlg moved us # EXECUTE: save the parameter settings and run the task def execute(self, event=None): self.debug('Clicked Execute') # first save the child parameters, aborting save if # invalid entries were encountered if self.checkSetSaveChildren(): return # If we are only executing (no save and close) do so here and return if not self._saveAndCloseOnExec: # First check the parameter values self.badEntriesList = self.checkSetSaveEntries(doSave=False) # If there were invalid entries, show the message dialog if self.badEntriesList: ansOKCANCEL = self.processBadEntries(self.badEntriesList, self.taskName) if not ansOKCANCEL: return self.showStatus("Task "+self.taskName+" is running...", keep=2) self._executed = True # note for later use self.runTask() return # Now save the parameter values of the parent self.badEntriesList = self.checkSetSaveEntries() # If there were invalid entries in the parent epar dialog, prepare # the message dialog if self.badEntriesList: ansOKCANCEL = self.processBadEntries(self.badEntriesList, self.taskName) if not ansOKCANCEL: return # If there were no invalid entries or the user said OK # Save any GUI settings we care about since window is closing self._saveGuiSettings() # Remove the main epar window self.top.focus_set() self.top.withdraw() self.top.destroy() print("\nTask "+self.taskName+" is running...\n") # Before running the task, clear any already-handled exceptions that # will be erroneously picked up by the task's logger utility. # This is temporary. Remove this line when logging is fixed. try: sys.exc_clear() # not present in PY3K except AttributeError: pass # Run the task try: self._executed = True # note for later use self.runTask() finally: self.top.quit() # Declare the global variables so they can be updated global CHILDX global CHILDY # Reset to the start location CHILDX = PARENTX CHILDY = PARENTY # ABORT: abort this epar session def abort(self, event=None): # Declare the global variables so they can be updated global CHILDX global CHILDY # Reset to the start location CHILDX = PARENTX CHILDY = PARENTY # Give focus back to parent window and abort self.top.focus_set() self.top.withdraw() self._canceled = True # note for later use # Do not destroy the window, just hide it for now. # This is so EXECUTE will not get an error - properly use Mediator. #self.top.destroy() if not self.isChild: self.top.destroy() self.top.quit() # UNLEARN: unlearn all the parameters by setting their values # back to the system default def unlearn(self, event=None): self.debug('Clicked Unlearn') # Reset the values of the parameters self.unlearnAllEntries(self.top.f.canvas.entries) self.freshenFocus() # HTMLHELP: invoke the HTML help def htmlHelp(self, helpString=None, title=None, istask=False, tag=None): """ Pop up the help in a browser window. By default, this tries to show the help for the current task. With the option arguments, it can be used to show any help string. """ # Check the help string. 
If it turns out to be a URL, launch that,
        # if not, dump it to a quick and dirty tmp html file to make it
        # presentable, and pass that file name as the URL.
        if not helpString:
            helpString = self.getHelpString(self.pkgName+'.'+self.taskName)
        if not title:
            title = self.taskName
        lwr = helpString.lower()
        if lwr.startswith("http:") or lwr.startswith("https:") or \
           lwr.startswith("file:"):
            url = helpString
            if tag and url.find('#') < 0:
                url += '#'+tag
            # print('LAUNCHING: '+url) # DBG
            irafutils.launchBrowser(url, subj=title)
        else:
            # Write it to a temp HTML file to display
            (fd, fname) = tempfile.mkstemp(suffix='.html', prefix='editpar_')
            os.close(fd)
            f = open(fname, 'w')
            if istask and self._knowTaskHelpIsHtml:
                f.write(helpString)
            else:
                f.write('<html><head><title>'+title+'</title></head>\n')
                f.write('<body><h3>'+title+'</h3>\n')
                f.write('<pre>\n'+helpString+'\n</pre></body></html>
') f.close() irafutils.launchBrowser("file://"+fname, subj=title) def _showAnyHelp(self, kind, tag=None): """ Invoke task/epar/etc. help and put the page in a window. This same logic is used for GUI help, task help, log msgs, etc. """ # sanity check assert kind in ('epar', 'task', 'log'), 'Unknown help kind: '+str(kind) #----------------------------------------- # See if they'd like to view in a browser #----------------------------------------- if self._showHelpInBrowser or (kind == 'task' and self._knowTaskHelpIsHtml): if kind == 'epar': self.htmlHelp(helpString=self._appHelpString, title='Parameter Editor Help') if kind == 'task': self.htmlHelp(istask=True, tag=tag) if kind == 'log': self.htmlHelp(helpString='\n'.join(self._msgHistory), title=self._appName+' Event Log') return #----------------------------------------- # Now try to pop up the regular Tk window #----------------------------------------- wins = {'epar':self.eparHelpWin, 'task':self.irafHelpWin, 'log': self.logHistWin, } window = wins[kind] try: if window.state() != NORMAL: window.deiconify() window.tkraise() return except (AttributeError, TclError): pass #--------------------------------------------------------- # That didn't succeed (window is still None), so build it #--------------------------------------------------------- if kind == 'epar': self.eparHelpWin = self.makeHelpWin(self._appHelpString, title='Parameter Editor Help') if kind == 'task': # Acquire the task help as a string # Need to include the package name for the task to # avoid name conflicts with tasks from other packages. WJH self.irafHelpWin = self.makeHelpWin(self.getHelpString( self.pkgName+'.'+self.taskName)) if kind == 'log': self.logHistWin = self.makeHelpWin('\n'.join(self._msgHistory), title=self._appName+' Event Log') def canceled(self): """ Did the user click Cancel? (or close us via the window manager) """ return self._canceled def executed(self): """ Did the user click Execute? """ return self._executed # Get the task help in a string def getHelpString(self, taskname): """ Provide a task-specific help string. 
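        The base implementation simply returns
        self._taskParsObj.getHelpAsString(); subclasses may override it to
        supply task-specific text.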
""" return self._taskParsObj.getHelpAsString() # Set up the help dialog (browser) def makeHelpWin(self, helpString, title="Parameter Editor Help Browser"): # Generate a new Toplevel window for the browser # hb = Toplevel(self.top, bg="SlateGray3") hb = Toplevel(self.top, bg=None) hb.title(title) hb.iconLabel = title # Set up the Menu Bar hb.menubar = Frame(hb, relief=RIDGE, borderwidth=0) hb.menubar.button = Button(hb.menubar, text="Close", relief=RAISED, command=hb.destroy) hb.menubar.button.pack() hb.menubar.pack(side=BOTTOM, padx=5, pady=5) # Define the Frame for the scrolling Listbox hb.frame = Frame(hb, relief=RIDGE, borderwidth=1) # Attach a vertical Scrollbar to the Frame hb.frame.vscroll = Scrollbar(hb.frame, orient=VERTICAL, width=11, relief=SUNKEN, activerelief=RAISED, takefocus=FALSE) # Define the Listbox and setup the Scrollbar hb.frame.list = Listbox(hb.frame, relief=FLAT, height=25, width=80, takefocus=FALSE, selectmode=SINGLE, selectborderwidth=0) hb.frame.list['yscrollcommand'] = hb.frame.vscroll.set hb.frame.vscroll['command'] = hb.frame.list.yview hb.frame.vscroll.pack(side=RIGHT, fill=Y) hb.frame.list.pack(side=TOP, expand=TRUE, fill=BOTH) hb.frame.pack(side=TOP, fill=BOTH, expand=TRUE) # Insert each line of the helpString onto the Frame listing = helpString.split('\n') for line in listing: # Filter the text *** DO THIS A BETTER WAY *** line = line.replace("\x0e", "") line = line.replace("\x0f", "") line = line.replace("\f", "") # Insert the text into the Listbox hb.frame.list.insert(END, line) # When the Listbox appears, the listing will be at the beginning y = hb.frame.vscroll.get()[0] hb.frame.list.yview(int(y)) # enable Page Up/Down keys scroll = hb.frame.list.yview_scroll hb.bind('', lambda event, fs=scroll: fs(1, "pages")) hb.bind('', lambda event, fs=scroll: fs(-1, "pages")) # Position this dialog relative to the parent hb.geometry("+%d+%d" % (self.top.winfo_rootx() + HELPX, self.top.winfo_rooty() + HELPY)) return hb def validate(self): return 1 def setAllEntriesFromParList(self, aParList, updateModel=False): """ Set all the parameter entry values in the GUI to the values in the given par list. If 'updateModel' is True, the internal param list will be updated to the new values as well as the GUI entries (slower and not always necessary). Note the corresponding TparDisplay method. """ # Get model data, the list of pars theParamList = self._taskParsObj.getParList() # we may modify members if len(aParList) != len(theParamList): showwarning(message="Attempting to set parameter values from a "+ \ "list of different length ("+str(len(aParList))+ \ ") than the number shown here ("+ \ str(len(theParamList))+"). Be aware.", title="Parameter List Length Mismatch") # LOOP THRU GUI PAR LIST for i in range(self.numParams): par = theParamList[i] if par.type == "pset": continue # skip PSET's for now gui_entry = self.entryNo[i] # Set the value in the paramList before setting it in the GUI # This may be in the form of a list, or an IrafParList (getValue) if isinstance(aParList, list): # Since "aParList" can have them in different order and number # than we do, we'll have to first find the matching param. found = False for newpar in aParList: if newpar.name==par.name and newpar.scope==par.scope: par.set(newpar.value) # same as .get(native=1,prompt=0) found = True break # Now see if newpar was found in our list if not found: pnm = par.name if len(par.scope): pnm = par.scope+'.'+par.name raise UnfoundParamError('Error - Unfound Parameter! 
\n\n'+\ 'Expected parameter "'+pnm+'" for task "'+ \ self.taskName+'". \nThere may be others...') else: # assume has getValue() par.set(aParList.getValue(par.name, native=1, prompt=0)) # gui holds a str, but par.value is native; conversion occurs gui_entry.forceValue(par.value, noteEdited=False) # no triggers yet if updateModel: # Update the model values via checkSetSaveEntries self.badEntriesList = self.checkSetSaveEntries(doSave=False) # If there were invalid entries, prepare the message dialog if self.badEntriesList: self.processBadEntries(self.badEntriesList, self.taskName, canCancel=False) def unlearnAllEntries(self, master): """ Method to "unlearn" all the parameter entry values in the GUI and set the parameter back to the default value """ for entry in self.entryNo: entry.unlearnValue() def getValue(self, name, scope=None, native=False): """ Return current par value from the GUI. This does not do any validation, and it it not necessarily the same value saved in the model, which is always behind the GUI setting, in time. This is NOT to be used to get all the values - it would not be efficient. """ # Get model data, the list of pars theParamList = self._taskParsObj.getParList() # NOTE: If par scope is given, it will be used, otherwise it is # assumed to be unneeded and the first name-match is returned. fullName = basicpar.makeFullName(scope, name) # Loop over the parameters to find the requested par for i in range(self.numParams): par = theParamList[i] # IrafPar or subclass entry = self.entryNo[i] # EparOption or subclass if par.fullName() == fullName or \ (scope is None and par.name == name): if native: return entry.convertToNative(entry.choice.get()) else: return entry.choice.get() # We didn't find the requested par raise RuntimeError('Could not find par: "'+fullName+'"') # Read, save, and validate the entries def checkSetSaveEntries(self, doSave=True, filename=None, comment=None, fleeOnBadVals=False, allowGuiChanges=True, set_ro=False, overwriteRO=False): self.badEntries = [] asNative = self._taskParsObj.knowAsNative() # Get model data, the list of pars theParamList = self._taskParsObj.getParList() # Loop over the parameters to obtain the modified information for i in range(self.numParams): par = theParamList[i] # IrafPar or subclass entry = self.entryNo[i] # EparOption or subclass # Cannot change an entry if it is a PSET, just skip if par.type == "pset": continue # get current state of par in the gui value = entry.choice.get() # Set new values for changed parameters - a bit tricky, # since changes that weren't followed by a return or # tab have not yet been checked. If we eventually # use a widget that can check all changes, we will # only need to check the isChanged flag. if par.isChanged() or value != entry.previousValue: # CHECK: Verify the value. If its invalid (and allowGuiChanges), # the value will be converted to its original valid value. # Maintain a list of the reset values for user notification. # Always call entryCheck, no matter what type of _taskParsObj, # since entryCheck can do some basic type checking. 
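                # Validation proceeds in up to three stages of increasing
                # cost: entryCheck() (widget-level repair), convertToNative()
                # (string -> native type, when saving natively), then
                # _taskParsObj.tryValue() (task-specific range/choice checks).
                # Each failure records [name, bad value, reset value] in
                # self.badEntries.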
failed = False if entry.entryCheck(repair=allowGuiChanges): failed = True self.badEntries.append([entry.name, value, entry.choice.get()]) if fleeOnBadVals: return self.badEntries # See if we need to do a more serious validity check elif self._taskParsObj.canPerformValidation(): # if we are planning to save in native type, test that way if asNative: try: value = entry.convertToNative(value) except: failed = True prev = entry.previousValue self.badEntries.append([entry.name, value, prev]) if fleeOnBadVals: return self.badEntries if allowGuiChanges: entry.choice.set(prev) # now try the val in it's validator if not failed: valOK, prev = self._taskParsObj.tryValue(entry.name, value, scope=par.scope) if not valOK: failed = True self.badEntries.append([entry.name,str(value),prev]) if fleeOnBadVals: return self.badEntries if allowGuiChanges: entry.choice.set(prev) # get value again in case it changed - this version IS valid value = entry.choice.get() if asNative: value = entry.convertToNative(value) # SET: Update the task parameter (also does the conversion # from string) self._taskParsObj.setParam(par.name, value, scope=par.scope, check=0, idxHint=i) # SAVE: Save results to the given file if doSave: self.debug('Saving...') out = self._doActualSave(filename, comment, set_ro=set_ro, overwriteRO=overwriteRO) if len(out): self.showStatus(out, keep=2) # inform user on saves return self.badEntries def _doActualSave(self, fname, comment, set_ro=False): """ Here we call the method on the _taskParsObj to do the actual save. Return a string result to be printed to the screen. """ # do something like # return self._taskParsObj.saveParList(filename=fname, comment=comment) raise NotImplementedError("EditParDialog is not to be used directly") def checkSetSaveChildren(self, doSave=True): """Check, then set, then save the parameter settings for all child (pset) windows. Prompts if any problems are found. Returns None on success, list of bad entries on failure. """ if self.isChild: return # Need to get all the entries and verify them. # Save the children in backwards order to coincide with the # display of the dialogs (LIFO) for n in range (len(self.top.childList)-1, -1, -1): self.badEntriesList = self.top.childList[n]. \ checkSetSaveEntries(doSave=doSave) if self.badEntriesList: ansOKCANCEL = self.processBadEntries(self.badEntriesList, self.top.childList[n].taskName) if not ansOKCANCEL: return self.badEntriesList # If there were no invalid entries or the user says OK, # close down the child and increment to the next child self.top.childList[n].top.focus_set() self.top.childList[n].top.withdraw() del self.top.childList[n] # all windows saved successfully return def _pushMessages(self): """ Internal callback used to make sure the msg list keeps moving. """ # This continues to get itself called until no msgs are left in list. self.showStatus('') if len(self._statusMsgsToShow) > 0: self.top.after(200, self._pushMessages) def debug(self, msg): """ Convenience function. Use showStatus without puting into GUI. """ self.showStatus(msg, cat=DBG) def showStatus(self, msg, keep=0, cat=None): """ Show the given status string, but not until any given delay from the previous message has expired. keep is a time (secs) to force the message to remain without being overwritten or cleared. cat is a string category used only in the historical log. 
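        For example, showStatus('Saving...', keep=2, cat='INFO') keeps the
        text up for at least 2 seconds and records it in the history as
        something like 'Mon 12:34:56: [INFO] Saving...'.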
""" # prep it, space-wise msg = msg.strip() if len(msg) > 0: # right here is the ideal place to collect a history of messages forhist = msg if cat: forhist = '['+cat+'] '+msg forhist = time.strftime("%a %H:%M:%S")+': '+forhist self._msgHistory.append(forhist) # now set the spacing msg = ' '+msg # stop here if it is a category not shown in the GUI if cat == DBG: return # see if we can show it now = time.time() if now >= self._leaveStatusMsgUntil: # we are clear, can show a msg # first see if this msg is '' - if so we will show an important # waiting msg instead of the '', and then pop it off our list if len(msg) < 1 and len(self._statusMsgsToShow) > 0: msg, keep = self._statusMsgsToShow[0] # overwrite both args del self._statusMsgsToShow[0] # now actuall print the status out to the status widget self.top.status.config(text = msg) # reset our delay flag self._leaveStatusMsgUntil = 0 if keep > 0: self._leaveStatusMsgUntil = now + keep else: # there is a previous message still up, is this one important? if len(msg) > 0 and keep > 0: # Uh-oh, this is an important message that we don't want to # simply skip, but on the other hand we can't show it yet... # So we add it to _statusMsgsToShow and show it later (asap) if (msg,keep) not in self._statusMsgsToShow: if len(self._statusMsgsToShow) < 7: self._statusMsgsToShow.append( (msg,keep) ) # tuple # kick off timer loop to get this one pushed through if len(self._statusMsgsToShow) == 1: self._pushMessages() else: # should never happen, but just in case print("Lost message!: "+msg+" (too far behind...)") # Run the task def runTask(self): # Use the run method of the IrafTask class # Set mode='h' so it does not prompt for parameters (like IRAF epar) # Also turn on parameter saving try: self._taskParsObj.run(mode='h', _save=1) except taskpars.NoExecError as nee: # catch only this, let all else thru showwarning(message="No way found to run task\n\n"+\ str(nee), title="Can Not Run Task") stsci.tools-3.4.12/lib/stsci/tools/eparoption.py0000644001120100020070000011140613112074217023353 0ustar jhunkSTSCI\science00000000000000"""eparoption.py: module for defining the various parameter display options to be used for the parameter editor task. The widget that is used for entering the parameter value is the variant. Instances should be created using the eparOptionFactory function defined at the end of the module. Parameter types: string - Entry widget *gcur - NOT IMPLEMENTED AT THIS TIME ukey - NOT IMPLEMENTED AT THIS TIME pset - Action button real - Entry widget int - Entry widget boolean - Radiobutton widget array real - NOT IMPLEMENTED AT THIS TIME array int - NOT IMPLEMENTED AT THIS TIME Enumerated lists - Menubutton/Menu widget $Id$ M.D. De La Pena, 1999 August 05 """ from __future__ import absolute_import, division, print_function # confidence high # System level modules import sys, string from . import capable PY3K = sys.version_info[0] > 2 if capable.OF_GRAPHICS: if PY3K: from tkinter import * from tkinter.filedialog import askdirectory, askopenfilename else: from Tkinter import * from tkFileDialog import askdirectory, askopenfilename else: StringVar = None # Are we using X? 
(see description of logic in pyraf's wutil.py) USING_X = True if sys.platform == 'darwin': junk = ",".join(sys.path) USING_X = junk.lower().find('/pyobjc') < 0 del junk # Constants MAXLIST = 15 MAXLINES = 100 XSHIFT = 110 DSCRPTN_FLAG = ' (***)' class EparOption(object): """EparOption base class Implementation for a specific parameter type must implement the makeInputWidget method and must create an attribute `entry' with the base widget created. The entry widget is used for focus setting and automatic scrolling. doScroll is a callback to do the scrolling when tab changes focus. """ # Chosen option choiceClass = StringVar def __init__(self, master, statusBar, paramInfo, defaultParamInfo, doScroll, fieldWidths, defaultsVerb, bg, indent=False, helpCallbackObj=None, mainGuiObj=None): # Connect to the information/status Label self.status = statusBar # Hook to allow scroll when this widget gets focus self.doScroll = doScroll # Track selection at the last FocusOut event self.lastSelection = (0,END) # A new Frame is created for each parameter entry self.master = master self.bkgColor = bg self.master_frame = Frame(self.master, bg=self.bkgColor) self.paramInfo = paramInfo self.defaultParamInfo = defaultParamInfo self.defaultsVerb = defaultsVerb self.inputWidth = fieldWidths.get('inputWidth') self.valueWidth = fieldWidths.get('valueWidth') self.promptWidth = fieldWidths.get('promptWidth') self.choice = self.choiceClass(self.master_frame) self.name = self.paramInfo.name self.value = self.paramInfo.get(field = "p_filename", native = 0, prompt = 0) self.previousValue = self.value self._editedCallbackObj = None self._helpCallbackObj = helpCallbackObj self._mainGuiObj = mainGuiObj self._lastWidgetEditedVal = None self._flagNonDefaultVals = False self._flaggedColor = "red" # DISABLE any indent for now - not sure why but this causes odd text # field sizes in other (unrelated and unindented) parameters... Maybe # because it messes with the total width of the window... 
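        # (The 'if 0' below deliberately short-circuits the indent spacer.)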
if 0 and indent: self.spacer = Label(self.master_frame, anchor=W, takefocus=0, text="", width=3, bg=self.bkgColor) self.spacer.pack(side=LEFT, fill=X, expand=TRUE) # Generate the input label if self.paramInfo.get(field = "p_mode") == "h": self.inputLabel = Label(self.master_frame, anchor = W, text = "("+self.getShowName()+")", width = self.inputWidth, bg=self.bkgColor) else: self.inputLabel = Label(self.master_frame, anchor = W, text = self.getShowName(), width = self.inputWidth, bg=self.bkgColor) self.inputLabel.pack(side = LEFT, fill = X, expand = TRUE) # Get the prompt string and determine if special handling is needed # Use the prompt/description from the default version, in case they # have edited theirs - this is not editable - see ticket #803 self.prompt = self.defaultParamInfo.get(field="p_prompt", native=0, prompt=0) # Check the prompt to determine how many lines of valid text exist lines = self.prompt.split("\n") nlines = len(lines) promptLines = " " + lines[0] infoLines = "" blankLineNo = MAXLINES if (nlines > 1): # Keep all the lines of text before the blank line for the prompt for i in range(1, nlines): ntokens = lines[i].split() if ntokens != []: promptLines = "\n".join([promptLines, lines[i]]) else: blankLineNo = i break self._flagged = False if promptLines.endswith(DSCRPTN_FLAG): promptLines = promptLines[:-len(DSCRPTN_FLAG)] self._flagged = True fgColor = "black" # turn off this red coloring for the DSCRPTN_FLAG - see #803 # if self._flagged: fgColor = "red" # Generate the prompt label self.promptLabel = Label(self.master_frame, anchor=W, fg=fgColor, text=promptLines, width=self.promptWidth, bg=self.bkgColor) self.promptLabel.pack(side=RIGHT, fill=X, expand=TRUE) # Settings for subclasses to override in the makeInputWidget method self.isSelectable = True # ie widget has text (num or str) to select # Default is none of items on popup menu are activated # These can be changed by the makeInputWidget method to customize # behavior for each widget. self.browserEnabled = DISABLED self.clearEnabled = DISABLED self.unlearnEnabled = DISABLED self.helpEnabled = DISABLED if self._helpCallbackObj is not None: self.helpEnabled = NORMAL # Generate the input widget depending upon the datatype self.makeInputWidget() # print(self.name, self.__class__) # DBG line self.entry.bind('', self.focusOut, "+") self.entry.bind('', self.focusIn, "+") # Trap keys that leave field and validate entry self.entry.bind('', self.entryCheck, "+") self.entry.bind('', self.entryCheck, "+") self.entry.bind('', self.entryCheck, "+") self.entry.bind('', self.entryCheck, "+") self.entry.bind('', self.entryCheck, "+") self.entry.bind('', self.entryCheck, "+") try: # special shift-tab binding needed for (some? all?) linux systems self.entry.bind('', self.entryCheck, "+") except TclError: # Ignore exception here, the binding can't be relevant # if ISO_Left_Tab is unknown. pass # Bind the right button to a popup menu of choices if USING_X: self.entry.bind('', self.popupChoices) else: self.entry.bind('', self.popupChoices) # Pack the parameter entry Frame self.master_frame.pack(side=TOP, fill=X, ipady=1) # If there is more text associated with this entry, join all the # lines of text with the blank line. This is the "special" text # information. 
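        # To summarize the convention: prompt text up to the first blank line
        # is shown beside the widget; anything after the blank line is placed
        # in this separate infoText Frame beneath the parameter row.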
if (blankLineNo < (nlines - 1)): # Put the text after the blank line into its own Frame self.master.infoText = Frame(self.master) for j in range(blankLineNo + 1, nlines): ntokens = lines[j].split() if ntokens != []: infoLines = "\n".join([infoLines, lines[j]]) else: break # Assign the informational text to the label and pack self.master.infoText.label = Label(self.master.infoText, text = infoLines, anchor = W, bg = self.bkgColor) self.master.infoText.label.pack(side = LEFT) self.master.infoText.pack(side = TOP, anchor = W) def setFlaggedColor(self, colorstr): self._flaggedColor = colorstr def setIsFlagging(self, isFlagging, redrawImmediately): self._flagNonDefaultVals = isFlagging if redrawImmediately: if self._flagNonDefaultVals: curVal = self.choice.get() else: # otheriwse we don't care; use None; is ok and faster curVal = None self.flagThisPar(curVal, True) def getShowName(self): """ Return the name to be shown in the GUI for this par/option. """ return self.name def extraBindingsForSelectableText(self): """ Collect in 1 place the bindings needed for watchTextSelection() """ # See notes in watchTextSelection self.entry.bind('', self.watchTextSelection, "+") self.entry.bind('', self.watchTextSelection, "+") self.entry.bind('', self.watchTextSelection, "+") self.entry.bind('', self.watchTextSelection, "+") self.entry.bind('', self.watchTextSelection, "+") self.entry.bind('', self.watchTextSelection, "+") def convertToNative(self, aVal): """ The basic type is natively a string. """ return None if aVal is None else str(aVal) def focusOut(self, event=None): """Clear selection (if text is selected in this widget)""" # do nothing if this isn't a text-enabled widget if not self.isSelectable: return if self.entryCheck(event) is None: # Entry value is OK # Save the last selection so it can be restored if we # come right back to this widget. Then clear the selection # before moving on. entry = self.entry try: if not entry.selection_present(): self.lastSelection = None else: self.lastSelection = (entry.index(SEL_FIRST), entry.index(SEL_LAST)) except AttributeError: pass if USING_X and sys.platform == 'darwin': pass # do nothing here - we need it left selected for cut/paste else: entry.selection_clear() else: return "break" def watchTextSelection(self, event=None): """ Callback used to see if there is a new text selection. In certain cases we manually add the text to the clipboard (though on most platforms the correct behavior happens automatically). """ # Note that this isn't perfect - it is a key click behind when # selections are made via shift-arrow. If this becomes important, it # can likely be fixed with after(). if self.entry.selection_present(): # entry must be text entry type i1 = self.entry.index(SEL_FIRST) i2 = self.entry.index(SEL_LAST) if i1 >= 0 and i2 >= 0 and i2 > i1: sel = self.entry.get()[i1:i2] # Add to clipboard on platforms where necessary. print('selected: "'+sel+'"') # The following is unneeded if the selected text stays selected # when focus is lost or another app is bought to the forground. # if sel and USING_X and sys.platform == 'darwin': # clipboard_helper.put(sel, 'PRIMARY') def focusIn(self, event=None): """Select all text (if applicable) on taking focus""" try: # doScroll returns false if the call was ignored because the # last call also came from this widget. That avoids unwanted # scrolls and text selection when the focus moves in and out # of the window. 
if self.doScroll(event): self.entry.selection_range(0, END) # select all text in widget else: # restore selection to what it was on the last FocusOut if self.lastSelection: self.entry.selection_range(*self.lastSelection) except AttributeError: pass # Check the validity of the entry # If valid, changes the value of the parameter (note that this # is a copy, so change is not permanent until save) # Parameter change also sets the isChanged flag. def entryCheck(self, event=None, repair=True): # Make sure the input is legal value = self.choice.get() try: if value != self.previousValue: # THIS will likely get into IrafPar's _coerceOneValue() self.paramInfo.set(value) # fire any applicable triggers, whether value has changed or not self.widgetEdited(action='entry') return None except ValueError as exceptionInfo: # Reset the entry to the previous (presumably valid) value if repair: self.choice.set(self.previousValue) self.status.bell() errorMsg = str(exceptionInfo) if event is not None: self.status.config(text = errorMsg) # highlight the text again and terminate processing so # focus stays in this widget self.focusIn(event) return "break" def widgetEdited(self, event=None, val=None, action='entry', skipDups=True): """ A general method for firing any applicable triggers when a value has been set. This is meant to be easily callable from any part of this class (or its subclasses), so that it can be called as soon as need be (immed. on click?). This is smart enough to be called multiple times, itself handling the removal of any/all duplicate successive calls (unless skipDups is False). If val is None, it will use the GUI entry's current value via choice.get(). See teal.py for a description of action. """ # be as lightweight as possible if obj doesn't care about this stuff if not self._editedCallbackObj and not self._flagNonDefaultVals: return # get the current value curVal = val # take this first, if it is given if curVal is None: curVal = self.choice.get() # do any flagging self.flagThisPar(curVal, False) # see if this is a duplicate successive call for the same value if skipDups and curVal==self._lastWidgetEditedVal: return # pull trigger if not self._editedCallbackObj: return self._editedCallbackObj.edited(self.paramInfo.scope, self.paramInfo.name, self.previousValue, curVal, action) # for our duplicate checker self._lastWidgetEditedVal = curVal def focus_set(self, event=None): """Set focus to input widget""" self.entry.focus_set() # Generate the the input widget as appropriate to the parameter datatype def makeInputWidget(self): pass def popupChoices(self, event=None): """Popup right-click menu of special parameter operations Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled instance attributes to determine which items are available. 
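        The menu is rebuilt on every right-click, so changes to those
        attributes take effect the next time the popup is raised.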
""" # don't bother if all items are disabled if NORMAL not in (self.browserEnabled, self.clearEnabled, self.unlearnEnabled, self.helpEnabled): return self.menu = Menu(self.entry, tearoff = 0) if self.browserEnabled != DISABLED: # Handle file and directory in different functions (tkFileDialog) if capable.OF_TKFD_IN_EPAR: self.menu.add_command(label = "File Browser", state = self.browserEnabled, command = self.fileBrowser) self.menu.add_command(label = "Directory Browser", state = self.browserEnabled, command = self.dirBrowser) # Handle file and directory in the same function (filedlg) else: self.menu.add_command(label = "File/Directory Browser", state = self.browserEnabled, command = self.fileBrowser) self.menu.add_separator() self.menu.add_command(label = "Clear", state = self.clearEnabled, command = self.clearEntry) self.menu.add_command(label = self.defaultsVerb, state = self.unlearnEnabled, command = self.unlearnValue) self.menu.add_command(label = 'Help', state = self.helpEnabled, command = self.helpOnParam) # Get the current y-coordinate of the Entry ycoord = self.entry.winfo_rooty() # Get the current x-coordinate of the cursor xcoord = self.entry.winfo_pointerx() - XSHIFT # Display the Menu as a popup as it is not associated with a Button self.menu.tk_popup(xcoord, ycoord) def fileBrowser(self): """Invoke a tkinter file dialog""" if capable.OF_TKFD_IN_EPAR: fname = askopenfilename(parent=self.entry, title="Select File") else: from . import filedlg self.fd = filedlg.PersistLoadFileDialog(self.entry, "Select File", "*") if self.fd.Show() != 1: self.fd.DialogCleanup() return fname = self.fd.GetFileName() self.fd.DialogCleanup() if not fname: return # canceled self.choice.set(fname) # don't select when we go back to widget to reduce risk of # accidentally typing over the filename self.lastSelection = None def dirBrowser(self): """Invoke a tkinter directory dialog""" if capable.OF_TKFD_IN_EPAR: fname = askdirectory(parent=self.entry, title="Select Directory") else: raise NotImplementedError('Fix popupChoices() logic.') if not fname: return # canceled self.choice.set(fname) # don't select when we go back to widget to reduce risk of # accidentally typing over the filename self.lastSelection = None def clearEntry(self): """Clear just this Entry""" self.entry.delete(0, END) def forceValue(self, newVal, noteEdited=False): """Force-set a parameter entry to the given value""" if newVal is None: newVal = "" self.choice.set(newVal) if noteEdited: self.widgetEdited(val=newVal, skipDups=False) # WARNING: the value of noteEdited really should be false (default) # in most cases because we need the widgetEdited calls to be arranged # at one level higher than we are (single param). We need to allow the # caller to first loop over all eparoptions, setting their values # without triggering anything, and THEN go through again and run any # triggers. def unlearnValue(self): """Unlearn a parameter value by setting it back to its default""" defaultValue = self.defaultParamInfo.get(field = "p_filename", native = 0, prompt = 0) self.choice.set(defaultValue) def helpOnParam(self): """ Try to display help specific to this parameter. """ if self._helpCallbackObj is not None: self._helpCallbackObj.showParamHelp(self.name) def setEditedCallbackObj(self, ecbo): """ Sets a callback object to be triggred when this option/parameter is edited. The object is expected to have an "edited()" method which takes args as shown where it is called in widgetEdited. 
""" self._editedCallbackObj = ecbo def setActiveState(self, active): """ Use this to enable or disable (grey out) a parameter. """ st = DISABLED if active: st = NORMAL self.entry.configure(state=st) self.inputLabel.configure(state=st) self.promptLabel.configure(state=st) def flagThisPar(self, currentVal, force): """ If this par's value is different from the default value, it is here that we flag it somehow as such. This basic version simply makes the surrounding text red (or returns it to normal). May be overridden. Leave force at False if you want to allow this mehtod to make smart time-saving decisions about when it can skip recoloring because it is already the right color. Set force to true if you think we got out of sync and need to be fixed. """ # Get out ASAP if we can if (not force) and (not self._flagNonDefaultVals): return # handle simple case before comparing values (quick return) if force and not self._flagNonDefaultVals: self._flagged = False self.promptLabel.configure(fg="black") return # Get/format values to compare currentNative = self.convertToNative(currentVal) defaultNative = self.convertToNative(self.defaultParamInfo.value) # par.value is same as par.get(native=1,prompt=0) # flag or unflag as needed if currentNative != defaultNative: if not self._flagged or force: self._flagged = True self.promptLabel.configure(fg=self._flaggedColor) # was red else: # same as def if self._flagged or force: self._flagged = False self.promptLabel.configure(fg="black") # ['red','blue','green','purple','yellow','orange','black'] class EnumEparOption(EparOption): def makeInputWidget(self): self.unlearnEnabled = NORMAL self.isSelectable = False # Set the initial value for the button self.choice.set(self.value) # Need to adjust the value width so the menu button is # aligned properly if USING_X: self.valueWidth = self.valueWidth - 4 else: pass # self.valueWidth = self.valueWidth - 0 # looks right on Aqua # Generate the button self.entry = Menubutton(self.master_frame, width = self.valueWidth, text = self.choice.get(), # label relief = RAISED, anchor = W, # alignment textvariable = self.choice, # var to sync indicatoron = 1, takefocus = 1, highlightthickness = 1, activeforeground='black', fg='black', bg=self.bkgColor) self.entry.menu = Menu(self.entry, tearoff=0, postcommand=self.postcmd, fg = 'black', bg=self.bkgColor) # Generate the dictionary of shortcuts using first letter, # second if first not available, etc. self.shortcuts = {} trylist = self.paramInfo.choice underline = {} charset = string.ascii_lowercase + string.digits i = 0 while trylist: trylist2 = [] for option in trylist: # shortcuts dictionary is case-insensitive letter = option[i:i+1].lower() if letter in self.shortcuts: # will try again with next letter trylist2.append(option) elif letter: if letter in charset: self.shortcuts[letter] = option self.shortcuts[letter.upper()] = option underline[option] = i else: # only allow letters, numbers to be shortcuts # keep going in case this is an embedded blank (e.g.) 
trylist2.append(option) else: # no letters left, so no shortcut for this item underline[option] = -1 trylist = trylist2 i = i+1 # Generate the menu options with shortcuts underlined for option in self.paramInfo.choice: lbl = option if lbl=='-': lbl = ' -' # Tk treats '-' as a separator request self.entry.menu.add_radiobutton(label = lbl, value = option, variable = self.choice, command = self.selected, indicatoron = 0, underline = underline[option]) # set up a pointer from the menubutton back to the menu self.entry['menu'] = self.entry.menu self.entry.pack(side = LEFT) # shortcut keys jump to items for letter in self.shortcuts: self.entry.bind('<%s>' % letter, self.keypress) # Left button sets focus (as well as popping up menu) self.entry.bind('', self.focus_set) def keypress(self, event): """Allow keys typed in widget to select items""" try: self.choice.set(self.shortcuts[event.keysym]) except KeyError: # key not found (probably a bug, since we intend to catch # only events from shortcut keys, but ignore it anyway) pass def postcmd(self): """Make sure proper entry is activated when menu is posted""" value = self.choice.get() try: index = self.paramInfo.choice.index(value) self.entry.menu.activate(index) except ValueError: # initial null value may not be in list pass def selected(self): """They have chosen an enumerated option.""" self.widgetEdited(action='entry') # kick off any checks that need doin # def setActiveState(self, active): # [...] # for i in range(len(self.paramInfo.choice)): # this doesn't seem to # self.entry.menu.entryconfig(i, state=st) # make the menu text grey # [...] class BooleanEparOption(EparOption): def convertToNative(self, aVal): """ Convert to native bool; interpret certain strings. """ if aVal is None: return None if isinstance(aVal, bool): return aVal # otherwise interpret strings return str(aVal).lower() in ('1','on','yes','true') def makeInputWidget(self): self.unlearnEnabled = NORMAL self.isSelectable = False # Need to buffer the value width so the radio buttons and # the adjoining labels are aligned properly self.valueWidth = self.valueWidth + 10 if USING_X: self.padWidth = (self.valueWidth // 2) + 5 # looks right else: self.padWidth = 2 # looks right on Aqua # boolean parameters have 3 values: yes, no & undefined # Just display two choices (but variable may initially be # undefined) self.choice.set(self.value) self.entry = Frame(self.master_frame, relief = FLAT, width = self.valueWidth, takefocus = 1, highlightthickness = 1, bg=self.bkgColor, highlightbackground=self.bkgColor) if not USING_X: spacerL= Label(self.entry, takefocus=0, text="", width=2, bg=self.bkgColor) spacerL.pack(side=LEFT, fill=X, expand=TRUE) self.rbyes = Radiobutton(self.entry, text = "Yes", variable = self.choice, value = "yes", anchor = W, takefocus = 0, underline = 0, bg = self.bkgColor, highlightbackground=self.bkgColor) self.rbyes.pack(side=LEFT, ipadx=self.padWidth) if not USING_X: spacerM= Label(self.entry, takefocus=0, text="", width=3, bg=self.bkgColor) spacerM.pack(side=LEFT, fill=X, expand=TRUE) spacerR = Label(self.entry, takefocus=0, text="", width=2, bg=self.bkgColor) spacerR.pack(side=RIGHT, fill=X, expand=TRUE) self.rbno = Radiobutton(self.entry, text = "No", variable = self.choice, value = "no", anchor = W, takefocus = 0, underline = 0, bg = self.bkgColor, highlightbackground=self.bkgColor) self.rbno.pack(side = RIGHT, ipadx = self.padWidth) self.entry.pack(side = LEFT) # keyboard accelerators # Y/y sets yes, N/n sets no, space toggles selection self.entry.bind('', 
self.set) self.entry.bind('', self.set) self.entry.bind('', self.unset) self.entry.bind('', self.unset) self.entry.bind('', self.toggle) # When variable changes, make sure widget gets focus self.choice.trace("w", self.trace) # Right-click menu is bound to individual widgets too if USING_X: self.rbno.bind('', self.popupChoices) self.rbyes.bind('', self.popupChoices) else: self.rbno.bind('', self.popupChoices) self.rbyes.bind('', self.popupChoices) spacerM.bind('', self.popupChoices) # Regular selection - allow immediate trigger/check self.rbyes.bind('', self.boolWidgetEditedYes) self.rbno.bind('', self.boolWidgetEditedNo) def trace(self, *args): self.entry.focus_set() # Only needed over widgetEdited because the Yes isn't set yet def boolWidgetEditedYes(self, event=None): self.widgetEdited(val="yes") # Only needed over widgetEdited because the No isn't set yet def boolWidgetEditedNo(self, event=None): self.widgetEdited(val="no") def set(self, event=None): """Set value to Yes""" self.rbyes.select() self.widgetEdited() def unset(self, event=None): """Set value to No""" self.rbno.select() self.widgetEdited() def toggle(self, event=None): """Toggle value between Yes and No""" if self.choice.get() == "yes": self.rbno.select() else: self.rbyes.select() self.widgetEdited() def setActiveState(self, active): st = DISABLED if active: st = NORMAL self.rbyes.configure(state=st) self.rbno.configure(state=st) self.inputLabel.configure(state=st) self.promptLabel.configure(state=st) class StringEparOption(EparOption): def makeInputWidget(self): self.browserEnabled = NORMAL self.clearEnabled = NORMAL self.unlearnEnabled = NORMAL self.choice.set(self.value) self.entry = Entry(self.master_frame, width = self.valueWidth, textvariable = self.choice) # , bg=self.bkgColor) self.entry.pack(side = LEFT, fill = X, expand = TRUE) # self.extraBindingsForSelectableText() # do not use yet class ActionEparButton(EparOption): def getButtonLabel(self): return self.value def makeInputWidget(self): # self.choice.set(self.value) self.browserEnabled = DISABLED self.clearEnabled = DISABLED self.unlearnEnabled = DISABLED self.helpEnabled = NORMAL # Need to adjust the value width so the button is aligned properly if USING_X: self.valueWidth = self.valueWidth - 3 else: self.valueWidth = self.valueWidth - 2 self.isSelectable = False # Generate the button self.entry = Button(self.master_frame, width = self.valueWidth, text = self.getButtonLabel(), relief = RAISED, background = self.bkgColor, highlightbackground = self.bkgColor, command = self.clicked) self.entry.pack(side = LEFT) def clicked(self): raise NotImplementedError('clicked() must be implemented') def unlearnValue(self): pass # widget class that works for numbers and arrays of numbers class NumberEparOption(EparOption): def convertToNative(self, aVal): """ Natively as an int. 
""" if aVal in (None, '', 'None', 'NONE', 'INDEF'): return None return int(aVal) def notNull(self, value): vsplit = value.split() return vsplit.count("INDEF") != len(vsplit) def makeInputWidget(self): self.browserEnabled = DISABLED self.clearEnabled = NORMAL self.unlearnEnabled = NORMAL # Retain the original parameter value in case of bad entry self.previousValue = self.value self.choice.set(self.value) self.entry = Entry(self.master_frame, width = self.valueWidth, textvariable = self.choice) #, bg=self.bkgColor) self.entry.pack(side = LEFT) # self.extraBindingsForSelectableText() # do not use yet # Check the validity of the entry # Note that doing this using the parameter set method automatically # checks max, min, special value (INDEF, parameter indirection), etc. def entryCheck(self, event = None, repair = True): """ Ensure any INDEF entry is uppercase, before base class behavior """ valupr = self.choice.get().upper() if valupr.strip() == 'INDEF': self.choice.set(valupr) return EparOption.entryCheck(self, event, repair = repair) # numeric widget class specific to floats class FloatEparOption(NumberEparOption): def convertToNative(self, aVal): """ Natively as a float. """ if aVal in (None, '', 'None', 'NONE', 'INDEF'): return None return float(aVal) # EparOption values for non-string types _eparOptionDict = { "b": BooleanEparOption, "r": FloatEparOption, "R": FloatEparOption, "d": FloatEparOption, "I": NumberEparOption, "i": NumberEparOption, "z": ActionEparButton, "ar": FloatEparOption, "ai": NumberEparOption, } def eparOptionFactory(master, statusBar, param, defaultParam, doScroll, fieldWidths, plugIn=None, editedCallbackObj=None, helpCallbackObj=None, mainGuiObj=None, defaultsVerb="Default", bg=None, indent=False, flagging=False, flaggedColor=None): """Return EparOption item of appropriate type for the parameter param""" # Allow passed-in overrides if plugIn is not None: eparOption = plugIn # If there is an enumerated list, regardless of datatype use EnumEparOption elif param.choice is not None: eparOption = EnumEparOption else: # Use String for types not in the dictionary eparOption = _eparOptionDict.get(param.type, StringEparOption) # Create it eo = eparOption(master, statusBar, param, defaultParam, doScroll, fieldWidths, defaultsVerb, bg, indent=indent, helpCallbackObj=helpCallbackObj, mainGuiObj=mainGuiObj) eo.setEditedCallbackObj(editedCallbackObj) eo.setIsFlagging(flagging, False) if flaggedColor: eo.setFlaggedColor(flaggedColor) return eo stsci.tools-3.4.12/lib/stsci/tools/filedlg.py0000644001120100020070000003710013112074217022577 0ustar jhunkSTSCI\science00000000000000#### # Class FileDialog # # Purpose # ------- # # FileDialog's are widgets that allow one to select file names by # clicking on file names, directory names, filters, etc. # # Standard Usage # -------------- # # F = FileDialog(widget, some_title, some_filter) # if F.Show() != 1: # F.DialogCleanup() # return # file_name = F.GetFileName() # F.DialogCleanup() #### """ $Id$ """ from __future__ import absolute_import, division # confidence high import sys, os from . import capable PY3K = sys.version_info[0] > 2 if PY3K: from subprocess import getoutput else: from commands import getoutput if capable.OF_GRAPHICS: if PY3K: import tkinter as TKNTR else: import Tkinter as TKNTR from . 
import alert from .dialog import * else: ModalDialog = object class FileDialog(ModalDialog): # constructor lastWrtPrtChoice = None def __init__(self, widget, title, filter="*", initWProtState=None): """ Supply parent widget, title, filter, and initWProtState (True or False). Set initWProtState to None to hide the write-protect check-box. """ self.widget = widget self.filter = filter.strip() self.orig_dir = os.getcwd() self.cwd = os.getcwd() # the logical current working directory self.showChmod = initWProtState is not None # normally we use persistence for lastWrtPrtChoice; use this 1st time if FileDialog.lastWrtPrtChoice is None: FileDialog.lastWrtPrtChoice = initWProtState # Allow a start-directory as part of the given filter if self.filter.find(os.sep) >= 0: self.cwd = os.path.dirname(self.filter) self.filter = os.path.basename(self.filter) # do this second! # main Dialog code Dialog.__init__(self, widget) # setup routine called back from Dialog def SetupDialog(self): # directory label self.dirFrame = Frame(self.top) self.dirFrame['relief'] = 'raised' self.dirFrame['bd'] = '2' self.dirFrame.pack({'expand':'no', 'side':'top', 'fill':'both'}) self.dirLabel = Label(self.dirFrame) self.dirLabel["text"] = "Directory:" self.dirLabel.pack({'expand':'no', 'side':'left', 'fill':'none'}) # editable filter self.filterFrame = Frame(self.top) self.filterFrame['relief'] = 'raised' self.filterFrame['bd'] = '2' self.filterFrame.pack({'expand':'no', 'side':'top', 'fill':'both'}) self.filterLabel = Label(self.filterFrame) self.filterLabel["text"] = "Filter:" self.filterLabel.pack({'expand':'no', 'side':'left', 'fill':'none'}) self.filterEntry = Entry(self.filterFrame) self.filterEntry.bind('', self.FilterReturnKey) self.filterEntry["width"] = "40" self.filterEntry["relief"] = "ridge" self.filterEntry.pack({'expand':'yes', 'side':'right', 'fill':'x'}) self.filterEntry.insert(0, self.filter) # the directory and file listboxes self.listBoxFrame = Frame(self.top) self.listBoxFrame['relief'] = 'raised' self.listBoxFrame['bd'] = '2' self.listBoxFrame.pack({'expand':'yes', 'side' :'top', 'pady' :'2', 'padx': '0', 'fill' :'both'}) self.CreateDirListBox() self.CreateFileListBox() self.UpdateListBoxes() # write-protect option junk = FileDialog.lastWrtPrtChoice if junk is None: junk = 0 self.wpVar = IntVar(value=junk) # use class attr if self.showChmod: self.writeProtFrame = Frame(self.top) self.writeProtFrame['relief'] = 'raised' self.writeProtFrame['bd'] = '2' self.writeProtFrame.pack({'expand':'no','side':'top','fill':'both'}) self.wpButton = Checkbutton(self.writeProtFrame, text="Write-protect after save", command=self.wrtPrtClick, var=self.wpVar) self.wpButton.pack({'expand':'no', 'side':'left'}) # editable filename self.fileNameFrame = Frame(self.top) self.fileNameFrame.pack({'expand':'no', 'side':'top', 'fill':'both'}) self.fileNameFrame['relief'] = 'raised' self.fileNameFrame['bd'] = '2' self.fileNameLabel = Label(self.fileNameFrame) self.fileNameLabel["text"] = "File:" self.fileNameLabel.pack({'expand':'no', 'side':'left', 'fill':'none'}) self.fileNameEntry = Entry(self.fileNameFrame) self.fileNameEntry["width"] = "40" self.fileNameEntry["relief"] = "ridge" self.fileNameEntry.pack({'expand':'yes', 'side':'right', 'fill':'x', 'pady': '2'}) self.fileNameEntry.bind('', self.FileNameReturnKey) # buttons - ok, filter, cancel self.buttonFrame = Frame(self.top) self.buttonFrame['relief'] = 'raised' self.buttonFrame['bd'] = '2' self.buttonFrame.pack({'expand':'no', 'side':'top', 'fill':'x'}) self.okButton = 
Button(self.buttonFrame) self.okButton["text"] = "OK" self.okButton["command"] = self.OkPressed self.okButton["width"] = 8 self.okButton.pack({'expand':'yes', 'pady':'2', 'side':'left'}) self.filterButton = Button(self.buttonFrame) self.filterButton["text"] = "Filter" self.filterButton["command"] = self.FilterPressed self.filterButton["width"] = 8 self.filterButton.pack({'expand':'yes', 'pady':'2', 'side':'left'}) button = Button(self.buttonFrame) button["text"] = "Cancel" button["command"] = self.CancelPressed button["width"] = 8 button.pack({'expand':'yes', 'pady':'2', 'side':'left'}) # create the directory list box def CreateDirListBox(self): frame = Frame(self.listBoxFrame) frame.pack({'expand':'yes', 'side' :'left', 'pady' :'1', 'fill' :'both'}) frame['relief'] = 'raised' frame['bd'] = '2' filesFrame = Frame(frame) filesFrame['relief'] = 'flat' filesFrame['bd'] = '2' filesFrame.pack({'side':'top', 'expand':'no', 'fill':'x'}) label = Label(filesFrame) label['text'] = 'Directories:' label.pack({'side':'left', 'expand':'yes', 'anchor':'w', 'fill':'none'}) scrollBar = Scrollbar(frame, {'orient':'vertical'}) scrollBar.pack({'expand':'no', 'side':'right', 'fill':'y'}) self.dirLb = Listbox(frame, {'yscroll':scrollBar.set}) self.dirLb.pack({'expand':'yes', 'side' :'top', 'pady' :'1', 'fill' :'both'}) self.dirLb.bind('<1>', self.DoSelection) self.dirLb.bind('', self.DoDoubleClickDir) scrollBar['command'] = self.dirLb.yview # create the files list box def CreateFileListBox(self): frame = Frame(self.listBoxFrame) frame['relief'] = 'raised' frame['bd'] = '2' frame.pack({'expand':'yes', 'side' :'left', 'pady' :'1', 'padx' :'1', 'fill' :'both'}) filesFrame = Frame(frame) filesFrame['relief'] = 'flat' filesFrame['bd'] = '2' filesFrame.pack({'side':'top', 'expand':'no', 'fill':'x'}) label = Label(filesFrame) label['text'] = 'Files:' label.pack({'side':'left', 'expand':'yes', 'anchor':'w', 'fill':'none'}) scrollBar = Scrollbar(frame, {'orient':'vertical'}) scrollBar.pack({'side':'right', 'fill':'y'}) self.fileLb = Listbox(frame, {'yscroll':scrollBar.set}) self.fileLb.pack({'expand':'yes', 'side' :'top', 'pady' :'0', 'fill' :'both'}) self.fileLb.bind('<1>', self.DoSelection) self.fileLb.bind('', self.DoDoubleClickFile) scrollBar['command'] = self.fileLb.yview # update the listboxes and directory label after a change of directory def UpdateListBoxes(self): cwd = self.cwd self.fileLb.delete(0, self.fileLb.size()) filter = self.filterEntry.get() # '*' will list recurively, we don't want that. 
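        # e.g. "/bin/ls dir/*" makes the shell expand subdirectory contents,
        # while "/bin/ls dir/" (filter blanked below) lists only dir itself;
        # results are then screened with os.path.isfile() before display.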
if filter == '*': filter = '' cmd = "/bin/ls " + os.path.join(cwd, filter) cmdOutput = getoutput(cmd) files = cmdOutput.split("\n") files.sort() for i in range(len(files)): if os.path.isfile(os.path.join(cwd, files[i])): self.fileLb.insert('end', os.path.basename(files[i])) self.dirLb.delete(0, self.dirLb.size()) files = os.listdir(cwd) if cwd != '/': files.append('..') files.sort() for i in range(len(files)): if os.path.isdir(os.path.join(cwd, files[i])): self.dirLb.insert('end', files[i]) self.dirLabel['text'] = "Directory:" + self.cwd_print() # selection handlers def DoSelection(self, event): lb = event.widget field = self.fileNameEntry field.delete(0, AtEnd()) field.insert(0, os.path.join(self.cwd_print(), lb.get(lb.nearest(event.y)))) if TKNTR.TkVersion >= 4.0: lb.select_clear(0, "end") lb.select_anchor(lb.nearest(event.y)) else: lb.select_clear() lb.select_from(lb.nearest(event.y)) def DoDoubleClickDir(self, event): lb = event.widget self.cwd = os.path.join(self.cwd, lb.get(lb.nearest(event.y))) self.UpdateListBoxes() def DoDoubleClickFile(self, event): self.OkPressed() def OkPressed(self): self.TerminateDialog(1) def wrtPrtClick(self): FileDialog.lastWrtPrtChoice = self.wpVar.get() # update class attr def FileNameReturnKey(self, event): # if its a relative path then include the cwd in the name name = self.fileNameEntry.get().strip() if not os.path.isabs(os.path.expanduser(name)): self.fileNameEntry.delete(0, 'end') self.fileNameEntry.insert(0, os.path.join(self.cwd_print(), name)) self.okButton.flash() self.OkPressed() def FilterReturnKey(self, event): filter = self.filterEntry.get().strip() self.filterEntry.delete(0, 'end') self.filterEntry.insert(0, filter) self.filterButton.flash() self.UpdateListBoxes() def FilterPressed(self): self.UpdateListBoxes() def CancelPressed(self): self.TerminateDialog(0) def GetFileName(self): return self.fileNameEntry.get() def GetWriteProtectChoice(self): return bool(self.wpVar.get()) # return the logical current working directory in a printable form # ie. without all the X/.. pairs. The easiest way to do this is to # chdir to cwd and get the path there. def cwd_print(self): os.chdir(self.cwd) p = os.getcwd() os.chdir(self.orig_dir) return p #### # Class LoadFileDialog # # Purpose # ------- # # Specialisation of FileDialog for loading files. #### class LoadFileDialog(FileDialog): def __init__(self, master, title, filter): FileDialog.__init__(self, master, title, filter) self.top.title(title) def OkPressed(self): fileName = self.GetFileName() if os.path.exists(fileName) == 0: msg = 'File ' + fileName + ' not found.' errorDlg = alert.ErrorDialog(self.top, msg) errorDlg.Show() errorDlg.DialogCleanup() return FileDialog.OkPressed(self) #### # Class SaveFileDialog # # Purpose # ------- # # Specialisation of FileDialog for saving files. #### class SaveFileDialog(FileDialog): def __init__(self, master, title, filter): FileDialog.__init__(self, master, title, filter) self.top.title(title) def OkPressed(self): fileName = self.GetFileName() if os.path.exists(fileName) == 1: msg = 'File ' + fileName + ' exists.\nDo you wish to overwrite it?' 
warningDlg = alert.WarningDialog(self.top, msg) if warningDlg.Show() == 0: warningDlg.DialogCleanup() return warningDlg.DialogCleanup() FileDialog.OkPressed(self) #---------------------------------------------------------------------------- ############################################################################# # # Class: PersistFileDialog # Purpose: Essentially the same as FileDialog, except this class contains # a class variable (lastAccessedDir) which keeps track of the last # directory from which a file was chosen. Subsequent invocations of # this dialog in the same Python session will start up in the last # directory where a file was successfully chosen, rather than in the # current working directory. # # History: M.D. De La Pena, 08 June 2000 # ############################################################################# class PersistFileDialog(FileDialog): # Define a class variable to track the last accessed directory lastAccessedDir = None def __init__(self, widget, title, filter="*", initWProtState=None): FileDialog.__init__(self, widget, title, filter, initWProtState) # If the last accessed directory were not None, start up # the file browser in the last accessed directory. if self.__class__.lastAccessedDir: self.cwd = self.__class__.lastAccessedDir # Override the OkPressed method from the parent in order to # update the class variable. def OkPressed(self): self.__class__.lastAccessedDir = self.cwd_print() self.TerminateDialog(1) ############################################################################# # # Class: PersistLoadFileDialog # Purpose: Essentially the same as LoadFileDialog, except this class invokes # PersistFileDialog instead of FileDialog. # # History: M.D. De La Pena, 08 June 2000 # ############################################################################# class PersistLoadFileDialog(PersistFileDialog): def __init__(self, master, title, filter): PersistFileDialog.__init__(self, master, title, filter) self.top.title(title) def OkPressed(self): fileName = self.GetFileName() if os.path.exists(fileName) == 0: msg = 'File ' + fileName + ' not found.' errorDlg = alert.ErrorDialog(self.top, msg) errorDlg.Show() errorDlg.DialogCleanup() return PersistFileDialog.OkPressed(self) ############################################################################# # # Class: PersistSaveFileDialog # Purpose: Essentially the same as SaveFileDialog, except this class invokes # PersistFileDialog instead of FileDialog. # ############################################################################# class PersistSaveFileDialog(PersistFileDialog): def __init__(self, master, title, filter, initWProtState=None): PersistFileDialog.__init__(self, master, title, filter, initWProtState) self.top.title(title) def OkPressed(self): fileName = self.GetFileName() if os.path.exists(fileName) == 1: msg = 'File ' + fileName + ' exists.\nDo you wish to overwrite it?' warningDlg = alert.WarningDialog(self.top, msg) if warningDlg.Show() == 0: warningDlg.DialogCleanup() return warningDlg.DialogCleanup() PersistFileDialog.OkPressed(self) stsci.tools-3.4.12/lib/stsci/tools/fileutil.py0000644001120100020070000013400013241163620023003 0ustar jhunkSTSCI\science00000000000000"""fileutil.py -- General file functions These were initially designed for use with PyDrizzle. These functions only rely on booleans 'yes' and 'no', PyFITS and readgeis. This file contains both IRAF-compatibility and general file access functions. 
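A minimal usage sketch (the filename below is purely illustrative)::

    >>> from stsci.tools import fileutil
    >>> hdr = fileutil.getHeader('rootname_flt.fits[sci,1]')
    >>> val = fileutil.getKeyword('rootname_flt.fits[sci,1]', 'EXPTIME')
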
General functions included are:: DEGTORAD(deg), RADTODEG(rad) DIVMOD(num,val) convertDate(date) Converts the DATE date string into a decimal year. decimal_date(date-obs,time-obs=None) Converts the DATE-OBS (with optional TIME-OBS) string into a decimal year buildRootname(filename, extn=None, extlist=None) buildNewRootname(filename, ext=None) parseFilename(filename) Splits a input name into a tuple containing (filename, group/extension) getKeyword(filename, keyword, default=None, handle=None) getHeader(filename,handle=None) Return a copy of the PRIMARY header, along with any group/extension header, for this filename specification. getExtn(fimg,extn=None) Returns a copy of the specified extension with data from PyFITS object 'fimg' for desired file. updateKeyword(filename, key, value) openImage(filename,mode='readonly',memmap=False,fitsname=None) Opens file and returns PyFITS object. It will work on both FITS and GEIS formatted images. findFile(input) checkFileExists(filename,directory=None) removeFile(inlist): Utility function for deleting a list of files or a single file. rAsciiLine(ifile) Returns the next non-blank line in an ASCII file. readAsnTable(input,output=None,prodonly=yes) Reads an association (ASN) table and interprets inputs and output. The 'prodonly' parameter specifies whether to use products as inputs or not; where 'prodonly=no' specifies to only use EXP as inputs. isFits(input) - returns (True|False, fitstype), fitstype is one of ('simple', 'mef', 'waiver') IRAF compatibility functions (abbreviated list):: osfn(filename) Convert IRAF virtual path name to OS pathname show(*args, **kw) Print value of IRAF or OS environment variables time() Print current time and date access(filename) Returns true if file exists, where filename can include IRAF variables """ from __future__ import division, print_function # confidence high from . import numerixenv numerixenv.check() import astropy from . import stpyfits as fits from . import readgeis from . import convertwaiveredfits import datetime import copy import os import re import shutil import sys import time as _time import numpy as np from distutils.version import LooseVersion PY3K = sys.version_info[0] > 2 if PY3K: string_types = str else: string_types = basestring ASTROPY_VER_GE13 = LooseVersion(astropy.__version__) >= LooseVersion('1.3') # Environment variable handling - based on iraffunctions.py # define INDEF, yes, no, EOF, Verbose, userIrafHome # Set up IRAF-compatible Boolean values yes = True no = False # List of supported default file types # It will look for these file types by default # when trying to recognize input rootnames. EXTLIST = ['_crj.fits', '_flt.fits', '_flc.fits', '_sfl.fits', '_cal.fits', '_raw.fits', '.c0h', '.hhh', '_c0h.fits', '_c0f.fits', '_c1f.fits', '.fits'] BLANK_ASNDICT = { 'output': None, 'order': [], 'members': { 'abshift': no, 'dshift': no } } def help(): print(__doc__) ################# # # # Generic Functions # # ################# def DEGTORAD(deg): return (deg * np.pi / 180.) def RADTODEG(rad): return (rad * 180. 
/ np.pi) def DIVMOD(num,val): if isinstance(num, np.ndarray): # Treat number as numpy object _num = np.remainder(num, val) else: _num = divmod(num, val)[1] return _num def getLTime(): """Returns a formatted string with the current local time.""" _ltime = _time.localtime(_time.time()) tlm_str = _time.strftime('%H:%M:%S (%d/%m/%Y)', _ltime) return tlm_str def getDate(): """Returns a formatted string with the current date.""" _ltime = _time.localtime(_time.time()) date_str = _time.strftime('%Y-%m-%dT%H:%M:%S',_ltime) return date_str def convertDate(date): """Convert DATE string into a decimal year.""" d, t = date.split('T') return decimal_date(d, timeobs=t) def decimal_date(dateobs, timeobs=None): """Convert DATE-OBS (and optional TIME-OBS) into a decimal year.""" year, month, day = dateobs.split('-') if timeobs is not None: hr, min, sec = timeobs.split(':') else: hr, min, sec = 0, 0, 0 rdate = datetime.datetime(int(year), int(month), int(day), int(hr), int(min), int(sec)) dday = (float(rdate.strftime("%j")) + rdate.hour / 24.0 + rdate.minute / (60. * 24) + rdate.second / (3600 * 24.)) / 365.25 ddate = int(year) + dday return ddate def interpretDQvalue(input): """ Converts an integer 'input' into its component bit values as a list of power of 2 integers. For example, the bit value 1027 would return [1, 2, 1024] """ nbits = 16 # We will only support integer values up to 2**128 for iexp in [16, 32, 64, 128]: # Find out whether the input value is less than 2**iexp if (input // (2 ** iexp)) == 0: # when it finally is, we have identified how many bits can be used to # describe this input bitvalue nbits = iexp break # Find out how 'dtype' values are described on this machine a = np.zeros(1, dtype='int16') atype_descr = a.dtype.descr[0][1] # Use this description to build the description we need for our input integer dtype_str = atype_descr[:2] + str(nbits // 8) result = np.zeros(nbits + 1, dtype=dtype_str) # For each bit, determine whether it has been set in the input value or not for n in range(nbits + 1): i = 2 ** n if input & i > 0: # record which bit has been set as the power-of-2 integer result[n] = i # Return the non-zero unique values as a Python list return np.delete(np.unique(result), 0).tolist() def isFits(input): """ Returns -------- isFits: tuple An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and ``fitstype`` are specified as: - ``isfits``: True|False - ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None Notes ----- Input images which do not have a valid FITS filename will automatically result in a return of (False, None). In the case that the input has a valid FITS filename but runs into some error upon opening, this routine will raise that exception for the calling routine/user to handle. 
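    Examples
    --------
    A hedged sketch; the filenames are illustrative only::

        >>> isFits('rootname_flt.fits')     # doctest: +SKIP
        (True, 'mef')
        >>> isFits('rootname.txt')          # doctest: +SKIP
        (False, None)
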
""" isfits = False fitstype = None names = ['fits', 'fit', 'FITS', 'FIT'] #determine if input is a fits file based on extension # Only check type of FITS file if filename ends in valid FITS string f = None fileclose = False if isinstance(input, fits.HDUList): isfits = True f = input else: isfits = True in [input.endswith(l) for l in names] # if input is a fits file determine what kind of fits it is #waiver fits len(shape) == 3 if isfits: if not f: try: f = fits.open(input, mode='readonly') fileclose = True except Exception: if f is not None: f.close() raise data0 = f[0].data if data0 is not None: try: if isinstance(f[1], fits.TableHDU): fitstype = 'waiver' except IndexError: fitstype = 'simple' else: fitstype = 'mef' if fileclose: f.close() return isfits, fitstype def buildRotMatrix(theta): _theta = DEGTORAD(theta) _mrot = np.zeros(shape=(2,2), dtype=np.float64) _mrot[0] = (np.cos(_theta), np.sin(_theta)) _mrot[1] = (-np.sin(_theta), np.cos(_theta)) return _mrot ################# # # # Generic File/Header Functions # # ################# def verifyWriteMode(files): """ Checks whether files are writable. It is up to the calling routine to raise an Exception, if desired. This function returns True, if all files are writable and False, if any are not writable. In addition, for all files found to not be writable, it will print out the list of names of affected files. """ # Start by insuring that input is a list of filenames, # if only a single filename has been given as input, # convert it to a list with len == 1. if not isinstance(files, list): files = [files] # Keep track of the name of each file which is not writable not_writable = [] writable = True # Check each file in input list for fname in files: try: f = open(fname,'a') f.close() del f except: not_writable.append(fname) writable = False if not writable: print('The following file(s) do not have write permission!') for fname in not_writable: print(' ', fname) return writable def getFilterNames(header, filternames=None): """ Returns a comma-separated string of filter names extracted from the input header (PyFITS header object). This function has been hard-coded to support the following instruments: ACS, WFPC2, STIS This function relies on the 'INSTRUME' keyword to define what instrument has been used to generate the observation/header. The 'filternames' parameter allows the user to provide a list of keyword names for their instrument, in the case their instrument is not supported. """ # Define the keyword names for each instrument _keydict = { 'ACS': ['FILTER1', 'FILTER2'], 'WFPC2': ['FILTNAM1', 'FILTNAM2'], 'STIS': ['OPT_ELEM', 'FILTER'], 'NICMOS': ['FILTER', 'FILTER2'], 'WFC3': ['FILTER', 'FILTER2'] } # Find out what instrument the input header came from, based on the # 'INSTRUME' keyword if 'INSTRUME' in header: instrument = header['INSTRUME'] else: raise ValueError('Header does not contain INSTRUME keyword.') # Check to make sure this instrument is supported in _keydict if instrument in _keydict: _filtlist = _keydict[instrument] else: _filtlist = filternames # At this point, we know what keywords correspond to the filter names # in the header. Now, get the values associated with those keywords. # Build a list of all filter name values, with the exception of the # blank keywords. Values containing 'CLEAR' or 'N/A' are valid. 
_filter_values = [] for _key in _filtlist: if _key in header: _val = header[_key] else: _val = '' if _val.strip() != '': _filter_values.append(header[_key]) # Return the comma-separated list return ','.join(_filter_values) def buildNewRootname(filename, extn=None, extlist=None): """ Build rootname for a new file. Use 'extn' for new filename if given, does NOT append a suffix/extension at all. Does NOT check to see if it exists already. Will ALWAYS return a new filename. """ # Search known suffixes to replace ('_crj.fits',...) _extlist = copy.deepcopy(EXTLIST) # Also, add a default where '_dth.fits' replaces # whatever extension was there ('.fits','.c1h',...) #_extlist.append('.') # Also append any user-specified extensions... if extlist: _extlist += extlist for suffix in _extlist: _indx = filename.find(suffix) if _indx > 0: break if _indx < 0: # default to entire rootname _indx = len(filename) if extn is None: extn = '' return filename[:_indx] + extn def buildRootname(filename, ext=None): """ Build a new rootname for an existing file and given extension. Any user supplied extensions to use for searching for file need to be provided as a list of extensions. Examples -------- :: >>> rootname = buildRootname(filename, ext=['_dth.fits']) """ if filename in ['' ,' ', None]: return None fpath, fname = os.path.split(filename) if ext is not None and '_' in ext[0]: froot = os.path.splitext(fname)[0].split('_')[0] else: froot = fname if fpath in ['', ' ', None]: fpath = os.curdir # Get complete list of filenames from current directory flist = os.listdir(fpath) #First, assume given filename is complete and verify # it exists... rootname = None for name in flist: if name == froot: rootname = froot break elif name == froot + '.fits': rootname = froot + '.fits' break # If we have an incomplete filename, try building a default # name and seeing if it exists... # # Set up default list of suffix/extensions to add to rootname _extlist = [] for extn in EXTLIST: _extlist.append(extn) if rootname is None: # Add any user-specified extension to list of extensions... if ext is not None: for i in ext: _extlist.insert(0,i) # loop over all extensions looking for a filename that matches... for extn in _extlist: # Start by looking for filename with exactly # the same case a provided in ASN table... rname = froot + extn for name in flist: if rname == name: rootname = name break if rootname is None: # Try looking for all lower-case filename # instead of a mixed-case filename as required # by the pipeline. rname = froot.lower() + extn for name in flist: if rname == name: rootname = name break if rootname is not None: break # If we still haven't found the file, see if we have the # info to build one... if rootname is None and ext is not None: # Check to see if we have a full filename to start with... _indx = froot.find('.') if _indx > 0: rootname = froot[:_indx] + ext[0] else: rootname = froot + ext[0] if fpath not in ['.', '', ' ', None]: rootname = os.path.join(fpath, rootname) # It will be up to the calling routine to verify # that a valid rootname, rather than 'None', was returned. return rootname def getKeyword(filename, keyword, default=None, handle=None): """ General, write-safe method for returning a keyword value from the header of a IRAF recognized image. Returns the value as a string. """ # Insure that there is at least 1 extension specified... 
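    # e.g. a bare name such as 'image.fits' is treated as 'image.fits[0]',
    # so the PRIMARY header is searched by default.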
if filename.find('[') < 0: filename += '[0]' _fname, _extn = parseFilename(filename) if not handle: # Open image whether it is FITS or GEIS _fimg = openImage(_fname) else: # Use what the user provides, after insuring # that it is a proper PyFITS object. if isinstance(handle, fits.HDUList): _fimg = handle else: raise ValueError('Handle must be %r object!' % fits.HDUList) # Address the correct header _hdr = getExtn(_fimg, _extn).header try: value = _hdr[keyword] except KeyError: _nextn = findKeywordExtn(_fimg, keyword) try: value = _fimg[_nextn].header[keyword] except KeyError: value = '' if not handle: _fimg.close() del _fimg if value == '': if default is None: value = None else: value = default # NOTE: Need to clean up the keyword.. Occasionally the keyword value # goes right up to the "/" FITS delimiter, and iraf.keypar is incapable # of realizing this, so it incorporates "/" along with the keyword value. # For example, after running "pydrizzle" on the image "j8e601bkq_flt.fits", # the CD keywords look like this: # # CD1_1 = 9.221627430999639E-06/ partial of first axis coordinate w.r.t. x # CD1_2 = -1.0346992614799E-05 / partial of first axis coordinate w.r.t. y # # so for CD1_1, iraf.keypar returns: # "9.221627430999639E-06/" # # So, the following piece of code CHECKS for this and FIXES the string, # very simply by removing the last character if it is a "/". # This fix courtesy of Anton Koekemoer, 2002. elif isinstance(value, string_types): if value[-1:] == '/': value = value[:-1] return value def getHeader(filename, handle=None): """ Return a copy of the PRIMARY header, along with any group/extension header for this filename specification. """ _fname, _extn = parseFilename(filename) # Allow the user to provide an already opened PyFITS object # to derive the header from... # if not handle: # Open image whether it is FITS or GEIS _fimg = openImage(_fname, mode='readonly') else: # Use what the user provides, after insuring # that it is a proper PyFITS object. if isinstance(handle, fits.HDUList): _fimg = handle else: raise ValueError('Handle must be a %r object!' % fits.HDUList) _hdr = _fimg['PRIMARY'].header.copy() # if the data is not in the primary array delete NAXIS # so that the correct value is read from the extension header if _hdr['NAXIS'] == 0: del _hdr['NAXIS'] if not (_extn is None or (_extn.isdigit() and int(_extn) == 0)): # Append correct extension/chip/group header to PRIMARY... #for _card in getExtn(_fimg,_extn).header.ascard: #_hdr.ascard.append(_card) for _card in getExtn(_fimg, _extn).header.cards: _hdr.append(_card) if not handle: # Close file handle now... _fimg.close() del _fimg return _hdr def updateKeyword(filename, key, value,show=yes): """Add/update keyword to header with given value.""" _fname, _extn = parseFilename(filename) # Open image whether it is FITS or GEIS _fimg = openImage(_fname, mode='update') # Address the correct header _hdr = getExtn(_fimg, _extn).header # Assign a new value or add new keyword here. try: _hdr[key] = value except KeyError: if show: print('Adding new keyword ', key, '=', value) _hdr[key] = value # Close image _fimg.close() del _fimg def buildFITSName(geisname): """Build a new FITS filename for a GEIS input image.""" # User wants to make a FITS copy and update it... _indx = geisname.rfind('.') _fitsname = geisname[:_indx] + '_' + geisname[_indx + 1:-1] + 'h.fits' return _fitsname def openImage(filename, mode='readonly', memmap=False, writefits=True, clobber=True, fitsname=None): """ Opens file and returns PyFITS object. 
Works on both FITS and GEIS formatted images. Notes ----- If a GEIS or waivered FITS image is used as input, it will convert it to a MEF object and only if ``writefits = True`` will write it out to a file. If ``fitsname = None``, the name used to write out the new MEF file will be created using `buildFITSName`. Parameters ---------- filename: str name of input file mode: str mode for opening file based on PyFITS `mode` parameter values memmap: bool switch for using memory mapping, `False` for no, `True` for yes writefits: bool if `True`, will write out GEIS as multi-extension FITS and return handle to that opened GEIS-derived MEF file clobber: bool overwrite previously written out GEIS-derived MEF file fitsname: str name to use for GEIS-derived MEF file, if None and writefits==`True`, will use 'buildFITSName()' to generate one """ from stwcs import updatewcs # Insure that the filename is always fully expanded # This will not affect filenames without paths or # filenames specified with extensions. filename = osfn(filename) # Extract the rootname and extension specification # from input image name _fname, _iextn = parseFilename(filename) # Check whether we have a FITS file and if so what type isfits, fitstype = isFits(_fname) if isfits: if fitstype != 'waiver': # Open the FITS file fimg = fits.open(_fname, mode=mode, memmap=memmap) return fimg else: fimg = convertwaiveredfits.convertwaiveredfits(_fname) #check for the existence of a data quality file _dqname = buildNewRootname(_fname, extn='_c1f.fits') dqexists = os.path.exists(_dqname) if dqexists: try: dqfile = convertwaiveredfits.convertwaiveredfits(_dqname) dqfitsname = buildNewRootname(_dqname, extn='_c1h.fits') except: print("Could not read data quality file %s" % _dqname) if writefits: # User wants to make a FITS copy and update it # using the filename they have provided if fitsname is None: rname = buildNewRootname(_fname) fitsname = buildNewRootname(rname, extn='_c0h.fits') # Write out GEIS image as multi-extension FITS. fexists = os.path.exists(fitsname) if (fexists and clobber) or not fexists: print('Writing out WAIVERED as MEF to ', fitsname) if ASTROPY_VER_GE13: fimg.writeto(fitsname, overwrite=clobber) else: fimg.writeto(fitsname, clobber=clobber) if dqexists: print('Writing out WAIVERED as MEF to ', dqfitsname) if ASTROPY_VER_GE13: dqfile.writeto(dqfitsname, overwrite=clobber) else: dqfile.writeto(dqfitsname, clobber=clobber) # Now close input GEIS image, and open writable # handle to output FITS image instead... fimg.close() del fimg # Image re-written as MEF, now it needs its WCS updated updatewcs.updatewcs(fitsname) fimg = fits.open(fitsname, mode=mode, memmap=memmap) # Return handle for use by user return fimg else: # Input was specified as a GEIS image, but no FITS copy # exists. Read it in with 'readgeis' and make a copy # then open the FITS copy... try: # Open as a GEIS image for reading only fimg = readgeis.readgeis(_fname) except: raise IOError("Could not open GEIS input: %s" % _fname) #check for the existence of a data quality file _dqname = buildNewRootname(_fname, extn='.c1h') dqexists = os.path.exists(_dqname) if dqexists: try: dqfile = readgeis.readgeis(_dqname) dqfitsname = buildFITSName(_dqname) except: print("Could not read data quality file %s" % _dqname) # Check to see if user wanted to update GEIS header. 
# or write out a multi-extension FITS file and return a handle to it if writefits: # User wants to make a FITS copy and update it # using the filename they have provided if fitsname is None: fitsname = buildFITSName(_fname) # Write out GEIS image as multi-extension FITS. fexists = os.path.exists(fitsname) if (fexists and clobber) or not fexists: print('Writing out GEIS as MEF to ', fitsname) if ASTROPY_VER_GE13: fimg.writeto(fitsname, overwrite=clobber) else: fimg.writeto(fitsname, clobber=clobber) if dqexists: print('Writing out GEIS as MEF to ', dqfitsname) if ASTROPY_VER_GE13: dqfile.writeto(dqfitsname, overwrite=clobber) else: dqfile.writeto(dqfitsname, clobber=clobber) # Now close input GEIS image, and open writable # handle to output FITS image instead... fimg.close() del fimg # Image re-written as MEF, now it needs its WCS updated updatewcs.updatewcs(fitsname) fimg = fits.open(fitsname, mode=mode, memmap=memmap) # Return handle for use by user return fimg def parseFilename(filename): """ Parse out filename from any specified extensions. Returns rootname and string version of extension name. """ # Parse out any extension specified in filename _indx = filename.find('[') if _indx > 0: # Read extension name provided _fname = filename[:_indx] _extn = filename[_indx + 1:-1] else: _fname = filename _extn = None return _fname, _extn def parseExtn(extn=None): """ Parse a string representing a qualified fits extension name as in the output of `parseFilename` and return a tuple ``(str(extname), int(extver))``, which can be passed to `astropy.io.fits` functions using the 'ext' kw. Default return is the first extension in a fits file. Examples -------- :: >>> parseExtn('sci, 2') ('sci', 2) >>> parseExtn('2') ('', 2) >>> parseExtn('sci') ('sci', 1) """ if not extn: return ('', 0) try: lext = extn.split(',') except: return ('', 1) if len(lext) == 1 and lext[0].isdigit(): return ("", int(lext[0])) elif len(lext) == 2: return (lext[0], int(lext[1])) else: return (lext[0], 1) def countExtn(fimg, extname='SCI'): """ Return the number of 'extname' extensions, defaulting to counting the number of SCI extensions. """ closefits = False if isinstance(fimg, string_types): fimg = fits.open(fimg) closefits = True n = 0 for e in fimg: if 'extname' in e.header and e.header['extname'] == extname: n += 1 if closefits: fimg.close() return n def getExtn(fimg, extn=None): """ Returns the PyFITS extension corresponding to extension specified in filename. Defaults to returning the first extension with data or the primary extension, if none have data. If a non-existent extension has been specified, it raises a `KeyError` exception. """ # If no extension is provided, search for first extension # in FITS file with data associated with it. if extn is None: # Set up default to point to PRIMARY extension. _extn = fimg[0] # then look for first extension with data. for _e in fimg: if _e.data is not None: _extn = _e break else: # An extension was provided, so parse it out... if repr(extn).find(',') > 1: if isinstance(extn, tuple): # We have a tuple possibly created by parseExtn(), so # turn it into a list for easier manipulation. 
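                # e.g. parseExtn('sci,2') returns ('sci', 2) and is handled
                # here, while a plain string 'sci,2' falls through to the
                # split(',') branch below.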
_extns = list(extn) if '' in _extns: _extns.remove('') else: _extns = extn.split(',') # Two values given for extension: # for example, 'sci,1' or 'dq,1' try: _extn = fimg[_extns[0], int(_extns[1])] except KeyError: _extn = None for e in fimg: hdr = e.header if ('extname' in hdr and hdr['extname'].lower() == _extns[0].lower() and hdr['extver'] == int(_extns[1])): _extn = e break elif repr(extn).find('/') > 1: # We are working with GEIS group syntax _indx = str(extn[:extn.find('/')]) _extn = fimg[int(_indx)] elif isinstance(extn, string_types): if extn.strip() == '': _extn = None # force error since invalid name was provided # Only one extension value specified... elif extn.isdigit(): # We only have an extension number specified as a string... _nextn = int(extn) else: # We only have EXTNAME specified... _nextn = None if extn.lower() == 'primary': _nextn = 0 else: i = 0 for hdu in fimg: isimg = 'extname' in hdu.header hdr = hdu.header if isimg and extn.lower() == hdr['extname'].lower(): _nextn = i break i += 1 if _nextn < len(fimg): _extn = fimg[_nextn] else: _extn = None else: # Only integer extension number given, or default of 0 is used. if int(extn) < len(fimg): _extn = fimg[int(extn)] else: _extn = None if _extn is None: raise KeyError('Extension %s not found' % extn) return _extn #Revision History: # Nov 2001: findFile upgraded to accept full filenames with paths, # instead of working only on files from current directory. WJH # # Base function for # with optional path. def findFile(input): """Search a directory for full filename with optional path.""" # If no input name is provided, default to returning 'no'(FALSE) if not input: return no # We use 'osfn' here to insure that any IRAF variables are # expanded out before splitting out the path... _fdir, _fname = os.path.split(osfn(input)) if _fdir == '': _fdir = os.curdir try: flist = os.listdir(_fdir) except OSError: # handle when requested file in on a disconnect network store return no _root, _extn = parseFilename(_fname) found = no for name in flist: if name == _root: # Check to see if given extension, if any, exists if _extn is None: found = yes continue else: _split = _extn.split(',') _extnum = None _extver = None if _split[0].isdigit(): _extname = None _extnum = int(_split[0]) else: _extname = _split[0] if len(_split) > 1: _extver = int(_split[1]) else: _extver = 1 f = openImage(_root) f.close() if _extnum is not None: if _extnum < len(f): found = yes del f continue else: del f else: _fext = findExtname(f, _extname, extver=_extver) if _fext is not None: found = yes del f continue return found def checkFileExists(filename, directory=None): """ Checks to see if file specified exists in current or specified directory. Default is current directory. Returns 1 if it exists, 0 if not found. """ if directory is not None: fname = os.path.join(directory,filename) else: fname = filename _exist = os.path.exists(fname) return _exist def copyFile(input, output, replace=None): """Copy a file whole from input to output.""" _found = findFile(output) if not _found or (_found and replace): shutil.copy2(input, output) def _remove(file): # Check to see if file exists. If not, return immediately. if not findFile(file): return if file.find('.fits') > 0: try: os.remove(file) except (IOError, OSError): pass elif file.find('.imh') > 0: # Delete both .imh and .pix files os.remove(file) os.remove(file[:-3] + 'pix') else: # If we have a GEIS image that has separate header # and pixel files which need to be removed. 
# Assumption: filenames end in '.??h' and '.??d' # os.remove(file) # At this point, we may be deleting a non-image # file, so only verify whether a GEIS hhd or similar # file exists before trying to delete it. if findFile(file[:-1] + 'd'): os.remove(file[:-1] + 'd') def removeFile(inlist): """ Utility function for deleting a list of files or a single file. This function will automatically delete both files of a GEIS image, just like 'iraf.imdelete'. """ if not isinstance(inlist, string_types): # We do have a list, so delete all filenames in list. # Treat like a list of full filenames _ldir = os.listdir('.') for f in inlist: # Now, check to see if there are wildcards which need to be expanded if f.find('*') >= 0 or f.find('?') >= 0: # We have a wild card specification regpatt = f.replace('?', '.?') regpatt = regpatt.replace('*', '.*') _reg = re.compile(regpatt) for file in _ldir: if _reg.match(file): _remove(file) else: # This is just a single filename _remove(f) else: # It must be a string then, so treat as a single filename _remove(inlist) def findKeywordExtn(ft, keyword, value=None): """ This function will return the index of the extension in a multi-extension FITS file which contains the desired keyword with the given value. """ i = 0 extnum = -1 # Search through all the extensions in the FITS object for chip in ft: hdr = chip.header # Check to make sure the extension has the given keyword if keyword in hdr: if value is not None: # If it does, then does the value match the desired value # MUST use 'str.strip' to match against any input string! if hdr[keyword].strip() == value: extnum = i break else: extnum = i break i += 1 # Return the index of the extension which contained the # desired EXTNAME value. return extnum def findExtname(fimg, extname, extver=None): """ Returns the list number of the extension corresponding to EXTNAME given. """ i = 0 extnum = None for chip in fimg: hdr = chip.header if 'EXTNAME' in hdr: if hdr['EXTNAME'].strip() == extname.upper(): if extver is None or hdr['EXTVER'] == extver: extnum = i break i += 1 return extnum def rAsciiLine(ifile): """Returns the next non-blank line in an ASCII file.""" _line = ifile.readline().strip() while len(_line) == 0: _line = ifile.readline().strip() return _line ####################################################### # # # # IRAF environment variable interpretation routines # extracted from PyRAF's 'iraffunction.py' # # These allow IRAF variables to be interpreted without # having to install/use IRAF or PyRAF. # # ####################################################### # ----------------------------------------------------- # private dictionaries: # # _varDict: dictionary of all IRAF cl variables (defined with set name=value) # _tasks: all IRAF tasks (defined with task name=value) # _mmtasks: minimum-match dictionary for tasks # _pkgs: min-match dictionary for all packages (defined with # task name.pkg=value) # _loaded: loaded packages # ----------------------------------------------------- # Will want to enhance this to allow a "bye" function that unloads packages. # That might be done using a stack of definitions for each task. 
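# A hedged usage sketch of the routines defined below (the variable name
# and paths are purely illustrative):
#
#     set(mydata='/data/reference/')      # define an IRAF-style variable
#     osfn('mydata$input.fits')           # -> '/data/reference/input.fits'
#     envget('mydata')                    # -> '/data/reference/'
#     unset('mydata')                     # silently removes the definition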
_varDict = {} # module variables that don't get saved (they get # initialized when this module is imported) unsavedVars = [ 'EOF', '_NullFile', '_NullPath', '__builtins__', '__doc__', '__file__', '__name__', '__re_var_match', '__re_var_match2', '__re_var_paren', '_badFormats', '_clearString', '_exitCommands', '_unsavedVarsDict', '_radixDigits', '_re_taskname', '_sttyArgs', 'no', 'yes', 'userWorkingHome' ] _unsavedVarsDict = {} for v in unsavedVars: _unsavedVarsDict[v] = 1 del unsavedVars, v # ----------------------------------------------------- # Miscellaneous access routines: # getVarList: Get list of names of all defined IRAF variables # ----------------------------------------------------- def getVarDict(): """Returns dictionary all IRAF variables.""" return _varDict def getVarList(): """Returns list of names of all IRAF variables.""" return list(_varDict.keys()) # ----------------------------------------------------- # listVars: # list contents of the dictionaries # ----------------------------------------------------- def listVars(prefix="", equals="\t= ", **kw): """List IRAF variables.""" keylist = getVarList() if len(keylist) == 0: print('No IRAF variables defined') else: keylist.sort() for word in keylist: print("%s%s%s%s" % (prefix, word, equals, envget(word))) def untranslateName(s): """Undo Python conversion of CL parameter or variable name.""" s = s.replace('DOT', '.') s = s.replace('DOLLAR', '$') # delete 'PY' at start of name components if s[:2] == 'PY': s = s[2:] s = s.replace('.PY', '.') return s def envget(var, default=None): """Get value of IRAF or OS environment variable.""" if 'pyraf' in sys.modules: #ONLY if pyraf is already loaded, import iraf into the namespace from pyraf import iraf else: # else set iraf to None so it knows to not use iraf's environment iraf = None try: if iraf: return iraf.envget(var) else: raise KeyError except KeyError: try: return _varDict[var] except KeyError: try: return os.environ[var] except KeyError: if default is not None: return default elif var == 'TERM': # Return a default value for TERM # TERM gets caught as it is found in the default # login.cl file setup by IRAF. 
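                    # (so envget('TERM') never raises, unlike other
                    # undefined variable names)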
print("Using default TERM value for session.") return 'xterm' else: raise KeyError("Undefined environment variable `%s'" % var) def osfn(filename): """Convert IRAF virtual path name to OS pathname.""" # Try to emulate the CL version closely: # # - expands IRAF virtual file names # - strips blanks around path components # - if no slashes or relative paths, return relative pathname # - otherwise return absolute pathname if filename is None: return filename ename = Expand(filename) dlist = [part.strip() for part in ename.split(os.sep)] if len(dlist) == 1 and dlist[0] not in [os.curdir, os.pardir]: return dlist[0] # I use str.join instead of os.path.join here because # os.path.join("","") returns "" instead of "/" epath = os.sep.join(dlist) fname = os.path.abspath(epath) # append '/' if relative directory was at end or filename ends with '/' if fname[-1] != os.sep and dlist[-1] in ['', os.curdir, os.pardir]: fname = fname + os.sep return fname def defvar(varname): """Returns true if CL variable is defined.""" if 'pyraf' in sys.modules: #ONLY if pyraf is already loaded, import iraf into the namespace from pyraf import iraf else: # else set iraf to None so it knows to not use iraf's environment iraf = None if iraf: _irafdef = iraf.envget(varname) else: _irafdef = 0 return varname in _varDict or varname in os.environ or _irafdef # ----------------------------------------------------- # IRAF utility procedures # ----------------------------------------------------- # these have extra keywords (redirection, _save) because they can # be called as tasks def set(*args, **kw): """Set IRAF environment variables.""" if len(args) == 0: if len(kw) != 0: # normal case is only keyword,value pairs for keyword, value in kw.items(): keyword = untranslateName(keyword) svalue = str(value) _varDict[keyword] = svalue else: # set with no arguments lists all variables (using same format # as IRAF) listVars(prefix=" ", equals="=") else: # The only other case allowed is the peculiar syntax # 'set @filename', which only gets used in the zzsetenv.def file, # where it reads extern.pkg. That file also gets read (in full cl # mode) by clpackage.cl. I get errors if I read this during # zzsetenv.def, so just ignore it here... # # Flag any other syntax as an error. if (len(args) != 1 or len(kw) != 0 or not isinstance(args[0], string_types) or args[0][:1] != '@'): raise SyntaxError("set requires name=value pairs") # currently do not distinguish set from reset # this will change when keep/bye/unloading are implemented reset = set def show(*args, **kw): """Print value of IRAF or OS environment variables.""" if len(kw): raise TypeError('unexpected keyword argument: %r' % list(kw)) if args: for arg in args: print(envget(arg)) else: # print them all listVars(prefix=" ", equals="=") def unset(*args, **kw): """ Unset IRAF environment variables. This is not a standard IRAF task, but it is obviously useful. It makes the resulting variables undefined. It silently ignores variables that are not defined. It does not change the os environment variables. 
""" if len(kw) != 0: raise SyntaxError("unset requires a list of variable names") for arg in args: if arg in _varDict: del _varDict[arg] def time(**kw): """Print current time and date.""" print(_time.ctime(_time.time())) # ----------------------------------------------------- # Expand: Expand a string with embedded IRAF variables # (IRAF virtual filename) # ----------------------------------------------------- # Input string is in format 'name$rest' or 'name$str(name2)' where # name and name2 are defined in the _varDict dictionary. The # name2 string may have embedded dollar signs, which are ignored. # There may be multiple embedded parenthesized variable names. # # Returns string with IRAF variable name expanded to full host name. # Input may also be a comma-separated list of strings to Expand, # in which case an expanded comma-separated list is returned. # search for leading string without embedded '$' __re_var_match = re.compile(r'(?P[^$]*)\$') __re_var_match2 = re.compile(r'\$(?P\w*)') # search for string embedded in parentheses __re_var_paren = re.compile(r'\((?P[^()]*)\)') def Expand(instring, noerror=0): """ Expand a string with embedded IRAF variables (IRAF virtual filename). Allows comma-separated lists. Also uses os.path.expanduser to replace '~' symbols. Set the noerror flag to silently replace undefined variables with just the variable name or null (so Expand('abc$def') = 'abcdef' and Expand('(abc)def') = 'def'). This is the IRAF behavior, though it is confusing and hides errors. """ # call _expand1 for each entry in comma-separated list wordlist = instring.split(",") outlist = [] for word in wordlist: outlist.append(os.path.expanduser(_expand1(word, noerror=noerror))) return ",".join(outlist) def _expand1(instring, noerror): """Expand a string with embedded IRAF variables (IRAF virtual filename).""" # first expand names in parentheses # note this works on nested names too, expanding from the # inside out (just like IRAF) mm = __re_var_paren.search(instring) while mm is not None: # remove embedded dollar signs from name varname = mm.group('varname').replace('$','') if defvar(varname): varname = envget(varname) elif noerror: varname = "" else: raise ValueError("Undefined variable `%s' in string `%s'" % (varname, instring)) instring = instring[:mm.start()] + varname + instring[mm.end():] mm = __re_var_paren.search(instring) # now expand variable name at start of string mm = __re_var_match.match(instring) if mm is None: return instring varname = mm.group('varname') if varname in ['', ' ', None]: mm = __re_var_match2.match(instring) varname = mm.group('varname') if defvar(varname): # recursively expand string after substitution return _expand1(envget(varname) + instring[mm.end():], noerror) elif noerror: return _expand1(varname + instring[mm.end():], noerror) else: raise ValueError("Undefined variable `%s' in string `%s'" % (varname, instring)) def access(filename): """Returns true if file exists.""" return os.path.exists(Expand(filename)) stsci.tools-3.4.12/lib/stsci/tools/fitsdiff.py0000755001120100020070000000401213006721301022761 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python # $Id$ """fitsdiff is now a part of PyFITS--the fitsdiff in PyFITS replaces the fitsdiff that used to be in the module. Now this module just provides a wrapper around astropy.io.fits.diff for backwards compatibility with the old interface in case anyone uses it. 
""" import os import sys PY3K = sys.version_info[0] > 2 if PY3K: string_types = str else: string_types = basestring from astropy.io.fits.diff import FITSDiff from astropy.io.fits.scripts.fitsdiff import log, main def fitsdiff(input1, input2, comment_excl_list='', value_excl_list='', field_excl_list='', maxdiff=10, delta=0.0, neglect_blanks=True, output=None): if isinstance(comment_excl_list, string_types): comment_excl_list = list_parse(comment_excl_list) if isinstance(value_excl_list, string_types): value_excl_list = list_parse(value_excl_list) if isinstance(field_excl_list, string_types): field_excl_list = list_parse(field_excl_list) diff = FITSDiff(input1, input2, ignore_keywords=value_excl_list, ignore_comments=comment_excl_list, ignore_fields=field_excl_list, numdiffs=maxdiff, tolerance=delta, ignore_blanks=neglect_blanks) if output is None: output = sys.stdout diff.report(output) return diff.identical def list_parse(name_list): """Parse a comma-separated list of values, or a filename (starting with @) containing a list value on each line. """ if name_list and name_list[0] == '@': value = name_list[1:] if not os.path.exists(value): log.warning('The file %s does not exist' % value) return try: return [v.strip() for v in open(value, 'r').readlines()] except IOError as e: log.warning('reading %s failed: %s; ignoring this file' % (value, e)) else: return [v.strip() for v in name_list.split(',')] if __name__ == "__main__": sys.exit(main()) stsci.tools-3.4.12/lib/stsci/tools/for2to3.py0000644001120100020070000001116113006721301022461 0ustar jhunkSTSCI\science00000000000000""" This is a temporary module, used during (and for a while after) the transition to Python 3. This code is planned to be kept in place until the least version of Python supported no longer requires it (and of course until all callers no longer need it). This code should run as-is in 2.x and also run unedited after 2to3 in 3.x. $Id$ """ from __future__ import division # confidence high import os, sys PY3K = sys.version_info[0] > 2 def ndarr2str(arr, encoding='ascii'): """ This is used to ensure that the return value of arr.tostring() is actually a string. This will prevent lots of if-checks in calling code. As of numpy v1.6.1 (in Python 3.2.3), the tostring() function still returns type 'bytes', not 'str' as it advertises. """ # be fast, don't check - just assume 'arr' is a numpy array - the tostring # call will fail anyway if not retval = arr.tostring() # would rather check "if isinstance(retval, bytes)", but support 2.5. # could rm the if PY3K check, but it makes this faster on 2.x. if PY3K and not isinstance(retval, str): return retval.decode(encoding) else: # is str return retval def ndarr2bytes(arr, encoding='ascii'): """ This is used to ensure that the return value of arr.tostring() is actually a *bytes* array in PY3K. See notes in ndarr2str above. Even though we consider it a bug that numpy's tostring() function returns a bytes array in PY3K, there are actually many instances where that is what we want - bytes, not unicode. So we use this function in those instances to ensure that when/if this numpy "bug" is "fixed", that our calling code still gets bytes where it needs/expects them. """ # be fast, don't check - just assume 'arr' is a numpy array - the tostring # call will fail anyway if not retval = arr.tostring() # would rather check "if not isinstance(retval, bytes)", but support 2.5. if PY3K and isinstance(retval, str): # Take note if this ever gets used. 
If this ever occurs, it # is likely wildly inefficient since numpy.tostring() is now # returning unicode and numpy surely has a tobytes() func by now. # If so, add a code path to call its tobytes() func at our start. return retval.encode(encoding) else: # is str==bytes in 2.x return retval def tobytes(s, encoding='ascii'): """ Convert string s to the 'bytes' type, in all Pythons, even back before Python 2.6. What 'str' means varies by PY3K or not. In Pythons before 3.0, this is technically the same as the str type in terms of the character data in memory. """ # NOTE: after we abandon 2.5, we might simply instead use "bytes(s)" # NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b' if PY3K: if isinstance(s, bytes): return s else: return s.encode(encoding) else: # for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes # but handle if unicode is passed if isinstance(s, unicode): return s.encode(encoding) else: return s def tostr(s, encoding='ascii'): """ Convert string-like-thing s to the 'str' type, in all Pythons, even back before Python 2.6. What 'str' means varies by PY3K or not. In Pythons before 3.0, str and bytes are the same type. In Python 3+, this may require a decoding step. """ if PY3K: if isinstance(s, str): # str == unicode in PY3K return s else: # s is type bytes return s.decode(encoding) else: # for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes # but handle if unicode is passed if isinstance(s, unicode): return s.encode(encoding) else: return s try: BNULLSTR = tobytes('') # after dropping 2.5, change to: b'' BNEWLINE = tobytes('\n') # after dropping 2.5, change to: b'\n' except: BNULLSTR = '' BNEWLINE = '\n' def bytes_read(fd, sz): """ Perform an os.read in a way that can handle both Python2 and Python3 IO. Assume we are always piping only ASCII characters (since that is all we have ever done with IRAF). Either way, return the data as bytes. """ # return tobytes(os.read(fd, sz)) return os.read(fd, sz) # already returns str in Py2.x and bytes in PY3K def bytes_write(fd, bufstr): """ Perform an os.write in a way that can handle both Python2 and Python3 IO. Assume we are always piping only ASCII characters (since that is all we have ever done with IRAF). Either way, write the binary data to fd. """ return os.write(fd, tobytes(bufstr)) stsci.tools-3.4.12/lib/stsci/tools/gfit.py0000644001120100020070000001101313112074217022115 0ustar jhunkSTSCI\science00000000000000""" Return the gaussian fit of a 1D array. Uses mpfit.py - a python implementation of the Levenberg-Marquardt least-squares minimization, based on MINPACK-1. See nmpfit.py for the history of this module (fortran -> idl -> python). nmpfit.py is a version of mpfit.py which uses numarray. @author: Nadia Dencheva @version: '1.0 (2007-02-20)' """ from __future__ import absolute_import, division, print_function __version__ = '1.0' #Release version number only __vdate__ = '2007-02-20' #Date of this version from . import numerixenv numerixenv.check() from . import nmpfit import numpy as N from numpy import random def _gauss_funct(p, fjac = None, x = None, y=None, err=None, weights=None): """ Defines the gaussian function to be used as the model. 
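    The model evaluated here is

        model(x) = p[0] * exp(-((x - p[1]) / p[2])**2 / 2)

    where p[0] is the amplitude, p[1] the center and p[2] the width
    (sigma); the residuals returned to mpfit are optionally scaled by
    the weights or the measurement errors.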
""" if p[2] != 0.0: Z = (x - p[1]) / p[2] model = p[0]*N.e ** (-Z**2 / 2.0) else: model = N.zeros(N.size(x)) status = 0 if weights is not None: if err is not None: print("Warning: Ignoring errors and using weights.\n") return [status, (y - model) * weights] elif err is not None: return [status, (y - model) / err] else: return [status, y-model] def test_gaussfit(): x=N.arange(10,20, 0.1) #x1=N.arange(0,10,0.1) #y1=5*N.e**(-(5-x1)**2/4) n=random.randn(100) y= 10*N.e**(-(15-x)**2/4) +n*3 #x=N.arange(100, typecode=N.Int) #y=n.zeros(10, typecode=n.Float) #y= random.rand(100) #err = N.zeros(100) #return gaussfit(x,y, maxiter=20) #, x,y, n return gfit1d(y,x, maxiter=20) def gfit1d(y, x=None, err = None, weights=None, par=None, parinfo=None, maxiter=200, quiet=0): """ Return the gaussian fit as an object. Parameters ---------- y: 1D Numarray array The data to be fitted x: 1D Numarray array (optional) The x values of the y array. x and y must have the same shape. err: 1D Numarray array (optional) 1D array with measurement errors, must be the same shape as y weights: 1D Numarray array (optiional) 1D array with weights, must be the same shape as y par: List (optional) Starting values for the parameters to be fitted parinfo: Dictionary of lists (optional) provides additional information for the parameters. For a detailed description see nmpfit.py. Parinfo can be used to limit parameters or keep some of them fixed. maxiter: number Maximum number of iterations to perform Default: 200 quiet: number if set to 1, nmpfit does not print to the screen Default: 0 Examples -------- >>> x=N.arange(10,20, 0.1) >>> y= 10*N.e**(-(x-15)**2/4) >>> print gfit1d(y,x=x, maxiter=20,quiet=1).params [ 10. 15. 1.41421356] """ if numerixenv.check_input(x) or numerixenv.check_input(y): raise ValueError("Input is a NumArray array. This version of %s requires a Numpy array\n" % __name__) y = y.astype(N.float) if weights is not None: weights = weights.astype(N.float) if err is not None: err = err.astype(N.float) if x is None and len(y.shape)==1 : x = N.arange(len(y)).astype(N.float) if x.shape != y.shape: print("input arrays X and Y must be of equal shape.\n") return fa = {'x':x, 'y':y, 'err':err, 'weights':weights} if par is not None: p = par else: ysigma = y.std() ind = N.nonzero(y > ysigma)[0] if len(ind) != 0: xind = int(ind.mean()) p2 = x[xind] p1 = y[xind] p3 = 1.0 else: ymax = y.max() ymin = y.min() ymean= y.mean() if (ymax - ymean) > (abs(ymin - ymean)): p1 = ymax else: p1 = ymin ind = (N.nonzero(y == p1))[0] p2 = x.mean() p3 = 1. p = [p1, p2, p3] m=nmpfit.mpfit(_gauss_funct, p,parinfo = parinfo, functkw=fa, maxiter=maxiter, quiet=quiet) if (m.status <=0): print('error message = ', m.errmsg) return m def plot_fit(y, mfit, x=None): if x is None: x = N.arange(len(y)) else: x = x p = mfit.params #y = gauss_funct(p, y) yy = p[0] + N.e**(-0.5*(x-p[1])**2/p[2]**2) try: import pylab except ImportError: print("Matplotlib is not available.\n") return pylab.plot(x,yy) def test(): import doctest from . import gfit return doctest.testmod(gfit) stsci.tools-3.4.12/lib/stsci/tools/imageiter.py0000644001120100020070000000222113112074217023133 0ustar jhunkSTSCI\science00000000000000""" License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE """ from __future__ import absolute_import, division, generators # confidence medium from . 
import numerixenv numerixenv.check() import numpy as N BUFSIZE = 1024*1000 # 1Mb cache size __version__ = '0.2' def ImageIter(imgarr, bufsize=None, overlap=0, copy=0): imgarr = N.asarray(imgarr) if bufsize is None: bufsize = BUFSIZE if len(imgarr.shape) == 1: if copy: yield imgarr.copy() else: yield imgarr else: nrows = int(bufsize / (imgarr.itemsize * imgarr.shape[1])) niter = int(imgarr.shape[0] / nrows) * nrows if copy: # Create a cache that will contain a copy of the input # not just a view... _cache = N.zeros((nrows, imgarr.shape[1]), dtype=imgarr.dtype.char) for pix in range(0, niter+1, nrows): if copy: _cache = imgarr[pix:pix+nrows].copy() yield _cache else: yield imgarr[pix:pix+nrows] if copy: _cache *= 0 pix -= overlap stsci.tools-3.4.12/lib/stsci/tools/irafglob.py0000644001120100020070000000304413112074217022756 0ustar jhunkSTSCI\science00000000000000""" License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE """ from __future__ import absolute_import, division # confidence high import glob try: from .fileutil import osfn # added to provide interpretation of environment variables except: osfn = None __author__ = 'Paul Barrett' __version__ = '1.1' def irafglob(inlist, atfile=None): """ Returns a list of filenames based on the type of IRAF input. Handles lists, wild-card characters, and at-files. For special at-files, use the atfile keyword to process them. This function is recursive, so IRAF lists can also contain at-files and wild-card characters, e.g. `a.fits`, `@file.lst`, `*flt.fits`. """ # Sanity check if inlist is None or len(inlist) == 0: return [] # Determine which form of input was provided: if isinstance(inlist, list): # python list flist = [] for f in inlist: flist += irafglob(f) elif ',' in inlist: # comma-separated string list flist = [] for f in inlist.split(','): f = f.strip() flist += irafglob(f) elif inlist[0] == '@': # file list flist = [] for f in open(inlist[1:], 'r').readlines(): f = f.rstrip() # hook for application specific atfiles. if atfile: f = atfile(f) flist += irafglob(f) else: # shell globbing if osfn: inlist = osfn(inlist) flist = glob.glob(inlist) return flist stsci.tools-3.4.12/lib/stsci/tools/irafglobals.py0000644001120100020070000003204413006721301023453 0ustar jhunkSTSCI\science00000000000000"""module irafglobals.py -- widely used IRAF constants and objects NOTE! This module does NOT require the installation of IRAF. It's location in stsci.tools is safe because it is intended to remain free of such dependency. yes, no Boolean values IrafError Standard IRAF exception Verbose Flag indicating verbosity level userIrafHome User's IRAF home directory (./ or ~/iraf/) userWorkingHome User's working home directory (the directory when this module gets imported.) EOF End-of-file indicator object INDEF Undefined object IrafTask "Tag" class for IrafTask type. IrafPkg "Tag" class for IrafPkg type This is defined so it is safe to say 'from irafglobals import *' The tag classes do nothing except allow checks of types via (e.g.) isinstance(o,IrafTask). Including it here decouples the other classes from the module that actually implements IrafTask, greatly reducing the need for mutual imports of modules by one another. $Id$ Taken from pyraf.irafglobals, originally signed "R. White, 2000 Jan 5" """ from __future__ import absolute_import, division import sys PY3K = sys.version_info[0] > 2 if PY3K: string_types = str number_types = (int, float) else: string_types = basestring number_types = (int, long, float) import os from . 
import compmixin _os = os _compmixin = compmixin del os, compmixin class IrafError(Exception): def __init__(self, msg, errno=-1, errmsg="", errtask=""): Exception.__init__(self, msg) self.errno = errno self.errmsg = errmsg or msg self.errtask = errtask # ----------------------------------------------------- # Verbose: verbosity flag # ----------------------------------------------------- # make Verbose an instance of a class so it can be imported # into other modules and changed by them class _VerboseClass(_compmixin.ComparableIntBaseMixin): """Container class for verbosity (or other) value""" def __init__(self, value=0): self.value = value def set(self, value): self.value = value def get(self): return self.value def _cmpkey(self): return self.value def __nonzero__(self): return self.value != 0 def __bool__(self): return self.value != 0 def __str__(self): return str(self.value) Verbose = _VerboseClass() # ----------------------------------------------------- # userWorkingHome is current working directory # ----------------------------------------------------- userWorkingHome = _os.getcwd() # ----------------------------------------------------- # userIrafHome is location of user's IRAF home directory # ----------------------------------------------------- # If login.cl exists here, use this directory as home. # Otherwise look for ~/iraf. if _os.path.exists('./login.cl'): userIrafHome = _os.path.join(userWorkingHome,'') elif _os.path.exists(_os.path.expanduser('~/.iraf/login.cl')): userIrafHome = _os.path.expanduser('~/.iraf') else: userIrafHome = _os.path.join(_os.getenv('HOME','.'),'iraf','') if not _os.path.exists(userIrafHome): # no ~/iraf, just use '.' as home userIrafHome = _os.path.join(userWorkingHome,'') # ----------------------------------------------------- # Boolean constant class # ----------------------------------------------------- class _Boolean(_compmixin.ComparableMixin): """Class of boolean constant object""" def __init__(self, value=None): # change value to 1 or 0 if value: self.__value = 1 else: self.__value = 0 self.__strvalue = ["no", "yes"][self.__value] def __copy__(self): """Don't bother to make a copy""" return self def __deepcopy__(self, memo=None): """Don't bother to make a copy""" return self def _compare(self, other, method): # _Boolean vs. _Boolean if isinstance(other, _Boolean): return method(self.__value, other.__value) # _Boolean vs. string: # If a string, compare with string value of this parameter. # Allow uppercase "YES", "NO" as well as lowercase. # Also allows single letter abbrevation "y" or "n". if isinstance(other, string_types): ovalue = other.lower() if len(ovalue)==1: return method(self.__strvalue[0], ovalue) else: return method(self.__strvalue, ovalue) # _Boolean vs. 
all other types (int, float, bool, etc) - treat this # value like an integer return method(self.__value, other) def __nonzero__(self): return self.__value != 0 def __bool__(self): return self.__value != 0 def __repr__(self): return self.__strvalue def __str__(self): return self.__strvalue def __int__(self): return self.__value def __float__(self): return float(self.__value) # create yes, no boolean values yes = _Boolean(1) no = _Boolean(0) # ----------------------------------------------------- # define end-of-file object # if printed, says 'EOF' # if converted to integer, has value -2 (special IRAF value) # Implemented as a singleton, although the singleton # nature is not really essential # ----------------------------------------------------- class _EOFClass(_compmixin.ComparableMixin): """Class of singleton EOF (end-of-file) object""" def __init__(self): global EOF if EOF is not None: # only allow one to be created raise RuntimeError("Use EOF object, not _EOFClass") def __copy__(self): """Not allowed to make a copy""" return self def __deepcopy__(self, memo=None): """Not allowed to make a copy""" return self def _compare(self, other, method): if isinstance(other, _EOFClass): # Despite trying to create only one EOF object, there # could be more than one. All EOFs are equal. return method(1, 1) if isinstance(other, string_types): # If a string, compare with 'EOF' return method("EOF", other) if isinstance(other, number_types): # If a number, compare with -2 return method(-2, other) # what else could it be? return NotImplemented def __repr__(self): return "EOF" def __str__(self): return "EOF" def __int__(self): return -2 def __float__(self): return -2.0 # initialize EOF to None first so singleton scheme works EOF = None EOF = _EOFClass() # ----------------------------------------------------- # define IRAF-like INDEF object # ----------------------------------------------------- class _INDEFClass(object): """Class of singleton INDEF (undefined) object""" def __new__(cls): # Guido's example Singleton pattern it = cls.__dict__.get("__it__") if it is not None: return it # this use of super gets the correct version of __new__ for the # int and float subclasses too cls.__it__ = it = super(_INDEFClass, cls).__new__(cls) return it def __copy__(self): """Not allowed to make a copy""" return self def __deepcopy__(self, memo=None): """Not allowed to make a copy""" return self def __lt__(self, other): return INDEF def __le__(self, other): return INDEF def __gt__(self, other): return INDEF def __ge__(self, other): return INDEF def __eq__(self, other): # Despite trying to create only one INDEF object, there # could be more than one. All INDEFs are equal. 
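        # (Illustrative consequences of the methods in this class:
        # INDEF == INDEF and INDEF == "INDEF" are both True, while the
        # ordering methods above deliberately return INDEF itself rather
        # than a bool, so e.g. 1 < INDEF yields INDEF, not True or False.)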
# Also allow "INDEF" - CDS 17Nov2011 return isinstance(other, _INDEFClass) or (other and str(other)=="INDEF") def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "INDEF" def __str__(self): return "INDEF" __oct__ = __str__ __hex__ = __str__ # type conversions return various types of INDEF objects # this is necessary for Python 2.4 def __int__(self): return _INDEF_int def __long__(self): return _INDEF_int def __float__(self): return _INDEF_float def __nonzero__(self): return False # need bool return type # all operations on INDEF return INDEF def __add__(self, other): return INDEF __sub__ = __add__ __mul__ = __add__ __rmul__ = __add__ __div__ = __add__ __mod__ = __add__ __divmod__ = __add__ __pow__ = __add__ __lshift__ = __add__ __rshift__ = __add__ __and__ = __add__ __xor__ = __add__ __or__ = __add__ __radd__ = __add__ __rsub__ = __add__ __rmul__ = __add__ __rrmul__ = __add__ __rdiv__ = __add__ __rmod__ = __add__ __rdivmod__ = __add__ __rpow__ = __add__ __rlshift__ = __add__ __rrshift__ = __add__ __rand__ = __add__ __rxor__ = __add__ __ror__ = __add__ def __neg__(self): return INDEF __pos__ = __neg__ __abs__ = __neg__ __invert__ = __neg__ INDEF = _INDEFClass() # Classes that inherit from built-in types are required for Python 2.4 # so that int and float conversion functions work correctly. # Unfortunately, if you call int(_INDEF_int) it ignores the # __int__ method and returns zero, so these objects should be # used sparingly and replaced with standard INDEF whereever # possible. class _INDEFClass_int(_INDEFClass, int): pass class _INDEFClass_float(_INDEFClass, float): pass _INDEF_int = _INDEFClass_int() _INDEF_float = _INDEFClass_float() # ----------------------------------------------------- # define IRAF-like EPSILON object # ----------------------------------------------------- class _EPSILONClass(_compmixin.ComparableFloatBaseMixin): """Class of singleton EPSILON object, for floating-point comparison""" def __new__(cls): # Guido's example Singleton pattern it = cls.__dict__.get("__it__") if it is not None: return it cls.__it__ = it = super(_EPSILONClass, cls).__new__(cls) return it def __init__(self): self.__dict__["_value"] = None def setvalue(self): DEFAULT_VALUE = 1.192e-7 hlib = _os.environ.get("hlib") if hlib is None: self._value = DEFAULT_VALUE return fd = open(_os.path.join(hlib, "mach.h")) lines = fd.readlines() fd.close() foundit = 0 for line in lines: words = line.split() if len(words) < 1 or words[0] == "#": continue if words[0] == "define" and words[1] == "EPSILONR": strvalue = words[2] if strvalue[0] == "(": strvalue = strvalue[1:-1] self._value = float(strvalue) foundit = 1 break if not foundit: self._value = DEFAULT_VALUE def __copy__(self): """Not allowed to make a copy""" return self def __deepcopy__(self, memo=None): """Not allowed to make a copy""" return self def __setattr__(self, name, value): """Not allowed to modify the value or add a new attribute""" if name == "_value": if self.__dict__["_value"] is None: self.__dict__["_value"] = value else: raise RuntimeError("epsilon cannot be modified") else: pass def __delattr__(self, value): """Not allowed to delete the value""" pass def _cmpkey(self): return self._value def __repr__(self): return "%.6g" % self._value def __str__(self): return "%.6g" % self._value __oct__ = None __hex__ = None def __int__(self): return 0 def __long__(self): return 0 def __float__(self): return self._value def __nonzero__(self): return True # need bool return type def __add__(self, other): return self._value + 
other def __sub__(self, other): return self._value - other def __mul__(self, other): return self._value * other def __div__(self, other): return self._value / other def __mod__(self, other): return self._value % other def __divmod__(self, other): return (self._value // other, self._value % other) def __pow__(self, other): return self._value ** other def __neg__(self): return -self._value def __pos__(self): return self._value def __abs__(self): return abs(self._value) # arguments in reverse order def __radd__(self, other): return other + self._value def __rsub__(self, other): return other - self._value def __rmul__(self, other): return other * self._value def __rdiv__(self, other): return other / self._value def __rmod__(self, other): return other % self._value def __rdivmod__(self, other): return (other // self._value, other % self._value) def __rpow__(self, other): return other ** self._value epsilon = _EPSILONClass() epsilon.setvalue() # ----------------------------------------------------- # tag classes # ----------------------------------------------------- class IrafTask: pass class IrafPkg(IrafTask): pass stsci.tools-3.4.12/lib/stsci/tools/irafutils.py0000644001120100020070000004764513112074217023212 0ustar jhunkSTSCI\science00000000000000"""module irafutils.py -- general utility functions printCols Print elements of list in cols columns printColsAuto Print elements of list in the best number of columns stripQuotes Strip single or double quotes off string and remove embedded quote pairs csvSplit Split comma-separated fields in strings (cover bug in csv mod) rglob Recursive glob setWritePrivs Convenience function to add/remove write privs removeEscapes Remove escaped quotes & newlines from strings translateName Convert CL parameter or variable name to Python-acceptable name untranslateName Undo Python conversion of CL parameter or variable name tkread Read n bytes from file while running Tk mainloop tkreadline Read a line from file while running Tk mainloop launchBrowser Given a URL, try to pop it up in a browser on most platforms. $Id$ R. White, 1999 Jul 16 """ from __future__ import division, print_function import os, stat, string, sys, re, fnmatch, keyword, select from . import capable PY3K = sys.version_info[0] > 2 if capable.OF_GRAPHICS: if PY3K: import tkinter as TKNTR else: import Tkinter as TKNTR def printColsAuto(in_strings, term_width=80, min_pad=1): """ Print a list of strings centered in columns. Determine the number of columns and lines on the fly. Return the result, ready to print. 
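    For example (a small hand-checked illustration of the layout logic
    below; two strings, padded and centered into two columns):

    >>> printColsAuto(['ab', 'cd'], term_width=8)
    ' ab  cd'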
in_strings is a list/tuple/iterable of strings min_pad is number of spaces to appear on each side of a single string (so you will see twice this many spaces between 2 strings) """ # sanity check assert in_strings and len(in_strings)>0, 'Unexpected: '+repr(in_strings) # get max width in input maxWidth = len(max(in_strings, key=len)) + (2*min_pad) # width with pad numCols = term_width//maxWidth # integer div # set numCols so we take advantage of the whole line width numCols = min(numCols, len(in_strings)) # easy case - single column or too big if numCols < 2: # one or some items are too big but print one item per line anyway lines = [x.center(term_width) for x in in_strings] return '\n'.join(lines) # normal case - 2 or more columns colWidth = term_width//numCols # integer div # colWidth is guaranteed to be larger than all items in input retval = '' for i in range(len(in_strings)): retval+=in_strings[i].center(colWidth) if (i+1)%numCols == 0: retval += '\n' return retval.rstrip() def printCols(strlist,cols=5,width=80): """Print elements of list in cols columns""" # This may exist somewhere in the Python standard libraries? # Should probably rewrite this, it is pretty crude. nlines = (len(strlist)+cols-1)//cols line = nlines*[""] for i in range(len(strlist)): c, r = divmod(i,nlines) nwid = c*width//cols - len(line[r]) if nwid>0: line[r] = line[r] + nwid*" " + strlist[i] else: line[r] = line[r] + " " + strlist[i] for s in line: print(s) _re_doubleq2 = re.compile('""') _re_singleq2 = re.compile("''") def stripQuotes(value): """Strip single or double quotes off string; remove embedded quote pairs""" if value[:1] == '"': value = value[1:] if value[-1:] == '"': value = value[:-1] # replace "" with " value = re.sub(_re_doubleq2, '"', value) elif value[:1] == "'": value = value[1:] if value[-1:] == "'": value = value[:-1] # replace '' with ' value = re.sub(_re_singleq2, "'", value) return value def csvSplit(line, delim=',', allowEol=True): """ Take a string as input (e.g. a line in a csv text file), and break it into tokens separated by commas while ignoring commas embedded inside quoted sections. This is exactly what the 'csv' module is meant for, so we *should* be using it, save that it has two bugs (described next) which limit our use of it. When these bugs are fixed, this function should be forsaken in favor of direct use of the csv module (or similar). The basic use case is to split a function signature string, so for: afunc(arg1='str1', arg2='str, with, embedded, commas', arg3=7) we want a 3 element sequence: ["arg1='str1'", "arg2='str, with, embedded, commas'", "arg3=7"] but: >>> import csv >>> y = "arg1='str1', arg2='str, with, embedded, commas', arg3=7" >>> rdr = csv.reader( (y,), dialect='excel', quotechar="'", skipinitialspace=True) >>> l = rdr.next(); print len(l), str(l) 6 ["arg1='str1'", "arg2='str", 'with', 'embedded', "commas'", "arg3=7"] which we can see is not correct - we wanted 3 tokens. This occurs in Python 2.5.2 and 2.6. It seems to be due to the text at the start of each token ("arg1=") i.e. because the quote isn't for the whole token. 
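    (csvSplit itself, defined below, handles that full case; a hand-checked
    illustration — note that leading whitespace after each delimiter is
    preserved:

    >>> csvSplit("arg1='str1', arg2='str, with, embedded, commas', arg3=7")
    ["arg1='str1'", " arg2='str, with, embedded, commas'", ' arg3=7']

    )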
If we were to remove the names of the args and the equal signs, it works: >>> x = "'str1', 'str, with, embedded, commas', 7" >>> rdr = csv.reader( (x,), dialect='excel', quotechar="'", skipinitialspace=True) >>> l = rdr.next(); print len(l), str(l) 3 ['str1', 'str, with, embedded, commas', '7'] But even this usage is delicate - when we turn off skipinitialspace, it fails: >>> x = "'str1', 'str, with, embedded, commas', 7" >>> rdr = csv.reader( (x,), dialect='excel', quotechar="'") >>> l = rdr.next(); print len(l), str(l) 6 ['str1', " 'str", ' with', ' embedded', " commas'", ' 7'] So, for now, we'll roll our own. """ # Algorithm: read chars left to right, go from delimiter to delimiter, # but as soon as a single/double/triple quote is hit, scan forward # (ignoring all else) until its matching end-quote is found. # For now, we will not specially handle escaped quotes. tokens = [] ldl = len(delim) keepOnRollin = line is not None and len(line) > 0 while keepOnRollin: tok = _getCharsUntil(line, delim, True, allowEol=allowEol) # len of token should always be > 0 because it includes end delimiter # except on last token if len(tok) > 0: # append it, but without the delimiter if tok[-ldl:] == delim: tokens.append(tok[:-ldl]) else: tokens.append(tok) # tok goes to EOL - has no delimiter keepOnRollin = False line = line[len(tok):] else: # This is the case of the empty end token tokens.append('') keepOnRollin = False return tokens # We'll often need to search a string for 3 possible characters. We could # loop and check each one ourselves; we could do 3 separate find() calls; # or we could do a compiled re.search(). For VERY long strings (hundreds # of thousands of chars), it turns out that find() is so fast and that # re (even compiled) has enough overhead, that 3 find's is the same or # slightly faster than one re.search with three chars in the re expr. # Of course, both methods are much faster than an explicit loop. # Since these strings will be short, the fastest method is re.search() _re_sq = re.compile(r"'") _re_dq = re.compile(r'"') _re_comma_sq_dq = re.compile('[,\'"]') def _getCharsUntil(buf, stopChar, branchForQuotes, allowEol): # Sanity checks if buf is None: return None if len(buf) <= 0: return '' # Search chars left-to-right looking for stopChar sought = (stopChar,) theRe = None if branchForQuotes: sought = (stopChar,"'",'"') # see later, we'll handle '"""' too if stopChar == ',': theRe = _re_comma_sq_dq # pre-compiled common case else: if stopChar == '"': theRe = _re_dq # pre-compiled common case if stopChar == "'": theRe = _re_sq # pre-compiled common case if theRe is None: theRe = re.compile('['+''.join(sought)+']') mo = theRe.search(buf) # No match found; stop if mo is None: if not stopChar in ('"', "'"): # this is a primary search, not a branch into quoted text return buf # searched until we hit the EOL, must be last token else: # this is a branch into a quoted string - do we allow EOL here? if allowEol: return buf else: raise ValueError('Unfound end-quote, buffer: '+buf) # The expected match was found. Stop. if mo.group() == stopChar: return buf[:1 + mo.start()] # return token plus stopChar at end # Should not get to this point unless in a branch-for-quotes situation. assert branchForQuotes,"Programming error! shouldnt be here w/out branching" # Quotes were found. # There are two kinds, but double quotes could be the start of # triple double-quotes. (""") So get the substring to create the token. 
# # token = preQuote+quotedPart+postQuote (e.g.: "abc'-hi,ya-'xyz") # preQuote = buf[:mo.start()] if mo.group() == "'": quotedPart = "'"+_getCharsUntil(buf[1+mo.start():],"'",False,allowEol) else: # first double quote (are there 3 in a row?) idx = mo.start() if len(buf) > idx+2 and '"""' == buf[idx:idx+3]: # We ARE in a triple-quote sub-string end_t_q = buf[idx+3:].find('"""') if end_t_q < 0: # hit end of line before finding end quote if allowEol: quotedPart = buf[idx:] else: raise ValueError('Unfound triple end-quote, buffer: '+buf) else: quotedPart = buf[idx:idx+3+end_t_q+1] else: quotedPart = '"'+_getCharsUntil(buf[1+mo.start():],'"',False,allowEol) lenSoFar = len(preQuote)+len(quotedPart) if lenSoFar < len(buf): # now get back to looking for end delimiter postQuote = _getCharsUntil(buf[lenSoFar:], stopChar, branchForQuotes, allowEol) return preQuote+quotedPart+postQuote else: return buf # at end def testCsvSplit(quiet=True): # test cases ( input-string, len(output-list), repr(output-list) ) cases = ( \ (None, 0, "[]"), ('', 0, "[]"), (' ', 1, "[' ']"), ('a', 1, "['a']"), (',', 2, "['', '']"), (',a', 2, "['', 'a']"), ('a,', 2, "['a', '']"), (',a,', 3, "['', 'a', '']"), ("abc'-hi,ya-'xyz", 1, """["abc'-hi,ya-'xyz"]"""), ('abc"double-quote,eg"xy,z', 2, """['abc"double-quote,eg"xy', 'z']"""), ('abc"""triple-quote,eg"""xyz', 1, '[\'abc"""triple-quote,eg"""xyz\']'), ("'s1', 'has, comma', z", 3, """["'s1'", " 'has, comma'", ' z']"""), ("a='s1', b='has,comma,s', c", 3, """["a='s1'", " b='has,comma,s'", ' c']"""), ) for c in cases: if not quiet: print("Testing: "+repr(c[0])) ll = csvSplit(c[0], ',', True) assert len(ll) == c[1] and repr(ll) == c[2], \ "For case: "+repr(c[0])+" expected:\n"+c[2]+"\nbut got:\n"+repr(ll) return True def rglob(root, pattern): """ Same thing as glob.glob, but recursively checks subdirs. """ # Thanks to Alex Martelli for basics on Stack Overflow retlist = [] if None not in (pattern, root): for base, dirs, files in os.walk(root): goodfiles = fnmatch.filter(files, pattern) retlist.extend(os.path.join(base, f) for f in goodfiles) return retlist def setWritePrivs(fname, makeWritable, ignoreErrors=False): """ Set a file named fname to be writable (or not) by user, with the option to ignore errors. There is nothing ground-breaking here, but I was annoyed with having to repeate this little bit of code. """ privs = os.stat(fname).st_mode try: if makeWritable: os.chmod(fname, privs | stat.S_IWUSR) else: os.chmod(fname, privs & (~ stat.S_IWUSR)) except OSError: if ignoreErrors: pass # just try, don't whine else: raise def removeEscapes(value, quoted=0): """Remove escapes from in front of quotes (which IRAF seems to just stick in for fun sometimes.) Remove \-newline too. If quoted is true, removes all blanks following \-newline (which is a nasty thing IRAF does for continuations inside quoted strings.) XXX Should we remove \\ too? """ i = value.find(r'\"') while i>=0: value = value[:i] + value[i+1:] i = value.find(r'\"',i+1) i = value.find(r"\'") while i>=0: value = value[:i] + value[i+1:] i = value.find(r"\'",i+1) # delete backslash-newlines i = value.find("\\\n") while i>=0: j = i+2 if quoted: # ignore blanks and tabs following \-newline in quoted strings for c in value[i+2:]: if c not in ' \t': break j = j+1 value = value[:i] + value[j:] i = value.find("\\\n",i+1) return value # Must modify Python keywords to make Python code legal. I add 'PY' to # beginning of Python keywords (and some other illegal Python identifiers). # It will be stripped off where appropriate. 
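# A few illustrative mappings, inferred from the rules implemented below
# (not an exhaustive specification):
#   translateName('lambda')       -> 'PYlambda'    ('lambda' is reserved)
#   translateName('$nargs')       -> 'DOLLARnargs'
#   translateName('a.b', dot=1)   -> 'aDOTb'
#   untranslateName('PYlambda')   -> 'lambda'      (round-trips)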
def translateName(s, dot=0): """Convert CL parameter or variable name to Python-acceptable name Translate embedded dollar signs to 'DOLLAR' Add 'PY' prefix to components that are Python reserved words Add 'PY' prefix to components start with a number If dot != 0, also replaces '.' with 'DOT' """ s = s.replace('$', 'DOLLAR') sparts = s.split('.') for i in range(len(sparts)): if sparts[i] == "" or sparts[i][0] in string.digits or \ keyword.iskeyword(sparts[i]): sparts[i] = 'PY' + sparts[i] if dot: return 'DOT'.join(sparts) else: return '.'.join(sparts) def untranslateName(s): """Undo Python conversion of CL parameter or variable name""" s = s.replace('DOT', '.') s = s.replace('DOLLAR', '$') # delete 'PY' at start of name components if s[:2] == 'PY': s = s[2:] s = s.replace('.PY', '.') return s # procedures to read while still allowing Tk widget updates def init_tk_default_root(withdraw=True): """ In case the _default_root value is required, you may safely call this ahead of time to ensure that it has been initialized. If it has already been, this is a no-op. """ if not capable.OF_GRAPHICS: raise RuntimeError("Cannot run this command without graphics") if not TKNTR._default_root: # TKNTR imported above junk = TKNTR.Tk() # tkinter._default_root is now populated (== junk) retval = TKNTR._default_root if withdraw and retval: retval.withdraw() return retval def tkread(file, n=0): """Read n bytes from file (or socket) while running Tk mainloop. If n=0 then this runs the mainloop until some input is ready on the file. (See tkreadline for an application of this.) The file must have a fileno method. """ return _TkRead().read(file, n) def tkreadline(file=None): """Read a line from file while running Tk mainloop. If the file is not line-buffered then the Tk mainloop will stop running after one character is typed. The function will still work but Tk widgets will stop updating. This should work OK for stdin and other line-buffered filehandles. If file is omitted, reads from sys.stdin. The file must have a readline method. If it does not have a fileno method (which can happen e.g. for the status line input on the graphics window) then the readline method is simply called directly. """ if file is None: file = sys.stdin if not hasattr(file, "readline"): raise TypeError("file must be a filehandle with a readline method") # Call tkread now... # BUT, if we get in here for something not GUI-related (e.g. terminal- # focused code in a sometimes-GUI app) then skip tkread and simply call # readline on the input eg. stdin. 
Otherwise we'd fail in _TkRead().read() try: fd = file.fileno() except: fd = None if (fd and capable.OF_GRAPHICS): tkread(fd, 0) # if EOF was encountered on a tty, avoid reading again because # it actually requests more data if not select.select([fd],[],[],0)[0]: return '' return file.readline() class _TkRead: """Run Tk mainloop while waiting for a pending read operation""" def read(self, file, nbytes): """Read nbytes characters from file while running Tk mainloop""" if not capable.OF_GRAPHICS: raise RuntimeError("Cannot run this command without graphics") if isinstance(file, int): fd = file else: # Otherwise, assume we have Python file object try: fd = file.fileno() except: raise TypeError("file must be an integer or a filehandle/socket") init_tk_default_root() # harmless if already done self.widget = TKNTR._default_root if not self.widget: # no Tk widgets yet, so no need for mainloop # (shouldnt happen now with init_tk_default_root) s = [] while nbytes>0: snew = os.read(fd, nbytes) # returns bytes in PY3K if snew: if PY3K: snew = snew.decode('ascii','replace') s.append(snew) nbytes -= len(snew) else: # EOF -- just return what we have so far break return "".join(s) else: self.nbytes = nbytes self.value = [] self.widget.tk.createfilehandler(fd, TKNTR.READABLE | TKNTR.EXCEPTION, self._read) try: self.widget.mainloop() finally: self.widget.tk.deletefilehandler(fd) return "".join(self.value) def _read(self, fd, mask): """Read waiting data and terminate Tk mainloop if done""" try: # if EOF was encountered on a tty, avoid reading again because # it actually requests more data if select.select([fd],[],[],0)[0]: snew = os.read(fd, self.nbytes) # returns bytes in PY3K if PY3K: snew = snew.decode('ascii','replace') self.value.append(snew) self.nbytes -= len(snew) else: snew = '' if (self.nbytes <= 0 or len(snew) == 0) and self.widget: # stop the mainloop self.widget.quit() except OSError: raise IOError("Error reading from %s" % (fd,)) def launchBrowser(url, brow_bin='mozilla', subj=None): """ Given a URL, try to pop it up in a browser on most platforms. brow_bin is only used on OS's where there is no "open" or "start" cmd. """ if not subj: subj = url # Tries to use webbrowser module on most OSes, unless a system command # is needed. (E.g. win, linux, sun, etc) if sys.platform not in ('os2warp, iphone'): # try webbrowser w/ everything? import webbrowser if not webbrowser.open(url): print("Error opening URL: "+url) else: print('Help on "'+subj+'" is now being displayed in a web browser') return # Go ahead and fork a subprocess to call the correct binary pid = os.fork() if pid == 0: # child if sys.platform == 'darwin': if 0 != os.system('open "'+url+'"'): # does not seem to keep '#.*' print("Error opening URL: "+url) os._exit(0) # The following retries if "-remote" doesnt work, opening a new browser # cmd = brow_bin+" -remote 'openURL("+url+")' '"+url+"' 1> /dev/null 2>&1" # if 0 != os.system(cmd) # print "Running "+brow_bin+" for HTML help..." 
# os.execvp(brow_bin,[brow_bin,url]) # os._exit(0) else: # parent if not subj: subj = url print('Help on "'+subj+'" is now being displayed in a browser') stsci.tools-3.4.12/lib/stsci/tools/iterfile.py0000644001120100020070000001054613112074217023001 0ustar jhunkSTSCI\science00000000000000""" License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE """ from __future__ import division # confidence high from astropy.io import fits __version__ = '0.3 (01-July-2014)' class IterFitsFile(object): """ This class defines an object which can be used to access the data from a FITS file without leaving the file-handle open between reads. """ def __init__(self,name): self.name = name self.fname = None self.extn = None self.handle = None self.inmemory = False self.compress = False if not self.fname: self.fname,self.extn = parseFilename(name) def set_inmemory(self,val): """Sets inmemory attribute to either True or False """ assert type(val) is bool, 'Please specify either True or False' self.inmemory = val def _shape(self): """ Returns the shape of the data array associated with this file.""" hdu = self.open() _shape = hdu.shape if not self.inmemory: self.close() del hdu return _shape def _data(self): """ Returns the data array associated with this file/extenstion.""" hdu = self.open() _data = hdu.data.copy() if not self.inmemory: self.close() del hdu return _data def type(self): """ Returns the shape of the data array associated with this file.""" hdu = self.open() _type = hdu.data.dtype.name if not self.inmemory: self.close() del hdu return _type def open(self): """ Opens the file for subsequent access. """ if self.handle is None: self.handle = fits.open(self.fname, mode='readonly') if self.extn: if len(self.extn) == 1: hdu = self.handle[self.extn[0]] else: hdu = self.handle[self.extn[0],self.extn[1]] else: hdu = self.handle[0] if isinstance(hdu,fits.hdu.compressed.CompImageHDU): self.compress = True return hdu def close(self): """ Closes file handle for this FITS object.""" if self.handle is not None: self.handle.close() self.handle = None def __getitem__(self,i): """ Returns a PyFITS section for the rows specified. """ # All I/O must be done here, starting with open hdu = self.open() if self.inmemory or self.compress: _data = hdu.data[i,:] else: _data = hdu.section[i,:] if not self.inmemory: self.close() del hdu return _data def __getattribute__(self,name): if name == 'data': return self._data() elif name == 'shape': return self._shape() else: return object.__getattribute__(self,name) def parseFilename(filename): """ Parse out filename from any specified extensions. Returns rootname and string version of extension name. Modified from 'pydrizzle.fileutil' to allow this module to be independent of PyDrizzle/MultiDrizzle. """ # Parse out any extension specified in filename _indx = filename.find('[') if _indx > 0: # Read extension name provided _fname = filename[:_indx] extn = filename[_indx+1:-1] # An extension was provided, so parse it out... if repr(extn).find(',') > 1: _extns = extn.split(',') # Two values given for extension: # for example, 'sci,1' or 'dq,1' _extn = [_extns[0],int(_extns[1])] elif repr(extn).find('/') > 1: # We are working with GEIS group syntax _indx = str(extn[:extn.find('/')]) _extn = [int(_indx)] elif isinstance(extn, str): # Only one extension value specified... if extn.isdigit(): # We only have an extension number specified as a string... _nextn = int(extn) else: # We only have EXTNAME specified... 
_nextn = extn _extn = [_nextn] else: # Only integer extension number given, or default of 0 is used. _extn = [int(extn)] else: _fname = filename _extn = None return _fname,_extn stsci.tools-3.4.12/lib/stsci/tools/linefit.py0000644001120100020070000000446613112074217022634 0ustar jhunkSTSCI\science00000000000000""" Fit a line to a data set with optional weights. Returns the parameters of the model, bo, b1: Y = b0 + b1* X :author: Nadia Dencheva :version: '1.0 (2007-02-20)' """ from __future__ import absolute_import, division, print_function # confidence high from . import numerixenv numerixenv.check() import numpy as N from numpy.core import around __version__ = '1.0' #Release version number only __vdate__ = '2007-02-20' #Date of this version def linefit(x, y, weights=None): """ Parameters ---------- y: 1D numpy array The data to be fitted x: 1D numpy array The x values of the y array. x and y must have the same shape. weights: 1D numpy array, must have the same shape as x and y weight values Examples -------- >>> x=N.array([-5, -4 ,-3 ,-2 ,-1, 0, 1, 2, 3, 4, 5]) >>> y=N.array([1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18]) >>> around(linefit(x,y), decimals=5) array([ 9.27273, 1.43636]) >>> x=N.array([1.3,1.3,2.0,2.0,2.7,3.3,3.3,3.7,3.7,4.,4.,4.,4.7,4.7,5.,5.3,5.3,5.3,5.7,6.,6.,6.3,6.7]) >>> y = N.array([2.3,1.8,2.8,1.5,2.2,3.8,1.8,3.7,1.7,2.8,2.8,2.2,3.2,1.9,1.8,3.5,2.8,2.1,3.4,3.2,3.,3.,5.9]) >>> around(linefit(x,y), decimals=5) array([ 1.42564, 0.31579]) """ if numerixenv.check_input(x) or numerixenv.check_input(y): raise ValueError("Input is a NumArray array. This version of %s requires a Numpy array\n" % __name__) if len(x) != len(y): print("Error: X and Y must have equal size\n") return n = len(x) w = N.zeros((n,n)).astype(N.float) if weights is None: for i in N.arange(n): w[i,i] = 1 else: if len(weights) != n: print("Error: Weights must have the same size as X and Y.\n") return for i in N.arange(n): w[i,i] = weights[i] x = x.astype(N.float) y = y.astype(N.float) # take the weighted avg for calculatiing the covarince Xavg = N.sum(N.dot(w,x)) / N.sum(w.diagonal()) Yavg = N.sum(N.dot(w,y)) / N.sum(w.diagonal()) xm = x - Xavg xmt = N.transpose(xm) ym = y - Yavg b1 = N.dot(xmt,N.dot(w,ym)) / N.dot(xmt ,N.dot(w,xm)) b0 = Yavg - b1 * Xavg return b0, b1 def test(): import doctest import linefit return doctest.testmod(linefit) stsci.tools-3.4.12/lib/stsci/tools/listdlg.py0000644001120100020070000000707613017116245022646 0ustar jhunkSTSCI\science00000000000000# # A home-grown list-selection convenience dialog. As *soon* as tkinter comes # with one of these, replace all uses of this one with that. This currently # only allows single selection. # """ $Id$ """ from __future__ import absolute_import, division, print_function # confidence high import sys PY3K = sys.version_info[0] > 2 from . 
import capable, irafutils if capable.OF_GRAPHICS: if PY3K: from tkinter import * from tkinter.simpledialog import Dialog else: from Tkinter import * from tkSimpleDialog import Dialog else: Dialog = object class ListSingleSelectDialog(Dialog): def __init__(self, title, prompt, choiceList, parent=None): if not parent: parent = irafutils.init_tk_default_root() self.__prompt = prompt self.__choices = choiceList self.__retval = None self.__clickedOK = False parent.update() Dialog.__init__(self, parent, title) # enters main loop here def get_current_index(self): """ Return currently selected index (or -1) """ # Need to convert to int; currently API returns a tuple of string curSel = self.__lb.curselection() if curSel and len(curSel) > 0: return int(curSel[0]) else: return -1 def getresult(self): return self.__retval def destroy(self): # first save the selected index before it is destroyed idx = self.get_current_index() # in PyRAF, assume they meant the first one if they clicked nothing, # since it is already active (underlined) if idx < 0: idx = 0 # get the object at that index if self.__clickedOK and idx >= 0: # otherwise is None self.__retval = self.__choices[idx] if self.__retval and type(self.__retval) == str: self.__retval = self.__retval.strip() # now destroy self.__lb = None Dialog.destroy(self) def body(self, master): label = Label(master, text=self.__prompt, justify=LEFT) # label.grid(row=0, padx=8, sticky=W) label.pack(side=TOP, fill=X, padx=10, pady=8) frame = Frame(master) # frame.grid(row=1, padx=8, sticky=W+E) frame.pack(side=TOP, fill=X, padx=10, pady=8) vscrollbar = Scrollbar(frame, orient=VERTICAL) hscrollbar = Scrollbar(frame, orient=HORIZONTAL) self.__lb = Listbox(frame, selectmode=BROWSE, xscrollcommand=hscrollbar.set, yscrollcommand=vscrollbar.set) # activestyle='none', # none = dont underline items hscrollbar.config(command=self.__lb.xview) hscrollbar.pack(side=BOTTOM, fill=X) vscrollbar.config(command=self.__lb.yview) vscrollbar.pack(side=RIGHT, fill=Y) self.__lb.pack(side=LEFT, fill=BOTH, expand=1) for itm in self.__choices: self.__lb.insert(END, str(itm)) self.__lb.bind("", self.ok) # dbl clk # self.__lb.selection_set(0,0) self.__lb.focus_set() return self.__lb def ok(self, val=None): self.__clickedOK = True # save that this wasn't a cancel Dialog.ok(self, val) def validate(self): return 1 if __name__ == "__main__": root = Tk() root.withdraw() root.update() x = ListSingleSelectDialog("Select Parameter File", \ "Select which file you prefer for task/pkg:", \ ['abc','def','ghi','jkl','1'], None) print(str(x.getresult())) stsci.tools-3.4.12/lib/stsci/tools/logutil.py0000644001120100020070000005744213006721301022656 0ustar jhunkSTSCI\science00000000000000""" A collection of utilities for handling output to standard out/err as well as to file-based or other logging handlers through a single interface. """ import inspect import logging import os import sys import threading from stsci.tools.for2to3 import tostr PY3K = sys.version_info[0] > 2 if PY3K: from io import StringIO else: from cStringIO import StringIO global_logging_started = False # The global_logging system replaces the raw_input builtin (input on Python 3) # for two reasons: # # 1) It's the easiest way to capture the raw_input prompt and subsequent user # input to the log. # # 2) On Python 2.x raw_input() does not play nicely with GUI toolkits if # sys.stdout has been replaced by a non-file object (as global_logging # does). 
The default raw_input() implementation first checks that # sys.stdout and sys.stdin are connected to a terminal. If so it uses the # PyOS_Readline() implementation, which allows a GUI's event loop to run # while waiting for user input via PyOS_InputHook(). However, if # sys.stdout is not attached to a terminal, raw_input() uses # PyFile_GetLine(), which blocks until a line is entered on sys.stdin, # thus preventing the GUI from updating. It doesn't matter if sys.stdin is # still attached to the terminal even if sys.stdout isn't, nor does it # automatically fall back on sys.__stdout__ and sys.__stdin__. # # This replacement raw_input() reimplements most of the built in # raw_input(), but is aware that sys.stdout may have been replaced and # knows how to find the real stdout if so. # # Note that this is a non-issue in Python 3 which has a new implementation # in which it doesn't matter what sys.stdout points to--only that it has a # fileno() method that returns the correct file descriptor for the # console's stdout. if not PY3K: import __builtin__ as builtins from ctypes import pythonapi, py_object, c_void_p, c_char_p # PyFile_AsFile returns a FILE * from a python file object. # This is used later with pythonapi.PyOS_Readline to perform # the readline. pythonapi.PyFile_AsFile.argtypes = (py_object,) pythonapi.PyFile_AsFile.restype = c_void_p pythonapi.PyOS_Readline.argtypes = (c_void_p, c_void_p, c_char_p) pythonapi.PyOS_Readline.restype = c_char_p def global_logging_raw_input(prompt): def get_stream(name): if hasattr(sys, name): stream = getattr(sys, name) if isinstance(stream, file): return stream elif isinstance(stream, StreamTeeLogger): return stream.stream if hasattr(sys, '__%s__' % name): stream = getattr(sys, '__%s__' % name) if isinstance(stream, file): return stream return None def check_interactive(stream, name): try: fd = stream.fileno() except: # Could be an AttributeError, an OSError, and IOError, or who # knows what else... return False realfd = {'stdin': 0, 'stdout': 1, 'stderr': 2}[name] return fd == realfd and os.isatty(fd) stdout = get_stream('stdout') stdin = get_stream('stdin') stderr = get_stream('stderr') if stdout is None: raise RuntimeError('raw_input(): lost sys.stdout') if stdin is None: raise RuntimeError('raw_input(): lost sys.stdin') if stderr is None: raise RuntimeError('raw_input(): lost sys.stderr') if (not check_interactive(stdin, 'stdin') or not check_interactive(stdout, 'stdout')): # Use the built-in raw_input(); this will repeat some of the checks # we just did, but will save us from having to reimplement # raw_input() in its entirety retval = builtins._original_raw_input(prompt) else: stdout.flush() infd = pythonapi.PyFile_AsFile(stdin) outfd = pythonapi.PyFile_AsFile(stdout) retval = pythonapi.PyOS_Readline(infd, outfd, str(prompt)) retval = retval.rstrip('\n') if isinstance(sys.stdout, StreamTeeLogger): sys.stdout.log_orig(str(prompt) + retval, echo=False) return retval else: import builtins def global_logging_raw_input(prompt): retval = builtins._original_raw_input(prompt) if isinstance(sys.stdout, StreamTeeLogger): sys.stdout.log_orig(str(prompt) + retval, echo=False) return retval class StreamTeeLogger(logging.Logger): """ A Logger implementation that is meant to replace an I/O stream such as `sys.stdout`, `sys.stderr`, or any other stream-like object that supports a `write()` method and a `flush()` method. 
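    In outline, a minimal sketch of the intended use (illustrative; the
    actual wiring, including the logger-class swap, is done by
    `setup_global_logging` later in this module)::

        logger = StreamTeeLogger('demo', stream=sys.stdout)
        sys.stdout = logger   # print output is now buffered and logged
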
When `StreamTeeLogger.write` is called, the written strings are line-buffered, and each line is logged through the normal Python logging interface. The `StreamTeeLogger` has two handlers: * The LogTeeHandler redirects all log messages to a logger with the same name as the module in which the `write()` method was called. For example, if this logger is used to replace `sys.stdout`, all `print` statements in the module `foo.bar` will be logged to a logger called ``foo.bar``. * If the ``stream`` argument was provided, this logger also attaches a `logging.StreamHandler` to itself for the given ``stream``. For example, if ``stream=sys.stdout`` then messages sent to this logger will be output to `sys.stdout`. However, only messages created through the `write()` method call will re-output to the given stream. Parameters ---------- name : string The name of this logger, as in `logging.Logger` level : int (optional) The minimum level at which to log messages sent to this logger; also used as the default level for messages logged through the `write()` interface (default: `logging.INFO`). stream : stream-like object (optional) The stream-like object (an object with `write()` and `flush()` methods) to tee to; should be the same file object being replaced (i.e. sys.stdout). If `None` (the default) writes to this file will not be sent to a stream logger. See Also -------- `EchoFilter` is a logger filter that can control which modules' output is sent to the screen via the `StreamHandler` on this logger. """ def __init__(self, name, level=logging.INFO, stream=None): logging.Logger.__init__(self, name, level) self.__thread_local_ctx = threading.local() self.__thread_local_ctx.write_count = 0 self.propagate = False self.buffer = '' self.stream = None self.set_stream(stream) self.addHandler(_LogTeeHandler()) #self.errors = 'strict' #self.encoding = 'utf8' @property def encoding(self): if self.stream: try: return self.stream.encoding except AttributeError: pass # Default value return 'utf-8' @property def errors(self): if self.stream: try: return self.stream.errors except AttributeError: pass # Default value return 'strict' def set_stream(self, stream): """ Set the stream that this logger is meant to replace. Usually this will be either `sys.stdout` or `sys.stderr`, but can be any object with `write()` and `flush()` methods, as supported by `logging.StreamHandler`. """ for handler in self.handlers[:]: if isinstance(handler, logging.StreamHandler): self.handlers.remove(handler) if stream is not None: stream_handler = logging.StreamHandler(stream) stream_handler.addFilter(_StreamHandlerEchoFilter()) stream_handler.setFormatter(logging.Formatter('%(message)s')) self.addHandler(stream_handler) self.stream = stream def write(self, message): """ Buffers each message until a newline is reached. Each complete line is then published to the logging system through ``self.log()``. """ self.__thread_local_ctx.write_count += 1 try: if self.__thread_local_ctx.write_count > 1: return # For each line in the buffer ending with \n, output that line to # the logger begin = 0 end = message.find('\n', begin) + 1 while end > begin: if self.buffer: self.log_orig(self.buffer, echo=True) self.buffer = '' self.log_orig(message[begin:end].rstrip(), echo=True) begin = end end = message.find('\n', begin) + 1 self.buffer = self.buffer + message[begin:] finally: self.__thread_local_ctx.write_count -= 1 def flush(self): """ Flushes all handlers attached to this logger; this includes flushing any attached stream-like object (e.g. 
`sys.stdout`). """ for handler in self.handlers: handler.flush() def fileno(self): fd = None if self.stream: try: fd = self.stream.fileno() except: fd = None if fd is None: raise IOError('fileno() not defined for logger stream %r' % self.stream) return fd def log_orig(self, message, echo=True): modname, path, lno, func = self.find_actual_caller() self.log(self.level, message, extra={'orig_name': modname, 'orig_pathname': path, 'orig_lineno': lno, 'orig_func': func, 'echo': echo}) def find_actual_caller(self): """ Returns the full-qualified module name, full pathname, line number, and function in which `StreamTeeLogger.write()` was called. For example, if this instance is used to replace `sys.stdout`, this will return the location of any print statement. """ # Gleaned from code in the logging module itself... try: f = sys._getframe(1) ##f = inspect.currentframe(1) except Exception: f = None # On some versions of IronPython, currentframe() returns None if # IronPython isn't run with -X:Frames. if f is not None: f = f.f_back rv = "(unknown module)", "(unknown file)", 0, "(unknown function)" while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) mod = inspect.getmodule(f) if mod is None: modname = '__main__' else: modname = mod.__name__ if modname == __name__: # Crawl back until the first frame outside of this module f = f.f_back continue rv = (modname, filename, f.f_lineno, co.co_name) break return rv class EchoFilter(object): """ A logger filter primarily for use with `StreamTeeLogger`. Adding an `EchoFilter` to a `StreamTeeLogger` instances allows control over which modules' print statements, for example, are output to stdout. For example, to allow only output from the 'foo' module to be printed to the console: >>> stdout_logger = logging.getLogger('stsci.tools.logutil.stdout') >>> stdout_logger.addFilter(EchoFilter(include=['foo'])) Now only print statements in the 'foo' module (or any sub-modules if 'foo' is a package) are printed to stdout. Any other print statements are just sent to the appropriate logger. Parameters ---------- include : iterable Packages or modules to include in stream output. If set, then only the modules listed here are output to the stream. exclude : iterable Packages or modules to be excluded from stream output. If set then all modules except for those listed here are output to the stream. If both ``include`` and ``exclude`` are provided, ``include`` takes precedence and ``exclude`` is ignored. 
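    For example, ``EchoFilter(exclude=['noisy'])`` stops print output
    originating in the ``noisy`` module from being echoed to the console,
    while the messages are still passed through to the logging system.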
""" def __init__(self, include=None, exclude=None): self.include = set(include) if include is not None else include self.exclude = set(exclude) if exclude is not None else exclude def filter(self, record): if ((self.include is None and self.exclude is None) or not hasattr(record, 'orig_name')): return True record_name = record.orig_name.split('.') while record_name: if self.include is not None: if '.'.join(record_name) in self.include: return True elif self.exclude is not None: if '.'.join(record_name) not in self.exclude: return True else: break record_name.pop() record.echo = False return True class LoggingExceptionHook(object): def __init__(self, logger, level=logging.ERROR): self._oldexcepthook = sys.excepthook self.logger = logger self.level = level if not self.logger.handlers: self.logger.addHandler(logging.NullHandler()) def __del__(self): try: try: sys.excepthook = self._oldexcepthook except AttributeError: sys.excepthook = sys.__excepthook__ except AttributeError: pass def __call__(self, exc_type, exc_value, traceback): self.logger.log(self.level, 'An unhandled exception ocurred:', exc_info=(exc_type, exc_value, traceback)) self._oldexcepthook(exc_type, exc_value, traceback) def setup_global_logging(): """ Initializes capture of stdout/stderr, Python warnings, and exceptions; redirecting them to the loggers for the modules from which they originated. """ global global_logging_started if not PY3K: sys.exc_clear() if global_logging_started: return orig_logger_class = logging.getLoggerClass() logging.setLoggerClass(StreamTeeLogger) try: stdout_logger = logging.getLogger(__name__ + '.stdout') stderr_logger = logging.getLogger(__name__ + '.stderr') finally: logging.setLoggerClass(orig_logger_class) stdout_logger.setLevel(logging.INFO) stderr_logger.setLevel(logging.ERROR) stdout_logger.set_stream(sys.stdout) stderr_logger.set_stream(sys.stderr) sys.stdout = stdout_logger sys.stderr = stderr_logger exception_logger = logging.getLogger(__name__ + '.exc') sys.excepthook = LoggingExceptionHook(exception_logger) logging.captureWarnings(True) rawinput = 'input' if PY3K else 'raw_input' builtins._original_raw_input = getattr(builtins, rawinput) setattr(builtins, rawinput, global_logging_raw_input) global_logging_started = True def teardown_global_logging(): """Disable global logging of stdio, warnings, and exceptions.""" global global_logging_started if not global_logging_started: return stdout_logger = logging.getLogger(__name__ + '.stdout') stderr_logger = logging.getLogger(__name__ + '.stderr') if sys.stdout is stdout_logger: sys.stdout = sys.stdout.stream if sys.stderr is stderr_logger: sys.stderr = sys.stderr.stream # If we still have an unhandled exception go ahead and handle it with the # replacement excepthook before deleting it exc_type, exc_value, exc_traceback = sys.exc_info() if exc_type is not None: sys.excepthook(exc_type, exc_value, exc_traceback) del exc_type del exc_value del exc_traceback if not PY3K: sys.exc_clear() del sys.excepthook logging.captureWarnings(False) rawinput = 'input' if PY3K else 'raw_input' if hasattr(builtins, '_original_raw_input'): setattr(builtins, rawinput, builtins._original_raw_input) del builtins._original_raw_input global_logging_started = False # Cribbed, with a few tweaks from Tom Aldcroft at # http://www.astropython.org/snippet/2010/2/Easier-python-logging def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None, stream=None, level=logging.INFO, filename=None, filemode='w', filelevel=None, propagate=True): """ Do basic 
configuration for the logging system. Similar to logging.basicConfig but the logger ``name`` is configurable and both a file output and a stream output can be created. Returns a logger object. The default behaviour is to create a logger called ``name`` with a null handled, and to use the "%(levelname)s: %(message)s" format string, and add the handler to the ``name`` logger. A number of optional keyword arguments may be specified, which can alter the default behaviour. :param name: Logger name :param format: handler format string :param datefmt: handler date/time format specifier :param stream: add a StreamHandler using ``stream`` (None disables the stream, default=None) :param level: logger level (default=INFO). :param filename: add a FileHandler using ``filename`` (default=None) :param filemode: open ``filename`` with specified filemode ('w' or 'a') :param filelevel: logger level for file logger (default=``level``) :param propagate: propagate message to parent (default=True) :returns: logging.Logger object """ # Get a logger for the specified name logger = logging.getLogger(name) logger.setLevel(level) fmt = logging.Formatter(format, datefmt) logger.propagate = propagate # Remove existing handlers, otherwise multiple handlers can accrue for hdlr in logger.handlers: logger.removeHandler(hdlr) # Add handlers. Add NullHandler if no file or stream output so that # modules don't emit a warning about no handler. if not (filename or stream): logger.addHandler(logging.NullHandler()) if filename: hdlr = logging.FileHandler(filename, filemode) if filelevel is None: filelevel = level hdlr.setLevel(filelevel) hdlr.setFormatter(fmt) logger.addHandler(hdlr) if stream: hdlr = logging.StreamHandler(stream) hdlr.setLevel(level) hdlr.setFormatter(fmt) logger.addHandler(hdlr) return logger class _StreamHandlerEchoFilter(logging.Filter): """ Filter used by the `logging.StreamHandler` internal to `StreamTeeLogger`; any message logged through `StreamTeeLogger.write()` has an ``echo=True`` attribute attached to the `LogRecord`. This ensures that the `StreamHandler` only logs messages with this ``echo`` attribute set to `True`. 
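    (Records logged directly through the normal `logging` API carry no
    ``echo`` attribute and are therefore not re-echoed to the stream.)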
""" def filter(self, record): if hasattr(record, 'echo'): return record.echo return False class _LogTeeHandler(logging.Handler): def __init__(self, level=logging.NOTSET): logging.Handler.__init__(self, level) self.__thread_local_ctx = threading.local() self.__thread_local_ctx.logger_handle_counts = {} def emit(self, record): # Hand off to the global logger with the name same as the module of # origin for this record if not hasattr(record, 'orig_name'): return record = logging.LogRecord(record.orig_name, record.levelno, record.orig_pathname, record.orig_lineno, record.msg, record.args, record.exc_info, record.orig_func) record.origin = "" logger = logging.getLogger(record.name) if not logger.handlers: logger.addHandler(logging.NullHandler()) counts = self.__thread_local_ctx.logger_handle_counts if logger.name in counts: counts[logger.name] += 1 else: counts[logger.name] = 1 if self._search_stack(): return try: if counts[logger.name] > 1: return logger.handle(record) finally: counts[logger.name] -= 1 def _search_stack(self): curr_frame = sys._getframe(3) ##curr_frame = inspect.currentframe(3) while curr_frame: if 'self' in curr_frame.f_locals: s = curr_frame.f_locals['self'] if (isinstance(s, logging.Logger) and not isinstance(s, StreamTeeLogger)): return True curr_frame = curr_frame.f_back return False if sys.version_info[:2] < (2, 7): # We need to backport logging.captureWarnings import warnings PY26 = sys.version_info[:2] >= (2, 6) logging._warnings_showwarning = None class NullHandler(logging.Handler): """ This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package. """ def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None logging.NullHandler = NullHandler def _showwarning(message, category, filename, lineno, file=None, line=None): """ Implementation of showwarnings which redirects to logging, which will first check to see if the file parameter is None. If a file is specified, it will delegate to the original warnings implementation of showwarning. Otherwise, it will call warnings.formatwarning and will log the resulting string to a warnings logger named "py.warnings" with level logging.WARNING. """ if file is not None: if logging._warnings_showwarning is not None: if PY26: _warnings_showwarning(message, category, filename, lineno, file, line) else: # Python 2.5 and below don't support the line argument _warnings_showwarning(message, category, filename, lineno, file) else: if PY26: s = warnings.formatwarning(message, category, filename, lineno, line) else: s = warnings.formatwarning(message, category, filename, lineno) logger = logging.getLogger("py.warnings") if not logger.handlers: logger.addHandler(NullHandler()) logger.warning("%s", s) logging._showwarning = _showwarning del _showwarning def captureWarnings(capture): """ If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations. 
""" if capture: if logging._warnings_showwarning is None: logging._warnings_showwarning = warnings.showwarning warnings.showwarning = logging._showwarning else: if logging._warnings_showwarning is not None: warnings.showwarning = logging._warnings_showwarning logging._warnings_showwarning = None logging.captureWarnings = captureWarnings del captureWarnings stsci.tools-3.4.12/lib/stsci/tools/minmatch.py0000644001120100020070000002510713006721301022770 0ustar jhunkSTSCI\science00000000000000"""minmatch.py: Dictionary allowing minimum-match of string keys Entries can be retrieved using an abbreviated key as long as the key is unambiguous. __getitem__ and get() raise an error if the key is ambiguous. A key is not consider ambiguous if it matches a full key, even if it also is an abbreviation for a longer key. E.g., if there are keys 'spam' and 'spameggs' in the dictionary, d['spam'] returns the value associated with 'spam', while d['spa'] is an error due to ambiguity. New key/value pairs must be inserted using the add() method to avoid ambiguities with when to overwrite and when to add a new key. Assignments using setitem (e.g. d[key] = value) will raise an exception unless the key already exists and is unambiguous. The getall(key) method returns a list of all the matching values, containing a single entry for unambiguous matches and multiple entries for ambiguous matches. $Id$ R. White, 2000 January 28 """ from __future__ import division, print_function # confidence high import sys, copy PY3K = sys.version_info[0] > 2 if PY3K: string_types = str else: string_types = basestring # Need to import UserDict - 3.x has it in collections, 2.x has it in UserDict, # and the 2to3 tool doesn't fix this for us; the following should handle it all try: from collections import UserDict # try for 3.x except ImportError: from UserDict import UserDict # must be in 2.x class AmbiguousKeyError(KeyError): pass class MinMatchDict(UserDict): def __init__(self,indict=None,minkeylength=1): self.data = {} # use lazy instantiation for min-match dictionary # it may never be created if full keys are always used self.mmkeys = None if minkeylength<1: minkeylength = 1 self.minkeylength = minkeylength if indict is not None: add = self.add for key in indict.keys(): add(key, indict[key]) def __deepcopy__(self, memo=None): """Deep copy of dictionary""" # this is about twice as fast as the default implementation return self.__class__(copy.deepcopy(self.data,memo), self.minkeylength) def __getinitargs__(self): """Return __init__ args for pickle""" return (self.data, self.minkeylength) def _mmInit(self): """Create the minimum match dictionary of keys""" # cache references to speed up loop a bit mmkeys = {} mmkeysGet = mmkeys.setdefault minkeylength = self.minkeylength for key in self.data.keys(): # add abbreviations as short as minkeylength # always add at least one entry (even for key="") lenkey = len(key) start = min(minkeylength,lenkey) for i in range(start,lenkey+1): mmkeysGet(key[0:i],[]).append(key) self.mmkeys = mmkeys def getfullkey(self, key, new=0): # check for exact match first # ambiguous key is ok if there is exact match if key in self.data: return key if not isinstance(key, string_types): raise KeyError("MinMatchDict keys must be strings") # no exact match, so look for unique minimum match if self.mmkeys is None: self._mmInit() keylist = self.mmkeys.get(key) if keylist is None: # no such key -- ok only if new flag is set if new: return key raise KeyError("Key "+key+" not found") elif len(keylist) == 1: # unambiguous 
key return keylist[0] else: return self.resolve(key,keylist) def resolve(self, key, keylist): """Hook to resolve ambiguities in selected keys""" raise AmbiguousKeyError("Ambiguous key "+ repr(key) + ", could be any of " + str(sorted(keylist))) def add(self, key, item): """Add a new key/item pair to the dictionary. Resets an existing key value only if this is an exact match to a known key.""" mmkeys = self.mmkeys if mmkeys is not None and not (key in self.data): # add abbreviations as short as minkeylength # always add at least one entry (even for key="") lenkey = len(key) start = min(self.minkeylength,lenkey) # cache references to speed up loop a bit mmkeysGet = mmkeys.setdefault for i in range(start,lenkey+1): mmkeysGet(key[0:i],[]).append(key) self.data[key] = item def __setitem__(self, key, item): """Set value of existing key/item in dictionary""" try: key = self.getfullkey(key) self.data[key] = item except KeyError as e: raise e.__class__(str(e) + "\nUse add() method to add new items") def __getitem__(self, key): try: # try the common case that the exact key is given first return self.data[key] except KeyError: return self.data[self.getfullkey(key)] def get(self, key, failobj=None, exact=0): """Raises exception if key is ambiguous""" if not exact: key = self.getfullkey(key,new=1) return self.data.get(key,failobj) def get_exact_key(self, key, failobj=None): """Returns failobj if key does not match exactly""" return self.data.get(key,failobj) def __delitem__(self, key): key = self.getfullkey(key) del self.data[key] if self.mmkeys is not None: start = min(self.minkeylength,len(key)) for i in range(start,len(key)+1): s = key[0:i] value = self.mmkeys.get(s) value.remove(key) if not value: # delete entry from mmkeys if that was last value del self.mmkeys[s] def clear(self): self.mmkeys = None self.data.clear() def __contains__(self, key): """For the "in" operator. Raise an exception if key is ambiguous""" return self._has(key) def has_key(self, key, exact=0): return self._has(key, exact) def _has(self, key, exact=0): """Raises an exception if key is ambiguous""" if not exact: key = self.getfullkey(key,new=1) return key in self.data def has_exact_key(self, key): """Returns true if there is an exact match for this key""" return key in self.data def update(self, other): # check for missing attrs (needed in python 2.7) if not hasattr(self, 'data'): self.data = {} if not hasattr(self, 'mmkeys'): self.mmkeys = None if not hasattr(self, 'minkeylength'): self.minkeylength = other.minkeylength # now do the update from 'other' if type(other) is type(self.data): for key in other.keys(): self.add(key,other[key]) else: for key, value in other.items(): self.add(key,value) def getall(self, key, failobj=None): """Returns a list of all the matching values for key, containing a single entry for unambiguous matches and multiple entries for ambiguous matches.""" if self.mmkeys is None: self._mmInit() k = self.mmkeys.get(key) if not k: return failobj return list(map(self.data.get, k)) def getallkeys(self, key, failobj=None): """Returns a list of the full key names (not the items) for all the matching values for key. 
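        (E.g. with keys 'spam' and 'spameggs' present, getallkeys('spam')
        returns both full keys, even though d['spam'] alone resolves to
        the exact key 'spam'.)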
The list will contain a single entry for unambiguous matches and multiple entries for ambiguous matches.""" if self.mmkeys is None: self._mmInit() return self.mmkeys.get(key, failobj) class QuietMinMatchDict(MinMatchDict): """Minimum match dictionary that does not raise unexpected AmbiguousKeyError Unlike MinMatchDict, if key is ambiguous then both get() and has_key() methods return false (just as if there is no match). For most uses this is probably not the preferred behavior (use MinMatchDict instead), but for applications that rely on the usual dictionary behavior where .get() and .has_key() do not raise exceptions, this is useful. """ def get(self, key, failobj=None, exact=0): """Returns failobj if key is not found or is ambiguous""" if not exact: try: key = self.getfullkey(key) except KeyError: return failobj return self.data.get(key,failobj) def _has(self, key, exact=0): """Returns false if key is not found or is ambiguous""" if not exact: try: key = self.getfullkey(key) return 1 except KeyError: return 0 else: return key in self.data # some simple tests def test(): d = MinMatchDict() print("a few d.add() calls") d.add('test',1) d.add('text',2) d.add('ten',10) print("d.items()", sorted(d.items())) print("d['tex']=", d['tex']) print("Changing d['tes'] to 3") d['tes'] = 3 print("d.items()", sorted(d.items())) try: print("Ambiguous assignment to d['te'] - expecting exception") d['te'] = 5 except AmbiguousKeyError as e: print(str(e)) print('---') print("d.get('tes')", d.get('tes')) print("d.get('teq'), expect None: ", d.get('teq')) print("d.getall('t')", sorted(d.getall('t'))) try: print("d.get('t') - expecting exception") d.get('t') except AmbiguousKeyError as e: print(str(e)) print('---') print("d.add('tesseract',100)") d.add('tesseract',100) print("d.items()", sorted(d.items())) try: print("d.get('tes') - expecting exception") d.get('tes') except AmbiguousKeyError as e: print(str(e)) print('---') try: print("del d['tes'] - expecting exception") del d['tes'] except AmbiguousKeyError as e: print(str(e)) print('---') print("del d['tess']") del d['tess'] print("d.items()", sorted(d.items())) print("d.get('tes')", d.get('tes')) print("d.has_key('tes'):", 'tes' in d) print("d.has_key('tes', exact=True):", d.has_key('tes', exact=True)) print("'tes' in d:", 'tes' in d) print("d.clear()") d.clear() print("d.items()", sorted(d.items())) print("d.update({'ab': 0, 'cd': 1, 'ce': 2})") d.update({'ab': 0, 'cd': 1, 'ce': 2}) print("d.items()", sorted(d.items())) print("d['a']", d['a']) try: print("d['t'] - expecting exception") d['t'] except KeyError as e: print(str(e)) print('---') if __name__ == "__main__": test() stsci.tools-3.4.12/lib/stsci/tools/mputil.py0000755001120100020070000001752013112074217022512 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python # from __future__ import print_function import math, time class WatchedProcess(object): """ MINIMAL wrapper around multiprocessing.Process so we can more easily track/time them. 
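
    A minimal usage sketch (hedged; assumes only the standard library's
    multiprocessing module -- 'worker' is a hypothetical target function):

        import multiprocessing

        def worker(n):
            print(n * n)   # placeholder for real work

        proc = multiprocessing.Process(target=worker, args=(4,))
        wp = WatchedProcess(proc)
        wp.start_process()              # records the start time, then starts
        wp.join_process()               # blocks until the child exits
        print(wp.time_since_started())  # seconds since start_process()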
""" def __init__(self, proc): self.process = proc self.state = 0 # 0=not-yet-started; 1=started; 2=finished-or-terminated self._start_time = None def start_process(self): assert self.state == 0, "Already started: "+str(self.process) self._start_time = time.time() self.process.start() self.state = 1 def join_process(self): assert self.state >= 1, "Not started: "+str(self.process) self.process.join() self.state = 2 def time_since_started(self): assert self.state > 0, "Not yet started: "+str(self.process) return time.time() - self._start_time def __repr__(self): return "WatchedProcess for: "+str(self.process)+', state='+str(self.state) def launch_and_wait(mp_proc_list, pool_size): """ Given a list of multiprocessing.Process objects which have not yet been started, this function launches them and blocks until the last finishes. This makes sure that only processes are ever working at any one time (this number does not include the main process which called this function, since that will not tax the CPU). The idea here is roughly analogous to multiprocessing.Pool with the exceptions that: 1 - The caller will get to use the multiprocessing.Process model of using shared memory (inheritance) to pass arg data to the child, 2 - maxtasksperchild is always 1, 3 - no function return value is kept/tranferred (not yet implemented) """ # Sanity check if len(mp_proc_list) < 1: return # Create or own list with easy state watching procs = [] for p in mp_proc_list: procs.append(WatchedProcess(p)) num_total = len(procs) # Launch all of them, but only so pool_size are running at any time keep_going = True while (keep_going): # Before we start any more, find out how many are running. First go # through the list of those started and see if alive. Update state. for p in procs: if p.state == 1: # been started if not p.process.is_alive(): p.state = 2 # process has finished or been terminated assert p.process.exitcode is not None, \ "Process is not alive but has no exitcode? "+ \ str(p.process) # now figure num_running num_running = len([p for p in procs if p.state == 1]) # Start some. Only as many as pool_size should ever be running. num_avail_cpus = pool_size - num_running num_to_start = len([p for p in procs if p.state == 0]) if num_to_start < 1: # all have been started, can finally leave loop and go wait break if num_avail_cpus > 0 and num_to_start > 0: num_to_start_now = min(num_avail_cpus, num_to_start) started_now = 0 for p in procs: if started_now < num_to_start_now and p.state == 0: p.start_process() # debug "launch_and_wait: started: "+str(p.process) started_now += 1 # else: otherwise, all cpus are in use, just wait ... # sleep to tame loop activity, but also must sleep a bit after each # start call so that the call to is_alive() woorks correctly time.sleep(1) # Out of the launching loop, can now wait on all procs left. for p in procs: p.join_process() # Check all exit codes before returning for p in procs: if 0 != p.process.exitcode: raise RuntimeError("Problem during: "+str(p.process.name)+ \ ', exitcode: '+str(p.process.exitcode)+'. Check log.') # all is well, can return def takes_time(x): """ Example function which takes some time to run - just here for testing. 
""" import numpy START = time.time() s = numpy.float64(1) #s = numpy.float32(1) #s = 1.0 assert x not in (3, 7,9), "Simulate some errors" for i in range(10000000): s = (s + x) * s % 2399232 elap = time.time() - START print(('Done "takes_time" x='+str(x)+': s = '+str(s)+', elapsed time = %.2f s' % elap)) def do_main(): """ Illustrate use of launch_and_wait """ # load em up import multiprocessing p = None subprocs = [] for item in [2,3,4,5,6,7,8,9]: print(("mputil: instantiating Process for x = "+str(item))) p = multiprocessing.Process(target=takes_time, args=(item,), name='takes_time()') subprocs.append(p) # launch em, pool-fashion launch_and_wait(subprocs, 3) # by now, all should be finished print("All subprocs should be finished and joined.") def best_tile_layout(pool_size): """ Determine and return the best layout of "tiles" for fastest overall parallel processing of a rectangular image broken up into N smaller equally-sized rectangular tiles, given as input the number of processes/chunks which can be run/worked at the same time (pool_size). This attempts to return a layout whose total number of tiles is as close as possible to pool_size, without going over (and thus not really taking advantage of pooling). Since we can vary the size of the rectangles, there is not much (any?) benefit to pooling. Returns a tuple of ( , ) This assumes the image in question is relatively close to square, and so the returned tuple attempts to give a layout which is as squarishly-blocked as possible, except in cases where speed would be sacrificed. EXAMPLES: For pool_size of 4, the best result is 2x2. For pool_size of 6, the best result is 2x3. For pool_size of 5, a result of 1x5 is better than a result of 2x2 (which would leave one core unused), and 1x5 is also better than a result of 2x3 (which would require one core to work twice while all others wait). For higher, odd pool_size values (say 39), it is deemed best to sacrifice a few unused cores to satisfy our other constraints, and thus the result of 6x6 is best (giving 36 tiles and 3 unused cores). """ # Easy answer sanity-checks if pool_size < 2: return (1, 1) # Next, use a small mapping of hard-coded results. While we agree # that many of these are unlikely pool_size values, they are easy # to accomodate. mapping = { 0:(1,1), 1:(1,1), 2:(1,2), 3:(1,3), 4:(2,2), 5:(1,5), 6:(2,3), 7:(2,3), 8:(2,4), 9:(3,3), 10:(2,5), 11:(2,5), 14:(2,7), 18:(3,6), 19:(3,6), 28:(4,7), 29:(4,7), 32:(4,8), 33:(4,8), 34:(4,8), 40:(4,10), 41:(4,10) } if pool_size in mapping: return mapping[pool_size] # Next, take a guess using the square root and (for the sake of # simplicity), go with it. We *could* get much fancier here... # Use floor-rounding (not ceil) so that the total number of resulting # tiles is <= pool_size. xnum = int(math.sqrt(pool_size)) ynum = int((1.*pool_size)/xnum) return (xnum, ynum) def test_best_tile_layout(): """ Loop though some numbers and make sure we get expected results. 
""" for i in range(257): x,y = best_tile_layout(i) assert (x*y <= i) or (i == 0), "Total num resulting tiles > pool_size" unused_cores = i - (x*y) print(i, (x,y), unused_cores) if i < 10: assert unused_cores <= 1, "Too many idle cores at i = "+str(i) else: percent_unused = 100.*((unused_cores*1.)/i) assert percent_unused < 14., "Too many idles cores at i: "+str(i) if __name__=='__main__': do_main() stsci.tools-3.4.12/lib/stsci/tools/nimageiter.py0000644001120100020070000001705113112074217023320 0ustar jhunkSTSCI\science00000000000000""" License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE """ from __future__ import division # confidence medium from __future__ import generators import types import numpy as N BUFSIZE = 1024*1000 # 1Mb cache size __version__ = '0.7' __vdate__ = '25-July-2012' def ImageIter(imglist,bufsize=BUFSIZE,overlap=0,copy=0,updateSection = None): """ Return image section for each image listed on input. The inputs can either be a single image or a list of them, with the return value matching the input type. All images in a list MUST have the same shape, though, in order for the iterator to scroll through them properly. The size of section gets defined by 'bufsize', while 'copy' specifies whether to return an explicit copy of each input section or simply return views. The 'overlap' parameter provides a way of scrolling through the image with this many rows of overlap, with the default being no overlap at all. """ if type(imglist) != list: imgarr = imglist.data imgarr = N.asarray(imgarr) _imglen = 1 single = 1 else: imgarr = imglist[0].data imgarr = N.asarray(imgarr) _imglen = len(imglist) single = 0 _outlist = [] _numrows = imgarr.shape[0] if len(imgarr.shape) == 1: if copy: if single: yield imgarr.copy(),None else: for img in imglist: _outlist.append(img.copy()) else: yield imglist,None else: nrows = computeBuffRows(imgarr,bufsize=bufsize) # niter = int(imgarr.shape[0] / nrows) * nrows nbuff,nrows = computeNumberBuff(imgarr.shape[0],nrows,overlap) niter = nbuff*nrows if copy: # Create a cache that will contain a copy of the input # not just a view... if single: _cache = N.zeros((nrows,imgarr.shape[1]),dtype=imgarr.dtype) else: for img in imglist: _outlist.append(N.zeros((nrows,imgarr.shape[1]),dtype=imgarr.dtype)) for pix in range(0,niter+1,nrows): # overlap needs to be computed here # This allows the user to avoid edge effects when # convolving the returned image sections, and insures # that the last segment will always be returned with # overlap+1 rows. _prange = pix+nrows+overlap if _prange > _numrows: _prange = _numrows if pix == _prange: break if copy: if single: _cache = imgarr[pix:_prange].copy() yield _cache,(pix,_prange) N.multiply(_cache,0.,_cache) else: for img in range(len(imglist)): _outlist[img] = imglist[img][pix:_prange].copy() yield _outlist,(pix,_prange) for img in range(len(imglist)): N.multiply(_outlist[img],0.,_outlist[img]) else: if single: #yield imgarr.section[pix:_prange,:],(pix,_prange) yield imgarr[pix:_prange],(pix,_prange) else: for hdu in imglist: #_outlist.append(imglist[img][pix:pix+nrows]) _outlist.append(hdu.section[pix:_prange,:]) yield _outlist,(pix,_prange) # This code is inserted to copy any values changed # in the image sections back into the original image. 
if updateSection is not None: #for _index in xrange(len(_outlist)): imglist[updateSection][pix:_prange] = _outlist[updateSection] del _outlist _outlist = [] def computeBuffRows(imgarr,bufsize=BUFSIZE): """ Function to compute the number of rows from the input array that fits in the allocated memory given by the bufsize. """ imgarr = N.asarray(imgarr) buffrows = int(bufsize / (imgarr.itemsize * imgarr.shape[1])) return buffrows def computeNumberBuff(numrows, buffrows, overlap): """ Function to compute the number of buffer sections that will be used to read the input image given the specified overlap. """ nbuff = _computeNbuff(numrows, buffrows, overlap) niter = 1 + int(nbuff) totalrows = niter * buffrows # We need to account for the case where the number of # iterations ends up being greater than needed due to the # overlap. #if totalrows > numrows: niter -= 1 lastbuff = numrows - (niter*(buffrows-overlap)) if lastbuff < overlap+1 and nbuff > 1: good = False while not good: if buffrows > overlap+1: buffrows -= 1 nbuff = _computeNbuff(numrows, buffrows, overlap) niter = 1 + int(nbuff) totalrows = niter * (buffrows - overlap) lastbuff = numrows - (niter*(buffrows-overlap)) if lastbuff > overlap + 1: good = True else: good = True return niter,buffrows def _computeNbuff(numrows,buffrows,overlap): if buffrows > numrows: nbuff = 1 else: overlaprows = buffrows - overlap rowratio = (numrows - overlaprows)/(1.0*buffrows) nbuff = (numrows - overlaprows+1)/(1.0*overlaprows) return nbuff def FileIter(filelist,bufsize=BUFSIZE,overlap=0): """ Return image section for each image listed on input, with the object performing the file I/O upon each call to the iterator. The inputs can either be a single image or a list of them, with the return value matching the input type. All images in a list MUST have the same shape, though, in order for the iterator to scroll through them properly. The size of section gets defined by 'bufsize'. The 'overlap' parameter provides a way of scrolling through the image with this many rows of overlap, with the default being no overlap at all. """ if type(filelist) != list: imgarr = filelist.data imgarr = N.asarray(imgarr) _imglen = 1 single = 1 else: imgarr = filelist[0].data imgarr = N.asarray(imgarr) _imglen = len(filelist) single = 0 _outlist = [] _numrows = imgarr.shape[0] if len(imgarr.shape) == 1: # This needs to be generalized to return pixel ranges # based on bufsize, just like with 2-D arrays (images). yield filelist,None else: nrows = computeBuffRows(imgarr,bufsize=bufsize) # niter = int(imgarr.shape[0] / nrows) * nrows nbuff,nrows = computeNumberBuff(imgarr.shape[0],nrows,overlap) niter = nbuff * nrows for pix in range(0,niter+1,nrows-overlap): # overlap needs to be computed here # This allows the user to avoid edge effects when # convolving the returned image sections, and insures # that the last segment will always be returned with # overlap+1 rows. _prange = pix+nrows if _prange > _numrows: _prange = _numrows if pix >= _prange: break if single: yield imgarr[pix:_prange],(pix,_prange) else: for hdu in filelist: _outlist.append(hdu[pix:_prange]) yield _outlist,(pix,_prange) del _outlist _outlist = [] stsci.tools-3.4.12/lib/stsci/tools/nmpfit.py0000644001120100020070000027506213112074217022501 0ustar jhunkSTSCI\science00000000000000""" Python/Numeric version of this module was called mpfit. This version was modified to use numpy. 
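
A minimal calling sketch (hedged; it mirrors the EXAMPLE section in the
long docstring below, but written for this numpy-based port -- the
function name 'residuals' and the straight-line model are illustrative
only):

    import numpy
    from stsci.tools import nmpfit

    def residuals(p, fjac=None, x=None, y=None, err=None):
        model = p[0] + p[1]*x
        status = 0
        return [status, (y - model)/err]

    x = numpy.arange(10, dtype=numpy.float64)
    y = 3.0 + 2.0*x
    err = numpy.ones(10)
    fit = nmpfit.mpfit(residuals, [1.0, 1.0],
                       functkw={'x': x, 'y': y, 'err': err}, quiet=1)
    print(fit.status, fit.params)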
""" from __future__ import division, print_function # confidence medium __version__ = '0.2' """ Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1. AUTHORS The original version of this software, called LMFIT, was written in FORTRAN as part of the MINPACK-1 package by XXX. Craig Markwardt converted the FORTRAN code to IDL. The information for the IDL version is: Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770 craigm@lheamail.gsfc.nasa.gov UPDATED VERSIONs can be found on my WEB PAGE: http://cow.physics.wisc.edu/~craigm/idl/idl.html Mark Rivers created this Python version from Craig's IDL version. Mark Rivers, University of Chicago Building 434A, Argonne National Laboratory 9700 South Cass Avenue, Argonne, IL 60439 rivers@cars.uchicago.edu Updated versions can be found at http://cars.uchicago.edu/software DESCRIPTION MPFIT uses the Levenberg-Marquardt technique to solve the least-squares problem. In its typical use, MPFIT will be used to fit a user-supplied function (the "model") to user-supplied data points (the "data") by adjusting a set of parameters. MPFIT is based upon MINPACK-1 (LMDIF.F) by More' and collaborators. For example, a researcher may think that a set of observed data points is best modelled with a Gaussian curve. A Gaussian curve is parameterized by its mean, standard deviation and normalization. MPFIT will, within certain constraints, find the set of parameters which best fits the data. The fit is "best" in the least-squares sense; that is, the sum of the weighted squared differences between the model and data is minimized. The Levenberg-Marquardt technique is a particular strategy for iteratively searching for the best fit. This particular implementation is drawn from MINPACK-1 (see NETLIB), and is much faster and more accurate than the version provided in the Scientific Python package in Scientific.Functions.LeastSquares. This version allows upper and lower bounding constraints to be placed on each parameter, or the parameter can be held fixed. The user-supplied Python function should return an array of weighted deviations between model and data. In a typical scientific problem the residuals should be weighted so that each deviate has a gaussian sigma of 1.0. If X represents values of the independent variable, Y represents a measurement for each value of X, and ERR represents the error in the measurements, then the deviates could be calculated as follows: DEVIATES = (Y - F(X)) / ERR where F is the analytical function representing the model. You are recommended to use the convenience functions MPFITFUN and MPFITEXPR, which are driver functions that calculate the deviates for you. If ERR are the 1-sigma uncertainties in Y, then TOTAL( DEVIATES^2 ) will be the total chi-squared value. MPFIT will minimize the chi-square value. The values of X, Y and ERR are passed through MPFIT to the user-supplied function via the FUNCTKW keyword. Simple constraints can be placed on parameter values by using the PARINFO keyword to MPFIT. See below for a description of this keyword. MPFIT does not perform more general optimization tasks. See TNMIN instead. MPFIT is customized, based on MINPACK-1, to the least-squares minimization problem. USER FUNCTION The user must define a function which returns the appropriate values as specified above. The function should return the weighted deviations between the model and the data. It should also return a status flag and an optional partial derivative array. 
   For applications which use finite-difference derivatives -- the
   default -- the user function should be declared in the following
   way:

      def myfunct(p, fjac=None, x=None, y=None, err=None):
         # Parameter values are passed in "p"
         # If fjac is None then partial derivatives should not be
         # computed.  It will always be None if MPFIT is called with the
         # default flag.
         model = F(x, p)
         # Non-negative status value means MPFIT should continue,
         # negative means stop the calculation.
         status = 0
         return([status, (y-model)/err])

   See below for applications with analytical derivatives.

   The keyword parameters X, Y, and ERR in the example above are
   suggestive but not required.  Any parameters can be passed to MYFUNCT
   by using the functkw keyword to MPFIT.  Use MPFITFUN and MPFITEXPR if
   you need ideas on how to do that.  The function *must* accept a
   parameter list, P.

   In general there are no restrictions on the number of dimensions in
   X, Y or ERR.  However the deviates *must* be returned in a
   one-dimensional Numeric array of type Float.

   User functions may also indicate a fatal error condition using the
   status return described above.  If status is set to a number between
   -15 and -1 then MPFIT will stop the calculation and return to the
   caller.

 ANALYTIC DERIVATIVES

   In the search for the best-fit solution, MPFIT by default calculates
   derivatives numerically via a finite difference approximation.  The
   user-supplied function need not calculate the derivatives explicitly.
   However, if you desire to compute them analytically, then the
   AUTODERIVATIVE=0 keyword must be passed to MPFIT.  As a practical
   matter, it is often sufficient and even faster to allow MPFIT to
   calculate the derivatives numerically, and so AUTODERIVATIVE=0 is not
   necessary.

   If AUTODERIVATIVE=0 is used then the user function must check the
   parameter FJAC, and if FJAC is not None then return the partial
   derivative array in the return list.

      def myfunct(p, fjac=None, x=None, y=None, err=None):
         # Parameter values are passed in "p"
         # If FJAC is not None then partial derivatives must be
         # computed.  FJAC contains an array of len(p), where each entry
         # is 1 if that parameter is free and 0 if it is fixed.
         model = F(x, p)
         # Non-negative status value means MPFIT should continue,
         # negative means stop the calculation.
         status = 0
         if (dojac):
            pderiv = Numeric.zeros([len(x), len(p)], Numeric.Float)
            for j in range(len(p)):
               pderiv[:,j] = FGRAD(x, p, j)
         else:
            pderiv = None
         return([status, (y-model)/err, pderiv])

   where FGRAD(x, p, i) is a user function which must compute the
   derivative of the model with respect to parameter P[i] at X.  When
   finite differencing is used for computing derivatives (ie, when
   AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
   derivatives, the parameter FJAC=None.

   Derivatives should be returned in the PDERIV array.  PDERIV should be
   an m x n array, where m is the number of data points and n is the
   number of parameters.  dp[i,j] is the derivative at the ith point
   with respect to the jth parameter.

   The derivatives with respect to fixed parameters are ignored; zero is
   an appropriate value to insert for those derivatives.  Upon input to
   the user function, FJAC is set to a vector with the same length as P,
   with a value of 1 for a parameter which is free, and a value of zero
   for a parameter which is fixed (and hence no derivative needs to be
   calculated).

   If the data is higher than one dimensional, then the *last* dimension
   should be the parameter dimension.  Example: fitting a 50x50 image,
   "dp" should be 50x50xNPAR.
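
   As one more (hedged) illustration of the AUTODERIVATIVE=0 protocol,
   written with numpy rather than Numeric -- here the model is a
   straight line, the two columns of pderiv are its exact partials, and
   x, y, err are the data arrays passed in through functkw (the name
   linfunct is illustrative only):

      def linfunct(p, fjac=None, x=None, y=None, err=None):
         model = p[0] + p[1]*x
         status = 0
         if fjac is None:
            # errors-only call: return just [status, deviates]
            return [status, (y-model)/err]
         # analytic partials: d(model)/dp0 = 1, d(model)/dp1 = x
         pderiv = numpy.zeros([len(x), len(p)], numpy.float64)
         pderiv[:,0] = 1.
         pderiv[:,1] = x
         return [status, (y-model)/err, pderiv]

      m = mpfit(linfunct, [1., 1.], functkw={'x':x, 'y':y, 'err':err},
                autoderivative=0)

   (Note that when fjac is None this sketch returns only two items,
   which matches how the fitter unpacks the errors-only call.)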
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD The behavior of MPFIT can be modified with respect to each parameter to be fitted. A parameter value can be fixed; simple boundary constraints can be imposed; limitations on the parameter changes can be imposed; properties of the automatic derivative can be modified; and parameters can be tied to one another. These properties are governed by the PARINFO structure, which is passed as a keyword parameter to MPFIT. PARINFO should be a list of dictionaries, one list entry for each parameter. Each parameter is associated with one element of the array, in numerical order. The dictionary can have the following keys (none are required, keys are case insensitive): 'value' - the starting parameter value (but see the START_PARAMS parameter for more information). 'fixed' - a boolean value, whether the parameter is to be held fixed or not. Fixed parameters are not varied by MPFIT, but are passed on to MYFUNCT for evaluation. 'limited' - a two-element boolean array. If the first/second element is set, then the parameter is bounded on the lower/upper side. A parameter can be bounded on both sides. Both LIMITED and LIMITS must be given together. 'limits' - a two-element float array. Gives the parameter limits on the lower and upper sides, respectively. Zero, one or two of these values can be set, depending on the values of LIMITED. Both LIMITED and LIMITS must be given together. 'parname' - a string, giving the name of the parameter. The fitting code of MPFIT does not use this tag in any way. However, the default iterfunct will print the parameter name if available. 'step' - the step size to be used in calculating the numerical derivatives. If set to zero, then the step size is computed automatically. Ignored when AUTODERIVATIVE=0. 'mpside' - the sidedness of the finite difference when computing numerical derivatives. This field can take four values: 0 - one-sided derivative computed automatically 1 - one-sided derivative (f(x+h) - f(x) )/h -1 - one-sided derivative (f(x) - f(x-h))/h 2 - two-sided derivative (f(x+h) - f(x-h))/(2*h) Where H is the STEP parameter described above. The "automatic" one-sided derivative method will chose a direction for the finite difference which does not violate any constraints. The other methods do not perform this check. The two-sided method is in principle more precise, but requires twice as many function evaluations. Default: 0. 'mpmaxstep' - the maximum change to be made in the parameter value. During the fitting process, the parameter will never be changed by more than this value in one iteration. A value of 0 indicates no maximum. Default: 0. 'tied' - a string expression which "ties" the parameter to other free or fixed parameters. Any expression involving constants and the parameter array P are permitted. Example: if parameter 2 is always to be twice parameter 1 then use the following: parinfo(2).tied = '2 * p(1)'. Since they are totally constrained, tied parameters are considered to be fixed; no errors are computed for them. [ NOTE: the PARNAME can't be used in expressions. ] 'mpprint' - if set to 1, then the default iterfunct will print the parameter value. If set to 0, the parameter value will not be printed. This tag can be used to selectively print only a few parameter values out of many. Default: 1 (all parameters printed) Future modifications to the PARINFO structure, if any, will involve adding dictionary tags beginning with the two letters "MP". 
Therefore programmers are urged to avoid using tags starting with the same letters; otherwise they are free to include their own fields within the PARINFO structure, and they will be ignored. PARINFO Example: parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}]*5 parinfo[0]['fixed'] = 1 parinfo[4]['limited'][0] = 1 parinfo[4]['limits'][0] = 50. values = [5.7, 2.2, 500., 1.5, 2000.] for i in range(5): parinfo[i]['value']=values[i] A total of 5 parameters, with starting values of 5.7, 2.2, 500, 1.5, and 2000 are given. The first parameter is fixed at a value of 5.7, and the last parameter is constrained to be above 50. EXAMPLE import mpfit import Numeric x = Numeric.arange(100, Numeric.float) p0 = [5.7, 2.2, 500., 1.5, 2000.] y = ( p[0] + p[1]*[x] + p[2]*[x**2] + p[3]*Numeric.sqrt(x) + p[4]*Numeric.log(x)) fa = {'x':x, 'y':y, 'err':err} m = mpfit('myfunct', p0, functkw=fa) print 'status = ', m.status if (m.status <= 0): print 'error message = ', m.errmsg print 'parameters = ', m.params Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X, Y, and ERR keyword parameters that are given by FUNCTKW. The results can be obtained from the returned object m. THEORY OF OPERATION There are many specific strategies for function minimization. One very popular technique is to use function gradient information to realize the local structure of the function. Near a local minimum the function value can be taylor expanded about x0 as follows: f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0) ----- --------------- ------------------------------- (1) Order 0th 1st 2nd Here f'(x) is the gradient vector of f at x, and f''(x) is the Hessian matrix of second derivatives of f at x. The vector x is the set of function parameters, not the measured data vector. One can find the minimum of f, f(xm) using Newton's method, and arrives at the following linear equation: f''(x0) . (xm-x0) = - f'(x0) (2) If an inverse can be found for f''(x0) then one can solve for (xm-x0), the step vector from the current position x0 to the new projected minimum. Here the problem has been linearized (ie, the gradient information is known to first order). f''(x0) is symmetric n x n matrix, and should be positive definite. The Levenberg - Marquardt technique is a variation on this theme. It adds an additional diagonal term to the equation which may aid the convergence properties: (f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a) where I is the identity matrix. When nu is large, the overall matrix is diagonally dominant, and the iterations follow steepest descent. When nu is small, the iterations are quadratically convergent. In principle, if f''(x0) and f'(x0) are known then xm-x0 can be determined. However the Hessian matrix is often difficult or impossible to compute. The gradient f'(x0) may be easier to compute, if even by finite difference techniques. So-called quasi-Newton techniques attempt to successively estimate f''(x0) by building up gradient information as the iterations proceed. In the least squares problem there are further simplifications which assist in solving eqn (2). The function to be minimized is a sum of squares: f = Sum(hi^2) (3) where hi is the ith residual out of m residuals as described above. 
This can be substituted back into eqn (2) after computing the derivatives: f' = 2 Sum(hi hi') f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4) If one assumes that the parameters are already close enough to a minimum, then one typically finds that the second term in f'' is negligible [or, in any case, is too difficult to compute]. Thus, equation (2) can be solved, at least approximately, using only gradient information. In matrix notation, the combination of eqns (2) and (4) becomes: hT' . h' . dx = - hT' . h (5) Where h is the residual vector (length m), hT is its transpose, h' is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The user function supplies the residual vector h, and in some cases h' when it is not found by finite differences (see MPFIT_FDJAC2, which finds h and hT'). Even if dx is not the best absolute step to take, it does provide a good estimate of the best *direction*, so often a line minimization will occur along the dx vector direction. The method of solution employed by MINPACK is to form the Q . R factorization of h', where Q is an orthogonal matrix such that QT . Q = I, and R is upper right triangular. Using h' = Q . R and the ortogonality of Q, eqn (5) becomes (RT . QT) . (Q . R) . dx = - (RT . QT) . h RT . R . dx = - RT . QT . h (6) R . dx = - QT . h where the last statement follows because R is upper triangular. Here, R, QT and h are known so this is a matter of solving for dx. The routine MPFIT_QRFAC provides the QR factorization of h, with pivoting, and MPFIT_QRSOLV provides the solution for dx. REFERENCES MINPACK-1, Jorge More', available from netlib (www.netlib.org). "Optimization Software Guide," Jorge More' and Stephen Wright, SIAM, *Frontiers in Applied Mathematics*, Number 14. More', Jorge J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," in *Numerical Analysis*, ed. Watson, G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977. MODIFICATION HISTORY Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM Copyright (C) 1997-2002, Craig Markwardt This software is provided as is without any warranty whatsoever. Permission to use, copy, modify, and distribute modified or unmodified copies is granted, provided this copyright and disclaimer are included unchanged. Translated from MPFIT (Craig Markwardt's IDL package) to Python, August, 2002. Mark Rivers """ from . import numerixenv numerixenv.check() import numpy import types # Original FORTRAN documentation # ********** # # subroutine lmdif # # the purpose of lmdif is to minimize the sum of the squares of # m nonlinear functions in n variables by a modification of # the levenberg-marquardt algorithm. the user must provide a # subroutine which calculates the functions. the jacobian is # then calculated by a forward-difference approximation. # # the subroutine statement is # # subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn, # diag,mode,factor,nprint,info,nfev,fjac, # ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4) # # where # # fcn is the name of the user-supplied subroutine which # calculates the functions. fcn must be declared # in an external statement in the user calling # program, and should be written as follows. # # subroutine fcn(m,n,x,fvec,iflag) # integer m,n,iflag # double precision x(n),fvec(m) # ---------- # calculate the functions at x and # return this vector in fvec. # ---------- # return # end # # the value of iflag should not be changed by fcn unless # the user wants to terminate execution of lmdif. # in this case set iflag to a negative integer. 
# # m is a positive integer input variable set to the number # of functions. # # n is a positive integer input variable set to the number # of variables. n must not exceed m. # # x is an array of length n. on input x must contain # an initial estimate of the solution vector. on output x # contains the final estimate of the solution vector. # # fvec is an output array of length m which contains # the functions evaluated at the output x. # # ftol is a nonnegative input variable. termination # occurs when both the actual and predicted relative # reductions in the sum of squares are at most ftol. # therefore, ftol measures the relative error desired # in the sum of squares. # # xtol is a nonnegative input variable. termination # occurs when the relative error between two consecutive # iterates is at most xtol. therefore, xtol measures the # relative error desired in the approximate solution. # # gtol is a nonnegative input variable. termination # occurs when the cosine of the angle between fvec and # any column of the jacobian is at most gtol in absolute # value. therefore, gtol measures the orthogonality # desired between the function vector and the columns # of the jacobian. # # maxfev is a positive integer input variable. termination # occurs when the number of calls to fcn is at least # maxfev by the end of an iteration. # # epsfcn is an input variable used in determining a suitable # step length for the forward-difference approximation. this # approximation assumes that the relative errors in the # functions are of the order of epsfcn. if epsfcn is less # than the machine precision, it is assumed that the relative # errors in the functions are of the order of the machine # precision. # # diag is an array of length n. if mode = 1 (see # below), diag is internally set. if mode = 2, diag # must contain positive entries that serve as # multiplicative scale factors for the variables. # # mode is an integer input variable. if mode = 1, the # variables will be scaled internally. if mode = 2, # the scaling is specified by the input diag. other # values of mode are equivalent to mode = 1. # # factor is a positive input variable used in determining the # initial step bound. this bound is set to the product of # factor and the euclidean norm of diag*x if nonzero, or else # to factor itself. in most cases factor should lie in the # interval (.1,100.). 100. is a generally recommended value. # # nprint is an integer input variable that enables controlled # printing of iterates if it is positive. in this case, # fcn is called with iflag = 0 at the beginning of the first # iteration and every nprint iterations thereafter and # immediately prior to return, with x and fvec available # for printing. if nprint is not positive, no special calls # of fcn with iflag = 0 are made. # # info is an integer output variable. if the user has # terminated execution, info is set to the (negative) # value of iflag. see description of fcn. otherwise, # info is set as follows. # # info = 0 improper input parameters. # # info = 1 both actual and predicted relative reductions # in the sum of squares are at most ftol. # # info = 2 relative error between two consecutive iterates # is at most xtol. # # info = 3 conditions for info = 1 and info = 2 both hold. # # info = 4 the cosine of the angle between fvec and any # column of the jacobian is at most gtol in # absolute value. # # info = 5 number of calls to fcn has reached or # exceeded maxfev. # # info = 6 ftol is too small. 
no further reduction in # the sum of squares is possible. # # info = 7 xtol is too small. no further improvement in # the approximate solution x is possible. # # info = 8 gtol is too small. fvec is orthogonal to the # columns of the jacobian to machine precision. # # nfev is an integer output variable set to the number of # calls to fcn. # # fjac is an output m by n array. the upper n by n submatrix # of fjac contains an upper triangular matrix r with # diagonal elements of nonincreasing magnitude such that # # t t t # p *(jac *jac)*p = r *r, # # where p is a permutation matrix and jac is the final # calculated jacobian. column j of p is column ipvt(j) # (see below) of the identity matrix. the lower trapezoidal # part of fjac contains information generated during # the computation of r. # # ldfjac is a positive integer input variable not less than m # which specifies the leading dimension of the array fjac. # # ipvt is an integer output array of length n. ipvt # defines a permutation matrix p such that jac*p = q*r, # where jac is the final calculated jacobian, q is # orthogonal (not stored), and r is upper triangular # with diagonal elements of nonincreasing magnitude. # column j of p is column ipvt(j) of the identity matrix. # # qtf is an output array of length n which contains # the first n elements of the vector (q transpose)*fvec. # # wa1, wa2, and wa3 are work arrays of length n. # # wa4 is a work array of length m. # # subprograms called # # user-supplied ...... fcn # # minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac # # fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # # ********** class mpfit: def __init__(self, fcn, xall=None, functkw={}, parinfo=None, ftol=1.e-10, xtol=1.e-10, gtol=1.e-10, damp=0., maxiter=200, factor=100., nprint=1, iterfunct='default', iterkw={}, nocovar=0, fastnorm=0, rescale=0, autoderivative=1, quiet=0, diag=None, epsfcn=None, debug=0): """ Inputs: fcn: The function to be minimized. The function should return the weighted deviations between the model and the data, as described above. xall: An array of starting values for each of the parameters of the model. The number of parameters should be fewer than the number of measurements. This parameter is optional if the parinfo keyword is used (but see parinfo). The parinfo keyword provides a mechanism to fix or constrain individual parameters. Keywords: autoderivative: If this is set, derivatives of the function will be computed automatically via a finite differencing procedure. If not set, then fcn must provide the (analytical) derivatives. Default: set (=1) NOTE: to supply your own analytical derivatives, explicitly pass autoderivative=0 fastnorm: Set this keyword to select a faster algorithm to compute sum-of-square values internally. For systems with large numbers of data points, the standard algorithm can become prohibitively slow because it cannot be vectorized well. By setting this keyword, MPFIT will run faster, but it will be more prone to floating point overflows and underflows. Thus, setting this keyword may sacrifice some stability in the fitting process. Default: clear (=0) ftol: A nonnegative input variable. Termination occurs when both the actual and predicted relative reductions in the sum of squares are at most ftol (and status is accordingly set to 1 or 3). Therefore, ftol measures the relative error desired in the sum of squares. 
Default: 1E-10 functkw: A dictionary which contains the parameters to be passed to the user-supplied function specified by fcn via the standard Python keyword dictionary mechanism. This is the way you can pass additional data to your user-supplied function without using global variables. Consider the following example: if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.], 'errval':[1.,1.,1.] } then the user supplied function should be declared like this: def myfunct(p, fjac=None, xval=None, yval=None, errval=None): Default: {} No extra parameters are passed to the user-supplied function. gtol: A nonnegative input variable. Termination occurs when the cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value (and status is accordingly set to 4). Therefore, gtol measures the orthogonality desired between the function vector and the columns of the jacobian. Default: 1e-10 iterkw: The keyword arguments to be passed to iterfunct via the dictionary keyword mechanism. This should be a dictionary and is similar in operation to FUNCTKW. Default: {} No arguments are passed. iterfunct: The name of a function to be called upon each NPRINT iteration of the MPFIT routine. It should be declared in the following way: def iterfunct(myfunct, p, iter, fnorm, functkw=None, parinfo=None, quiet=0, dof=None, [iterkw keywords here]) # perform custom iteration update iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO and QUIET). myfunct: The user-supplied function to be minimized, p: The current set of model parameters iter: The iteration number functkw: The arguments to be passed to myfunct. fnorm: The chi-squared value. quiet: Set when no textual output should be printed. dof: The number of degrees of freedom, normally the number of points less the number of free parameters. See below for documentation of parinfo. In implementation, iterfunct can perform updates to the terminal or graphical user interface, to provide feedback while the fit proceeds. If the fit is to be stopped for any reason, then iterfunct should return a a status value between -15 and -1. Otherwise it should return None (e.g. no return statement) or 0. In principle, iterfunct should probably not modify the parameter values, because it may interfere with the algorithm's stability. In practice it is allowed. Default: an internal routine is used to print the parameter values. Set iterfunct=None if there is no user-defined routine and you don't want the internal default routine be called. maxiter: The maximum number of iterations to perform. If the number is exceeded, then the status value is set to 5 and MPFIT returns. Default: 200 iterations nocovar: Set this keyword to prevent the calculation of the covariance matrix before returning (see COVAR) Default: clear (=0) The covariance matrix is returned nprint: The frequency with which iterfunct is called. A value of 1 indicates that iterfunct is called with every iteration, while 2 indicates every other iteration, etc. Note that several Levenberg-Marquardt attempts can be made in a single iteration. Default value: 1 parinfo Provides a mechanism for more sophisticated constraints to be placed on parameter values. When parinfo is not passed, then it is assumed that all parameters are free and unconstrained. Values in parinfo are never modified during a call to MPFIT. See description above for the structure of PARINFO. Default value: None All parameters are free and unconstrained. 
quiet: Set this keyword when no textual output should be printed by MPFIT damp: A scalar number, indicating the cut-off value of residuals where "damping" will occur. Residuals with magnitudes greater than this number will be replaced by their hyperbolic tangent. This partially mitigates the so-called large residual problem inherent in least-squares solvers (as for the test problem CURVI, http://www.maxthis.com/curviex.htm). A value of 0 indicates no damping. Default: 0 Note: DAMP doesn't work with autoderivative=0 xtol: A nonnegative input variable. Termination occurs when the relative error between two consecutive iterates is at most xtol (and status is accordingly set to 2 or 3). Therefore, xtol measures the relative error desired in the approximate solution. Default: 1E-10 Outputs: Returns an object of type mpfit. The results are attributes of this class, e.g. mpfit.status, mpfit.errmsg, mpfit.params, npfit.niter, mpfit.covar. .status An integer status code is returned. All values greater than zero can represent success (however .status == 5 may indicate failure to converge). It can have one of the following values: -16 A parameter or function value has become infinite or an undefined number. This is usually a consequence of numerical overflow in the user's model function, which must be avoided. -15 to -1 These are error codes that either MYFUNCT or iterfunct may return to terminate the fitting process. Values from -15 to -1 are reserved for the user functions and will not clash with MPFIT. 0 Improper input parameters. 1 Both actual and predicted relative reductions in the sum of squares are at most ftol. 2 Relative error between two consecutive iterates is at most xtol 3 Conditions for status = 1 and status = 2 both hold. 4 The cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value. 5 The maximum number of iterations has been reached. 6 ftol is too small. No further reduction in the sum of squares is possible. 7 xtol is too small. No further improvement in the approximate solution x is possible. 8 gtol is too small. fvec is orthogonal to the columns of the jacobian to machine precision. .fnorm The value of the summed squared residuals for the returned parameter values. .covar The covariance matrix for the set of parameters returned by MPFIT. The matrix is NxN where N is the number of parameters. The square root of the diagonal elements gives the formal 1-sigma statistical errors on the parameters if errors were treated "properly" in fcn. Parameter errors are also returned in .perror. To compute the correlation matrix, pcor, use this example: cov = mpfit.covar pcor = cov * 0. for i in range(n): for j in range(n): pcor[i,j] = cov[i,j]/Numeric.sqrt(cov[i,i]*cov[j,j]) If nocovar is set or MPFIT terminated abnormally, then .covar is set to a scalar with value None. .errmsg A string error or warning message is returned. .nfev The number of calls to MYFUNCT performed. .niter The number of iterations completed. .perror The formal 1-sigma errors in each parameter, computed from the covariance matrix. If a parameter is held fixed, or if it touches a boundary, then the error is reported as zero. If the fit is unweighted (i.e. no errors were given, or the weights were uniformly set to unity), then .perror will probably not represent the true parameter uncertainties. 
*If* you can assume that the true reduced chi-squared value is unity -- meaning that the fit is implicitly assumed to be of good quality -- then the estimated parameter uncertainties can be computed by scaling .perror by the measured chi-squared value. dof = len(x) - len(mpfit.params) # deg of freedom # scaled uncertainties pcerror = mpfit.perror * numpy.sqrt(mpfit.fnorm / dof) """ self.niter = 0 self.params = None self.covar = None self.perror = None self.status = 0 # Invalid input flag set while we check inputs self.debug = debug self.errmsg = '' self.fastnorm = fastnorm self.nfev = 0 self.damp = damp self.machar = machar(double=1) machep = self.machar.machep if fcn is None: self.errmsg = "Usage: parms = mpfit('myfunt', ... )" return if iterfunct == 'default': iterfunct = self.defiter ## Parameter damping doesn't work when user is providing their own ## gradients. if self.damp != 0 and autoderivative == 0: self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive' return ## Parameters can either be stored in parinfo, or x. x takes precedence if it exists if xall is None and parinfo is None: self.errmsg = 'ERROR: must pass parameters in P or PARINFO' return ## Be sure that PARINFO is of the right type if parinfo is not None: if (type(parinfo) != list): self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.' return else: if type(parinfo[0]) != dict: self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.' return if xall is not None and len(xall) != len(parinfo): self.errmsg = 'ERROR: number of elements in PARINFO and P must agree' return ## If the parameters were not specified at the command line, then ## extract them from PARINFO if xall is None: xall = self.parinfo(parinfo, 'value') if xall is None: self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.' return ## Make sure parameters are numpy arrays of type numpy.float #print 'xall', xall, type(xall) xall = numpy.asarray(xall, numpy.float) npar = len(xall) self.fnorm = -1. fnorm1 = -1. ## TIED parameters? ptied = self.parinfo(parinfo, 'tied', default='', n=npar) self.qanytied = 0 for i in range(npar): ptied[i] = ptied[i].strip() if (ptied[i] != ''): self.qanytied = 1 self.ptied = ptied ## FIXED parameters ? pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar) pfixed = (pfixed == 1) for i in range(npar): pfixed[i] = pfixed[i] or (ptied[i] != '') ## Tied parameters are also effectively fixed ## Finite differencing step, absolute and relative, and sidedness of deriv. step = self.parinfo(parinfo, 'step', default=0., n=npar) dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar) dside = self.parinfo(parinfo, 'mpside', default=0, n=npar) ## Maximum and minimum steps allowed to be taken in one iteration maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar) minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar) qmin = minstep * 0 ## Remove minstep for now!! qmax = maxstep != 0 wh = numpy.nonzero(((qmin!=0.) & (qmax!=0.)) & (maxstep < minstep)) #check if it's 1d array? if (len(wh[0]) > 0): self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP' return wh = numpy.nonzero((qmin!=0.) 
& (qmax!=0.)) qminmax = len(wh[0] > 0) ## Finish up the free parameters ifree = (numpy.nonzero(pfixed != 1))[0] nfree = len(ifree) if nfree == 0: self.errmsg = 'ERROR: no free parameters' return ## Compose only VARYING parameters self.params = xall ## self.params is the set of parameters to be returned x = numpy.take(self.params, ifree) ## x is the set of free parameters ## LIMITED parameters ? limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar) limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar) if limited is not None and limits is not None: ## Error checking on limits in parinfo wh = numpy.nonzero((limited[:,0] & (xall < limits[:,0])) | (limited[:,1] & (xall > limits[:,1]))) if (len(wh[0]) > 0): self.errmsg = 'ERROR: parameters are not within PARINFO limits' return wh = numpy.nonzero((limited[:,0] & limited[:,1]) & (limits[:,0] >= limits[:,1]) & (pfixed == 0)) if (len(wh[0]) > 0): self.errmsg = 'ERROR: PARINFO parameter limits are not consistent' return ## Transfer structure values to local variables qulim = numpy.take(limited[:,1], ifree) ulim = numpy.take(limits [:,1], ifree) qllim = numpy.take(limited[:,0], ifree) llim = numpy.take(limits [:,0], ifree) wh = numpy.nonzero((qulim!=0.) | (qllim!=0.)) if (len(wh[0]) > 0): qanylim = 1 else: qanylim = 0 else: ## Fill in local variables with dummy values qulim = numpy.zeros(nfree, dtype=n.int8) ulim = x * 0. qllim = qulim llim = x * 0. qanylim = 0 n = len(x) ## Check input parameters for errors if ((n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) or (maxiter <= 0) or (factor <= 0)): self.errmsg = 'ERROR: input keywords are inconsistent' return if (rescale != 0): self.errmsg = 'ERROR: DIAG parameter scales are inconsistent' if (len(diag) < n): return wh = (numpy.nonzero(diag <= 0))[0] if (len(wh) > 0): return self.errmsg = '' # Make sure x is a numpy array of type numpy.float x = numpy.asarray(x, numpy.float64) [self.status, fvec] = self.call(fcn, self.params, functkw) if (self.status < 0): self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed' return m = len(fvec) if (m < n): self.errmsg = 'ERROR: number of parameters must not exceed data' return self.fnorm = self.enorm(fvec) ## Initialize Levelberg-Marquardt parameter and iteration counter par = 0. self.niter = 1 qtf = x * 0. self.status = 0 ## Beginning of the outer loop while(1): ## If requested, call fcn to enable printing of iterates numpy.put(self.params, ifree, x) if self.qanytied: self.params = self.tie(self.params, ptied) if nprint > 0 and iterfunct is not None: if (((self.niter-1) % nprint) == 0): mperr = 0 xnew0 = self.params.copy() dof = max(len(fvec) - len(x), 0) status = iterfunct(fcn, self.params, self.niter, self.fnorm**2, functkw=functkw, parinfo=parinfo, quiet=quiet, dof=dof, **iterkw) if status is not None: self.status = status ## Check for user termination if (self.status < 0): self.errmsg = 'WARNING: premature termination by ' + str(iterfunct) return ## If parameters were changed (grrr..) 
then re-tie if (max(abs(xnew0-self.params)) > 0): if (self.qanytied): self.params = self.tie(self.params, ptied) x = numpy.take(self.params, ifree) ## Calculate the jacobian matrix self.status = 2 catch_msg = 'calling MPFIT_FDJAC2' fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside, epsfcn=epsfcn, autoderivative=autoderivative, dstep=dstep, functkw=functkw, ifree=ifree, xall=self.params) if fjac is None: self.errmsg = 'WARNING: premature termination by FDJAC2' return ## Determine if any of the parameters are pegged at the limits if qanylim: catch_msg = 'zeroing derivatives of pegged parameters' whlpeg = (numpy.nonzero(qllim & (x == llim)))[0] nlpeg = len(whlpeg) whupeg = (numpy.nonzero(qulim & (x == ulim)) )[0] nupeg = len(whupeg) ## See if any "pegged" values should keep their derivatives if nlpeg > 0: ## Total derivative of sum wrt lower pegged parameters for i in range(nlpeg): sum = numpy.sum(fvec * fjac[:,whlpeg[i]]) if (sum > 0): fjac[:,whlpeg[i]] = 0 if nupeg > 0: ## Total derivative of sum wrt upper pegged parameters for i in range(nupeg): sum = numpy.sum(fvec * fjac[:,whupeg[i]]) if (sum < 0): fjac[:,whupeg[i]] = 0 ## Compute the QR factorization of the jacobian [fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1) ## On the first iteration if "diag" is unspecified, scale ## according to the norms of the columns of the initial jacobian catch_msg = 'rescaling diagonal elements' if self.niter == 1: if rescale == 0 or len(diag) < n: diag = wa2.copy() wh = (numpy.nonzero(diag == 0) )[0] numpy.put(diag, wh, 1.) ## On the first iteration, calculate the norm of the scaled x ## and initialize the step bound delta wa3 = diag * x xnorm = self.enorm(wa3) delta = factor*xnorm if (delta == 0.): delta = factor ## Form (q transpose)*fvec and store the first n components in qtf catch_msg = 'forming (q transpose)*fvec' wa4 = fvec.copy() for j in range(n): lj = ipvt[j] temp3 = fjac[j,lj] if (temp3 != 0): fj = fjac[j:,lj] wj = wa4[j:] ## *** optimization wa4(j:*) wa4[j:] = wj - fj * numpy.sum(fj*wj) / temp3 fjac[j,lj] = wa1[j] qtf[j] = wa4[j] ## From this point on, only the square matrix, consisting of the ## triangle of R, is needed. fjac = fjac[0:n, 0:n] fjac.shape = [n, n] temp = fjac.copy() for i in range(n): temp[:,i] = fjac[:, ipvt[i]] fjac = temp.copy() ## Check for overflow. This should be a cheap test here since FJAC ## has been reduced to a (small) square matrix, and the test is ## O(N^2). #wh = where(finite(fjac) EQ 0, ct) #if ct GT 0 then goto, FAIL_OVERFLOW ## Compute the norm of the scaled gradient catch_msg = 'computing the scaled gradient' gnorm = 0. if (self.fnorm != 0): for j in range(n): l = ipvt[j] if (wa2[l] != 0): sum = numpy.sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm gnorm = max([gnorm,abs(sum/wa2[l])]) ## Test for convergence of the gradient norm if (gnorm <= gtol): self.status = 4 return ## Rescale if necessary if (rescale == 0): diag = numpy.choose(diag>wa2, (wa2, diag)) ## Beginning of the inner loop while(1): ## Determine the levenberg-marquardt parameter catch_msg = 'calculating LM parameter (MPFIT_)' [fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf, delta, wa1, wa2, par=par) ## Store the direction p and x+p. Calculate the norm of p wa1 = -wa1 if (qanylim == 0) and (qminmax == 0): ## No parameter limits, so just move to new position WA2 alpha = 1. wa2 = x + wa1 else: ## Respect the limits. If a step were to go out of bounds, then ## we should take a step in the same direction but shorter distance. ## The step should take us right to the limit in that case. 
alpha = 1. if (qanylim): ## Do not allow any steps out of bounds catch_msg = 'checking for a step out of bounds' if (nlpeg > 0): numpy.put(wa1, whlpeg, numpy.clip( numpy.take(wa1, whlpeg), 0., max(wa1))) if (nupeg > 0): numpy.put(wa1, whupeg, numpy.clip( numpy.take(wa1, whupeg), min(wa1), 0.)) dwa1 = abs(wa1) > machep whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)) )[0] if (len(whl) > 0): t = (((numpy.take(llim, whl) - numpy.take(x, whl)) / numpy.take(wa1, whl))) alpha = min(alpha, min(t)) whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)) )[0] if (len(whu) > 0): t = ((numpy.take(ulim, whu) - numpy.take(x, whu)) / numpy.take(wa1, whu)) alpha = min(alpha, min(t)) ## Obey any max step values. if (qminmax): nwa1 = wa1 * alpha whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)) )[0] if (len(whmax) > 0): mrat = max(numpy.take(nwa1, whmax) / numpy.take(maxstep, whmax)) if (mrat > 1): alpha = alpha / mrat ## Scale the resulting vector wa1 = wa1 * alpha wa2 = x + wa1 ## Adjust the final output values. If the step put us exactly ## on a boundary, make sure it is exact. wh = (numpy.nonzero((qulim!=0.) & (wa2 >= ulim*(1-machep))) )[0] if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(ulim, wh)) wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim*(1+machep))) )[0] if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(llim, wh)) # endelse wa3 = diag * wa1 pnorm = self.enorm(wa3) ## On the first iteration, adjust the initial step bound if (self.niter == 1): delta = min([delta,pnorm]) numpy.put(self.params, ifree, wa2) ## Evaluate the function at x+p and calculate its norm mperr = 0 catch_msg = 'calling '+str(fcn) [self.status, wa4] = self.call(fcn, self.params, functkw) if (self.status < 0): self.errmsg = 'WARNING: premature termination by "'+fcn+'"' return fnorm1 = self.enorm(wa4) ## Compute the scaled actual reduction catch_msg = 'computing convergence criteria' actred = -1. if ((0.1 * fnorm1) < self.fnorm): actred = - (fnorm1/self.fnorm)**2 + 1. ## Compute the scaled predicted reduction and the scaled directional ## derivative for j in range(n): wa3[j] = 0 wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]] ## Remember, alpha is the fraction of the full LM step actually ## taken temp1 = self.enorm(alpha*wa3)/self.fnorm temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm prered = temp1*temp1 + (temp2*temp2)/0.5 dirder = -(temp1*temp1 + temp2*temp2) ## Compute the ratio of the actual to the predicted reduction. ratio = 0. if (prered != 0): ratio = actred/prered ## Update the step bound if (ratio <= 0.25): if (actred >= 0): temp = .5 else: temp = .5*dirder/(dirder + .5*actred) if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1): temp = 0.1 delta = temp*min([delta,pnorm/0.1]) par = par/temp else: if (par == 0) or (ratio >= 0.75): delta = pnorm/.5 par = .5*par ## Test for successful iteration if (ratio >= 0.0001): ## Successful iteration. 
Update x, fvec, and their norms x = wa2 wa2 = diag * x fvec = wa4 xnorm = self.enorm(wa2) self.fnorm = fnorm1 self.niter = self.niter + 1 ## Tests for convergence if ((abs(actred) <= ftol) and (prered <= ftol) and (0.5 * ratio <= 1)): self.status = 1 if delta <= xtol*xnorm: self.status = 2 if ((abs(actred) <= ftol) and (prered <= ftol) and (0.5 * ratio <= 1) and (self.status == 2)): self.status = 3 if (self.status != 0): break ## Tests for termination and stringent tolerances if (self.niter >= maxiter): self.status = 5 if ((abs(actred) <= machep) and (prered <= machep) and (0.5*ratio <= 1)): self.status = 6 if delta <= machep*xnorm: self.status = 7 if gnorm <= machep: self.status = 8 if (self.status != 0): break ## End of inner loop. Repeat if iteration unsuccessful if (ratio >= 0.0001): break ## Check for over/underflow - SKIP FOR NOW ##wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct) ##if ct GT 0 OR finite(ratio) EQ 0 then begin ## errmsg = ('ERROR: parameter or function value(s) have become '+$ ## 'infinite# check model function for over- '+$ ## 'and underflow') ## self.status = -16 ## break if (self.status != 0): break; ## End of outer loop. catch_msg = 'in the termination phase' ## Termination, either normal or user imposed. if (len(self.params) == 0): return if (nfree == 0): self.params = xall.copy() else: numpy.put(self.params, ifree, x) if nprint > 0 and self.status > 0: catch_msg = 'calling ' + str(fcn) [status, fvec] = self.call(fcn, self.params, functkw) catch_msg = 'in the termination phase' self.fnorm = self.enorm(fvec) if self.fnorm is not None and fnorm1 is not None: self.fnorm = max([self.fnorm, fnorm1]) self.fnorm = self.fnorm**2. self.covar = None self.perror = None ## (very carefully) set the covariance matrix COVAR if (self.status > 0 and nocovar == 0 and n is not None and fjac is not None and ipvt is not None): sz = numpy.shape(fjac) if (n > 0 and sz[0] >= n and sz[1] >= n and len(ipvt) >= n): catch_msg = 'computing the covariance matrix' cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n]) cv.shape = [n, n] nn = len(xall) ## Fill in actual covariance matrix, accounting for fixed ## parameters. self.covar = numpy.zeros([nn, nn], numpy.float) for i in range(n): indices = ifree+ifree[i]*nn numpy.put(self.covar, indices, cv[:,i]) #numpy.put(self.covar, i, cv[:,i]) ## Compute errors in parameters catch_msg = 'computing parameter errors' self.perror = numpy.zeros(nn, numpy.float) d = numpy.diagonal(self.covar) wh = (numpy.nonzero(d >= 0) )[0] if len(wh) > 0: numpy.put(self.perror, wh, numpy.sqrt(numpy.take(d, wh))) return ## Default procedure to be called every iteration. It simply prints ## the parameter values. 
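    ## A user-supplied callback with the same signature can replace it; a
    ## minimal sketch (the 'iterfunct' hook name is assumed from the
    ## standard mpfit interface, and my_iter is hypothetical):
    ##
    ##     def my_iter(fcn, x, iter, fnorm=None, functkw=None, quiet=0,
    ##                 iterstop=None, parinfo=None, format=None,
    ##                 pformat='%.10g', dof=1):
    ##         print('iter %d: CHI-SQUARE = %.10g  DOF = %d'
    ##               % (iter, fnorm, dof))
    ##         return 0   # a negative return requests early termination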
def defiter(self, fcn, x, iter, fnorm=None, functkw=None, quiet=0, iterstop=None, parinfo=None, format=None, pformat='%.10g', dof=1): if self.debug: print('Entering defiter...') if quiet: return if fnorm is None: [status, fvec] = self.call(fcn, x, functkw) fnorm = self.enorm(fvec)**2 ## Determine which parameters to print nprint = len(x) print("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof)) for i in range(nprint): if parinfo is not None and 'parname' in parinfo[i]: p = ' ' + parinfo[i]['parname'] + ' = ' else: p = ' P' + str(i) + ' = ' if parinfo is not None and 'mpprint' in parinfo[i]: iprint = parinfo[i]['mpprint'] else: iprint = 1 if iprint: print(p + (pformat % x[i]) + ' ') return 0 ## DO_ITERSTOP: ## if keyword_set(iterstop) then begin ## k = get_kbrd(0) ## if k EQ string(byte(7)) then begin ## message, 'WARNING: minimization not complete', /info ## print, 'Do you want to terminate this procedure? (y/n)', $ ## format='(A,$)' ## k = '' ## read, k ## if strupcase(strmid(k,0,1)) EQ 'Y' then begin ## message, 'WARNING: Procedure is terminating.', /info ## mperr = -1 ## endif ## endif ## endif ## Procedure to parse the parameter values in PARINFO, which is a list of dictionaries def parinfo(self, parinfo=None, key='a', default=None, n=0): if self.debug: print('Entering parinfo...') if n == 0 and parinfo is not None: n = len(parinfo) if n == 0: values = default return(values) values = [] for i in range(n): if parinfo is not None and key in parinfo[i]: values.append(parinfo[i][key]) else: values.append(default) # Convert to numeric arrays if possible test = default if type(default) == list: test=default[0] if type(test) == int: values = numpy.asarray(values, dtype=numpy.int) elif type(test) == float: values = numpy.asarray(values, dtype=numpy.float) return(values) ## Call user function or procedure, with _EXTRA or not, with ## derivatives or not. def call(self, fcn, x, functkw, fjac=None): if self.debug: print('Entering call...') if self.qanytied: x = self.tie(x, self.ptied) self.nfev = self.nfev + 1 if fjac is None: [status, f] = fcn(x, fjac=fjac, **functkw) if self.damp > 0: ## Apply the damping if requested. This replaces the residuals ## with their hyperbolic tangent. Thus residuals larger than ## DAMP are essentially clipped. f = numpy.tanh(f/self.damp) return([status, f]) else: return(fcn(x, fjac=fjac, **functkw)) def enorm(self, vec): if (self.debug): print('Entering enorm...') ## NOTE: it turns out that, for systems that have a lot of data ## points, this routine is a big computing bottleneck. The extended ## computations that need to be done cannot be effectively ## vectorized. The introduction of the FASTNORM configuration ## parameter allows the user to select a faster routine, which is ## based on TOTAL() alone. # Very simple-minded sum-of-squares if (self.fastnorm): ans = numpy.sqrt(numpy.sum(vec*vec)) else: agiant = self.machar.rgiant / len(vec) adwarf = self.machar.rdwarf * len(vec) ## This is hopefully a compromise between speed and robustness. ## Need to do this because of the possibility of over- or underflow. mx = max(vec) mn = min(vec) mx = max(abs(mx), abs(mn)) if mx == 0: return(vec[0]*0.) 
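            ## (The mx scaling below keeps every element of vec/mx within
            ## [-1, 1], so the squares cannot overflow even when the largest
            ## element is close to machar.rgiant, nor underflow to zero near
            ## machar.rdwarf, at the cost of one extra pass over the data.)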
if mx > agiant or mx < adwarf: ans = mx * numpy.sqrt(numpy.sum((vec/mx)*(vec/mx))) else: ans = numpy.sqrt(numpy.sum(vec*vec)) return(ans) def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None, epsfcn=None, autoderivative=1, functkw=None, xall=None, ifree=None, dstep=None): if (self.debug): print('Entering fdjac2...') machep = self.machar.machep if epsfcn is None: epsfcn = machep if xall is None: xall = x if ifree is None: ifree = numpy.arange(len(xall)) if step is None: step = x * 0. nall = len(xall) eps = numpy.sqrt(max([epsfcn, machep])) m = len(fvec) n = len(x) ## Compute analytical derivative if requested if (autoderivative == 0): mperr = 0 fjac = numpy.zeros(nall, numpy.float) numpy.put(fjac, ifree, 1.0) ## Specify which parameters need derivatives [status, fp] = self.call(fcn, xall, functkw, fjac=fjac) if len(fjac) != m*nall: print('ERROR: Derivative matrix was not computed properly.') return(None) ## This definition is c1onsistent with CURVEFIT ## Sign error found (thanks Jesus Fernandez ) fjac.shape = [m,nall] fjac = -fjac ## Select only the free parameters if len(ifree) < nall: fjac = fjac[:,ifree] fjac.shape = [m, n] return(fjac) fjac = numpy.zeros([m, n], numpy.float) h = eps * abs(x) ## if STEP is given, use that if step is not None: stepi = numpy.take(step, ifree) wh = (numpy.nonzero(stepi > 0) )[0] if (len(wh) > 0): numpy.put(h, wh, numpy.take(stepi, wh)) ## if relative step is given, use that if (len(dstep) > 0): dstepi = numpy.take(dstep, ifree) wh = (numpy.nonzero(dstepi > 0) )[0] if len(wh) > 0: numpy.put(h, wh, abs(numpy.take(dstepi,wh)*numpy.take(x,wh))) ## In case any of the step values are zero wh = (numpy.nonzero(h == 0) )[0] if len(wh) > 0: numpy.put(h, wh, eps) ## Reverse the sign of the step if we are up against the parameter ## limit, or if the user requested it. #mask = dside == -1 mask = numpy.take((dside == -1), ifree) if len(ulimited) > 0 and len(ulimit) > 0: #mask = mask or (ulimited and (x > ulimit-h)) mask = mask | (ulimited & (x > ulimit-h)) wh = (numpy.nonzero(mask))[0] if len(wh) > 0: numpy.put(h, wh, -numpy.take(h, wh)) ## Loop through parameters, computing the derivative for each for j in range(n): xp = xall.copy() xp[ifree[j]] = xp[ifree[j]] + h[j] [status, fp] = self.call(fcn, xp, functkw) if (status < 0): return(None) if abs(dside[j]) <= 1: ## COMPUTE THE ONE-SIDED DERIVATIVE ## Note optimization fjac(0:*,j) fjac[0:,j] = (fp-fvec)/h[j] else: ## COMPUTE THE TWO-SIDED DERIVATIVE xp[ifree[j]] = xall[ifree[j]] - h[j] mperr = 0 [status, fm] = self.call(fcn, xp, functkw) if (status < 0): return(None) ## Note optimization fjac(0:*,j) fjac[0:,j] = (fp-fm)/(2*h[j]) return(fjac) # Original FORTRAN documentation # ********** # # subroutine qrfac # # this subroutine uses householder transformations with column # pivoting (optional) to compute a qr factorization of the # m by n matrix a. that is, qrfac determines an orthogonal # matrix q, a permutation matrix p, and an upper trapezoidal # matrix r with diagonal elements of nonincreasing magnitude, # such that a*p = q*r. the householder transformation for # column k, k = 1,2,...,min(m,n), is of the form # # t # i - (1/u(k))*u*u # # where u has zeros in the first k-1 positions. the form of # this transformation and the method of pivoting first # appeared in the corresponding linpack subroutine. # # the subroutine statement is # # subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa) # # where # # m is a positive integer input variable set to the number # of rows of a. 
# # n is a positive integer input variable set to the number # of columns of a. # # a is an m by n array. on input a contains the matrix for # which the qr factorization is to be computed. on output # the strict upper trapezoidal part of a contains the strict # upper trapezoidal part of r, and the lower trapezoidal # part of a contains a factored form of q (the non-trivial # elements of the u vectors described above). # # lda is a positive integer input variable not less than m # which specifies the leading dimension of the array a. # # pivot is a logical input variable. if pivot is set true, # then column pivoting is enforced. if pivot is set false, # then no column pivoting is done. # # ipvt is an integer output array of length lipvt. ipvt # defines the permutation matrix p such that a*p = q*r. # column j of p is column ipvt(j) of the identity matrix. # if pivot is false, ipvt is not referenced. # # lipvt is a positive integer input variable. if pivot is false, # then lipvt may be as small as 1. if pivot is true, then # lipvt must be at least n. # # rdiag is an output array of length n which contains the # diagonal elements of r. # # acnorm is an output array of length n which contains the # norms of the corresponding columns of the input matrix a. # if this information is not needed, then acnorm can coincide # with rdiag. # # wa is a work array of length n. if pivot is false, then wa # can coincide with rdiag. # # subprograms called # # minpack-supplied ... dpmpar,enorm # # fortran-supplied ... dmax1,dsqrt,min0 # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # # ********** # NOTE: in IDL the factors appear slightly differently than described # above. The matrix A is still m x n where m >= n. # # The "upper" triangular matrix R is actually stored in the strict # lower left triangle of A under the standard notation of IDL. # # The reflectors that generate Q are in the upper trapezoid of A upon # output. # # EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]] # aa = [[9.,2.,6.],[4.,8.,7.]] # mpfit_qrfac, aa, aapvt, rdiag, aanorm # IDL> print, aa # 1.81818* 0.181818* 0.545455* # -8.54545+ 1.90160* 0.432573* # IDL> print, rdiag # -11.0000+ -7.48166+ # # The components marked with a * are the components of the # reflectors, and those marked with a + are components of R. # # To reconstruct Q and R we proceed as follows. First R. # r = fltarr(m, n) # for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag # r(lindgen(n)*(m+1)) = rdiag # # Next, Q, which are composed from the reflectors. Each reflector v # is taken from the upper trapezoid of aa, and converted to a matrix # via (I - 2 vT . v / (v . vT)). # # hh = ident ## identity matrix # for i = 0, n-1 do begin # v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 ## extract reflector # hh = hh ## (ident - 2*(v # v)/total(v * v)) ## generate matrix # endfor # # Test the result: # IDL> print, hh ## transpose(r) # 9.00000 4.00000 # 2.00000 8.00000 # 6.00000 7.00000 # # Note that it is usually never necessary to form the Q matrix # explicitly, and MPFIT does not. 
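    # EXAMPLE (numpy cross-check; a sketch, not part of the original
    # documentation): for a full-rank matrix with pivot=0, the QR
    # factorization is unique up to column signs, so the magnitudes of
    # rdiag should agree with numpy.linalg.qr:
    #
    #   a = numpy.array([[9., 2., 6.], [4., 8., 7.], [1., 5., 3.]])
    #   q, r = numpy.linalg.qr(a)
    #   aa, ipvt, rdiag, acnorm = self.qrfac(a.copy(), pivot=0)
    #   numpy.allclose(abs(rdiag), abs(numpy.diagonal(r)))   # -> True
    #
    # (self here is any instance of this class; qrfac modifies its input
    # array in place, hence the copy.)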
def qrfac(self, a, pivot=0): if (self.debug): print('Entering qrfac...') machep = self.machar.machep sz = numpy.shape(a) m = sz[0] n = sz[1] ## Compute the initial column norms and initialize arrays acnorm = numpy.zeros(n, numpy.float) for j in range(n): acnorm[j] = self.enorm(a[:,j]) rdiag = acnorm.copy() wa = rdiag.copy() ipvt = numpy.arange(n) ## Reduce a to r with householder transformations minmn = min([m,n]) for j in range(minmn): if (pivot != 0): ## Bring the column of largest norm into the pivot position rmax = max(rdiag[j:]) kmax = (numpy.nonzero(rdiag[j:] == rmax) )[0] ct = len(kmax) kmax = kmax + j if ct > 0: kmax = kmax[0] ## Exchange rows via the pivot only. Avoid actually exchanging ## the rows, in case there is lots of memory transfer. The ## exchange occurs later, within the body of MPFIT, after the ## extraneous columns of the matrix have been shed. if kmax != j: temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp rdiag[kmax] = rdiag[j] wa[kmax] = wa[j] ## Compute the householder transformation to reduce the jth ## column of A to a multiple of the jth unit vector lj = ipvt[j] ajj = a[j:,lj] ajnorm = self.enorm(ajj) if ajnorm == 0: break if a[j,j] < 0: ajnorm = -ajnorm ajj = ajj / ajnorm ajj[0] = ajj[0] + 1 ## *** Note optimization a(j:*,j) a[j:,lj] = ajj ## Apply the transformation to the remaining columns ## and update the norms ## NOTE to SELF: tried to optimize this by removing the loop, ## but it actually got slower. Reverted to "for" loop to keep ## it simple. if (j+1 < n): for k in range(j+1, n): lk = ipvt[k] ajk = a[j:,lk] ## *** Note optimization a(j:*,lk) ## (corrected 20 Jul 2000) if a[j,lj] != 0: a[j:,lk] = ajk - ajj * numpy.sum(ajk*ajj)/a[j,lj] if ((pivot != 0) and (rdiag[k] != 0)): temp = a[j,lk]/rdiag[k] rdiag[k] = rdiag[k] * numpy.sqrt(max((1.-temp**2), 0.)) temp = rdiag[k]/wa[k] if ((0.05*temp*temp) <= machep): rdiag[k] = self.enorm(a[j+1:,lk]) wa[k] = rdiag[k] rdiag[j] = -ajnorm return([a, ipvt, rdiag, acnorm]) # Original FORTRAN documentation # ********** # # subroutine qrsolv # # given an m by n matrix a, an n by n diagonal matrix d, # and an m-vector b, the problem is to determine an x which # solves the system # # a*x = b , d*x = 0 , # # in the least squares sense. # # this subroutine completes the solution of the problem # if it is provided with the necessary information from the # factorization, with column pivoting, of a. that is, if # a*p = q*r, where p is a permutation matrix, q has orthogonal # columns, and r is an upper triangular matrix with diagonal # elements of nonincreasing magnitude, then qrsolv expects # the full upper triangle of r, the permutation matrix p, # and the first n components of (q transpose)*b. the system # a*x = b, d*x = 0, is then equivalent to # # t t # r*z = q *b , p *d*p*z = 0 , # # where x = p*z. if this system does not have full rank, # then a least squares solution is obtained. on output qrsolv # also provides an upper triangular matrix s such that # # t t t # p *(a *a + d*d)*p = s *s . # # s is computed within qrsolv and may be of separate interest. # # the subroutine statement is # # subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa) # # where # # n is a positive integer input variable set to the order of r. # # r is an n by n array. on input the full upper triangle # must contain the full upper triangle of the matrix r. # on output the full upper triangle is unaltered, and the # strict lower triangle contains the strict upper triangle # (transposed) of the upper triangular matrix s. 
# # ldr is a positive integer input variable not less than n # which specifies the leading dimension of the array r. # # ipvt is an integer input array of length n which defines the # permutation matrix p such that a*p = q*r. column j of p # is column ipvt(j) of the identity matrix. # # diag is an input array of length n which must contain the # diagonal elements of the matrix d. # # qtb is an input array of length n which must contain the first # n elements of the vector (q transpose)*b. # # x is an output array of length n which contains the least # squares solution of the system a*x = b, d*x = 0. # # sdiag is an output array of length n which contains the # diagonal elements of the upper triangular matrix s. # # wa is a work array of length n. # # subprograms called # # fortran-supplied ... dabs,dsqrt # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # def qrsolv(self, r, ipvt, diag, qtb, sdiag): if (self.debug): print('Entering qrsolv...') sz = numpy.shape(r) m = sz[0] n = sz[1] ## copy r and (q transpose)*b to preserve input and initialize s. ## in particular, save the diagonal elements of r in x. for j in range(n): r[j:n,j] = r[j,j:n] x = numpy.diagonal(r).copy() wa = qtb.copy() ## Eliminate the diagonal matrix d using a givens rotation for j in range(n): l = ipvt[j] if (diag[l] == 0): break sdiag[j:] = 0 sdiag[j] = diag[l] ## The transformations to eliminate the row of d modify only a ## single element of (q transpose)*b beyond the first n, which ## is initially zero. qtbpj = 0. for k in range(j,n): if (sdiag[k] == 0): break if (abs(r[k,k]) < abs(sdiag[k])): cotan = r[k,k]/sdiag[k] sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan) cosine = sine*cotan else: tang = sdiag[k]/r[k,k] cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang) sine = cosine*tang ## Compute the modified diagonal element of r and the ## modified element of ((q transpose)*b,0). r[k,k] = cosine*r[k,k] + sine*sdiag[k] temp = cosine*wa[k] + sine*qtbpj qtbpj = -sine*wa[k] + cosine*qtbpj wa[k] = temp ## Accumulate the transformation in the row of s if (n > k+1): temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n] sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n] r[k+1:n,k] = temp sdiag[j] = r[j,j] r[j,j] = x[j] ## Solve the triangular system for z. If the system is singular ## then obtain a least squares solution nsing = n wh = (numpy.nonzero(sdiag == 0) )[0] if (len(wh) > 0): nsing = wh[0] wa[nsing:] = 0 if (nsing >= 1): wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] ## Degenerate case ## *** Reverse loop *** for j in range(nsing-2,-1,-1): sum = numpy.sum(r[j+1:nsing,j]*wa[j+1:nsing]) wa[j] = (wa[j]-sum)/sdiag[j] ## Permute the components of z back to components of x numpy.put(x, ipvt, wa) return(r, x, sdiag) # Original FORTRAN documentation # # subroutine lmpar # # given an m by n matrix a, an n by n nonsingular diagonal # matrix d, an m-vector b, and a positive number delta, # the problem is to determine a value for the parameter # par such that if x solves the system # # a*x = b , sqrt(par)*d*x = 0 , # # in the least squares sense, and dxnorm is the euclidean # norm of d*x, then either par is zero and # # (dxnorm-delta) .le. 0.1*delta , # # or par is positive and # # abs(dxnorm-delta) .le. 0.1*delta . # # this subroutine completes the solution of the problem # if it is provided with the necessary information from the # qr factorization, with column pivoting, of a. 
that is, if # a*p = q*r, where p is a permutation matrix, q has orthogonal # columns, and r is an upper triangular matrix with diagonal # elements of nonincreasing magnitude, then lmpar expects # the full upper triangle of r, the permutation matrix p, # and the first n components of (q transpose)*b. on output # lmpar also provides an upper triangular matrix s such that # # t t t # p *(a *a + par*d*d)*p = s *s . # # s is employed within lmpar and may be of separate interest. # # only a few iterations are generally needed for convergence # of the algorithm. if, however, the limit of 10 iterations # is reached, then the output par will contain the best # value obtained so far. # # the subroutine statement is # # subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag, # wa1,wa2) # # where # # n is a positive integer input variable set to the order of r. # # r is an n by n array. on input the full upper triangle # must contain the full upper triangle of the matrix r. # on output the full upper triangle is unaltered, and the # strict lower triangle contains the strict upper triangle # (transposed) of the upper triangular matrix s. # # ldr is a positive integer input variable not less than n # which specifies the leading dimension of the array r. # # ipvt is an integer input array of length n which defines the # permutation matrix p such that a*p = q*r. column j of p # is column ipvt(j) of the identity matrix. # # diag is an input array of length n which must contain the # diagonal elements of the matrix d. # # qtb is an input array of length n which must contain the first # n elements of the vector (q transpose)*b. # # delta is a positive input variable which specifies an upper # bound on the euclidean norm of d*x. # # par is a nonnegative variable. on input par contains an # initial estimate of the levenberg-marquardt parameter. # on output par contains the final estimate. # # x is an output array of length n which contains the least # squares solution of the system a*x = b, sqrt(par)*d*x = 0, # for the output par. # # sdiag is an output array of length n which contains the # diagonal elements of the upper triangular matrix s. # # wa1 and wa2 are work arrays of length n. # # subprograms called # # minpack-supplied ... dpmpar,enorm,qrsolv # # fortran-supplied ... dabs,dmax1,dmin1,dsqrt # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None): if (self.debug): print('Entering lmpar...') dwarf = self.machar.minnum sz = numpy.shape(r) m = sz[0] n = sz[1] ## Compute and store in x the gauss-newton direction. If the ## jacobian is rank-deficient, obtain a least-squares solution nsing = n wa1 = qtb.copy() wh = (numpy.nonzero(numpy.diagonal(r) == 0) )[0] if len(wh) > 0: nsing = wh[0] wa1[wh[0]:] = 0 if nsing > 1: ## *** Reverse loop *** for j in range(nsing-1,-1,-1): wa1[j] = wa1[j]/r[j,j] if (j-1 >= 0): wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j] ## Note: ipvt here is a permutation array numpy.put(x, ipvt, wa1) ## Initialize the iteration counter. Evaluate the function at the ## origin, and test for acceptance of the gauss-newton direction iter = 0 wa2 = diag * x dxnorm = self.enorm(wa2) fp = dxnorm - delta if (fp <= 0.1*delta): return[r, 0., x, sdiag] ## If the jacobian is not rank deficient, the newton step provides a ## lower bound, parl, for the zero of the function. Otherwise set ## this bound to zero. parl = 0. 
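        ## (parl and paru bracket the Levenberg-Marquardt parameter: they
        ## bound the root of phi(par) = ||diag*x(par)|| - delta, and the
        ## iteration below refines par until |phi| <= 0.1*delta or the
        ## 10-iteration limit noted above is reached, as in MINPACK's LMPAR.)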
if nsing >= n: wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm wa1[0] = wa1[0] / r[0,0] ## Degenerate case for j in range(1,n): ## Note "1" here, not zero sum = numpy.sum(r[0:j,j]*wa1[0:j]) wa1[j] = (wa1[j] - sum)/r[j,j] temp = self.enorm(wa1) parl = ((fp/delta)/temp)/temp ## Calculate an upper bound, paru, for the zero of the function for j in range(n): sum = numpy.sum(r[0:j+1,j]*qtb[0:j+1]) wa1[j] = sum/diag[ipvt[j]] gnorm = self.enorm(wa1) paru = gnorm/delta if paru == 0: paru = dwarf/min([delta,0.1]) ## If the input par lies outside of the interval (parl,paru), set ## par to the closer endpoint par = max([par,parl]) par = min([par,paru]) if par == 0: par = gnorm/dxnorm ## Beginning of an interation while(1): iter = iter + 1 ## Evaluate the function at the current value of par if par == 0: par = max([dwarf, paru*0.001]) temp = numpy.sqrt(par) wa1 = temp * diag [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag) wa2 = diag*x dxnorm = self.enorm(wa2) temp = fp fp = dxnorm - delta if ((abs(fp) <= 0.1*delta) or ((parl == 0) and (fp <= temp) and (temp < 0)) or (iter == 10)): break; ## Compute the newton correction wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm for j in range(n-1): wa1[j] = wa1[j]/sdiag[j] wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j] wa1[n-1] = wa1[n-1]/sdiag[n-1] ## Degenerate case temp = self.enorm(wa1) parc = ((fp/delta)/temp)/temp ## Depending on the sign of the function, update parl or paru if fp > 0: parl = max([parl,par]) if fp < 0: paru = min([paru,par]) ## Compute an improved estimate for par par = max([parl, par+parc]) ## End of an iteration ## Termination return[r, par, x, sdiag] ## Procedure to tie one parameter to another. def tie(self, p, ptied=None): if self.debug: print('Entering tie...') if ptied is None: return for i in range(len(ptied)): if ptied[i] == '': continue cmd = 'p[' + str(i) + '] = ' + ptied[i] exec(cmd) return p # Original FORTRAN documentation # ********** # # subroutine covar # # given an m by n matrix a, the problem is to determine # the covariance matrix corresponding to a, defined as # # t # inverse(a *a) . # # this subroutine completes the solution of the problem # if it is provided with the necessary information from the # qr factorization, with column pivoting, of a. that is, if # a*p = q*r, where p is a permutation matrix, q has orthogonal # columns, and r is an upper triangular matrix with diagonal # elements of nonincreasing magnitude, then covar expects # the full upper triangle of r and the permutation matrix p. # the covariance matrix is then computed as # # t t # p*inverse(r *r)*p . # # if a is nearly rank deficient, it may be desirable to compute # the covariance matrix corresponding to the linearly independent # columns of a. to define the numerical rank of a, covar uses # the tolerance tol. if l is the largest integer such that # # abs(r(l,l)) .gt. tol*abs(r(1,1)) , # # then covar computes the covariance matrix corresponding to # the first l columns of r. for k greater than l, column # and row ipvt(k) of the covariance matrix are set to zero. # # the subroutine statement is # # subroutine covar(n,r,ldr,ipvt,tol,wa) # # where # # n is a positive integer input variable set to the order of r. # # r is an n by n array. on input the full upper triangle must # contain the full upper triangle of the matrix r. on output # r contains the square symmetric covariance matrix. # # ldr is a positive integer input variable not less than n # which specifies the leading dimension of the array r. 
# # ipvt is an integer input array of length n which defines the # permutation matrix p such that a*p = q*r. column j of p # is column ipvt(j) of the identity matrix. # # tol is a nonnegative input variable used to define the # numerical rank of a in the manner described above. # # wa is a work array of length n. # # subprograms called # # fortran-supplied ... dabs # # argonne national laboratory. minpack project. august 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # # ********** def calc_covar(self, rr, ipvt=None, tol=1.e-14): if self.debug: print('Entering calc_covar...') if numpy.rank(rr) != 2: print('ERROR: r must be a two-dimensional matrix') return(-1) s = numpy.shape(rr) n = s[0] if s[0] != s[1]: print('ERROR: r must be a square matrix') return(-1) if ipvt is None: ipvt = numpy.arange(n) r = rr.copy() r.shape = [n,n] ## For the inverse of r in the full upper triangle of r l = -1 tolr = tol * abs(r[0,0]) for k in range(n): if (abs(r[k,k]) <= tolr): break r[k,k] = 1./r[k,k] for j in range(k): temp = r[k,k] * r[j,k] r[j,k] = 0. r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j] l = k ## Form the full upper triangle of the inverse of (r transpose)*r ## in the full upper triangle of r if l >= 0: for k in range(l+1): for j in range(k): temp = r[j,k] r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k] temp = r[k,k] r[0:k+1,k] = temp * r[0:k+1,k] ## For the full lower triangle of the covariance matrix ## in the strict lower triangle or and in wa wa = numpy.repeat([r[0,0]], n) for j in range(n): jj = ipvt[j] sing = j > l for i in range(j+1): if sing: r[i,j] = 0. ii = ipvt[i] if ii > jj: r[ii,jj] = r[i,j] if ii < jj: r[jj,ii] = r[i,j] wa[jj] = r[j,j] ## Symmetrize the covariance matrix in r for j in range(n): r[0:j+1,j] = r[j,0:j+1] r[j,j] = wa[j] return(r) class machar: def __init__(self, double=1): if (double == 0): self.machep = 1.19209e-007 self.maxnum = 3.40282e+038 self.minnum = 1.17549e-038 self.maxgam = 171.624376956302725 else: self.machep = 2.2204460e-016 self.maxnum = 1.7976931e+308 self.minnum = 2.2250739e-308 self.maxgam = 171.624376956302725 self.maxlog = numpy.log(self.maxnum) self.minlog = numpy.log(self.minnum) self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10 self.rgiant = numpy.sqrt(self.maxnum) * 0.1 stsci.tools-3.4.12/lib/stsci/tools/numerixenv.py0000644001120100020070000000056113006721301023365 0ustar jhunkSTSCI\science00000000000000from __future__ import division # confidence medium import os def check_input(xxx): """Check if input is a Numarray Array.""" try: import numarray return isinstance(xxx,numarray.numarraycore.NumArray) except ImportError: pass def check(): """Check for running numarray version of pyfits with numpy code.""" pass stsci.tools-3.4.12/lib/stsci/tools/parseinput.py0000644001120100020070000001623313112074217023367 0ustar jhunkSTSCI\science00000000000000 # Program: parseinput.py # Author: Christopher Hanley # # License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE # # History: # Version 0.1, 11/02/2004: Initial Creation -- CJH # Version 0.1.2 01/10/2005: Removed the appending of "_drz.fits" to extracted # file names. -- CJH # Version 0.1.3 01/18/2005: Added the NICMOS '_asc.fits' to the list of # valid association file names. # Version 0.1.4 01/25/2005: Removed reliance on ASN dict keys for ordering # the output filelist. WJH/CJH # Version 0.1.5 10/11/2005: Corrected typo in errorstr variable name discovered # by external user j.e.geach@durham.ac.uk. 
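# Example usage (a sketch; the file names are hypothetical):
#   >>> from stsci.tools.parseinput import parseinput
#   >>> files, output = parseinput('*_flt.fits,extra_flt.fits')
#   >>> files, output = parseinput('j8bt06010_asn.fits')  # expands the ASN table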
from __future__ import division # confidence high __version__ = '0.1.5 (10/11/2005)' __author__ = 'Christopher Hanley' # irafglob provides the ability to recursively parse user input that # is in the form of wildcards and '@' files. from . import irafglob from .irafglob import irafglob from . import fileutil from stsci.tools.asnutil import readASNTable def parseinput(inputlist,outputname=None, atfile=None): """ Recursively parse user input based upon the irafglob program and construct a list of files that need to be processed. This program addresses the following deficiencies of the irafglob program:: parseinput can extract filenames from association tables Returns ------- This program will return a list of input files that will need to be processed in addition to the name of any outfiles specified in an association table. Parameters ---------- inputlist - string specification of input files using either wild-cards, @-file or comma-separated list of filenames outputname - string desired name for output product to be created from the input files atfile - object function to use in interpreting the @-file columns that gets passed to irafglob Returns ------- files - list of strings names of output files to be processed newoutputname - string name of output file to be created. See Also -------- stsci.tools.irafglob """ # Initalize some variables files = [] # list used to store names of input files newoutputname = outputname # Outputname returned to calling program. # The value of outputname is only changed # if it had a value of 'None' on input. # We can use irafglob to parse the input. If the input wasn't # an association table, it needs to be either a wildcard, '@' file, # or comma seperated list. files = irafglob(inputlist, atfile=atfile) # Now that we have expanded the inputlist into a python list # containing the list of input files, it is necessary to examine # each of the files to make sure none of them are association tables. # # If an association table is found, the entries should be read # Determine if the input is an association table for file in files: if (checkASN(file) == True): # Create a list to store the files extracted from the # association tiable assoclist = [] # The input is an association table try: # Open the association table assocdict = readASNTable(file, None, prodonly=False) except: errorstr = "###################################\n" errorstr += "# #\n" errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n" errorstr += str(file)+'\n' errorstr += "# DURING FILE PARSING. #\n" errorstr += "# #\n" errorstr += "# Please determine if the file is #\n" errorstr += "# in the current directory and #\n" errorstr += "# that it has been properly #\n" errorstr += "# formatted. #\n" errorstr += "# #\n" errorstr += "# This error message is being #\n" errorstr += "# generated from within the #\n" errorstr += "# parseinput.py module. #\n" errorstr += "# #\n" errorstr += "###################################\n" raise ValueError(errorstr) # Extract the output name from the association table if None # was provided on input. if outputname is None: newoutputname = assocdict['output'] # Loop over the association dictionary to extract the input # file names. for f in assocdict['order']: assoclist.append(fileutil.buildRootname(f)) # Remove the name of the association table from the list of files files.remove(file) # Append the list of filenames generated from the association table # to the master list of input files. 
files.extend(assoclist) # Return the list of the input files and the output name if provided in an association. return files, newoutputname def checkASN(filename): """ Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value """ # Extract the file extn type: extnType = filename[filename.rfind('_')+1:filename.rfind('.')] # Determine if this extn name is valid for an assocation file if isValidAssocExtn(extnType): return True else: return False def isValidAssocExtn(extname): """ Determine if the extension name given as input could represent a valid association file. Parameters ---------- extname : string Returns ------- isValid : boolean value """ # Define a list of valid extension types to define an association table. validExtnNames = ['asn','asc'] # Loop over the list of valid extension types and compare with the input # extension name. If there is ever a match return True. for validName in validExtnNames: if (extname == validName): return True return False def countinputs(inputlist): """ Determine the number of inputfiles provided by the user and the number of those files that are association tables Parameters ---------- inputlist : string the user input Returns ------- numInputs: int number of inputs provided by the user numASNfiles: int number of association files provided as input """ # Initialize return values numInputs = 0 numASNfiles = 0 # User irafglob to count the number of inputfiles files = irafglob(inputlist, atfile=None) # Use the "len" ufunc to count the number of entries in the list numInputs = len(files) # Loop over the list and see if any of the entries are association files for file in files: if (checkASN(file) == True): numASNfiles += 1 return numInputs,numASNfiles stsci.tools-3.4.12/lib/stsci/tools/readgeis.py0000644001120100020070000003510113241163620022753 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python # $Id$ """ readgeis: Read GEIS file and convert it to a FITS extension file. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE Usage: readgeis.py [options] GEISname FITSname GEISname is the input GEIS file in GEIS format, and FITSname is the output file in FITS format. GEISname can be a directory name. In this case, it will try to use all `*.??h` files as input file names. If FITSname is omitted or is a directory name, this task will try to construct the output names from the input names, i.e.: abc.xyh will have an output name of abc_xyf.fits :Options: -h print the help (this text) :Example: If used in Pythons script, a user can, e. g.:: >>> import readgeis >>> hdulist = readgeis.readgeis(GEISFileName) (do whatever with hdulist) >>> hdulist.writeto(FITSFileName) The most basic usage from the command line:: readgeis.py test1.hhh test1.fits This command will convert the input GEIS file test1.hhh to a FITS file test1.fits. From the command line:: readgeis.py . this will convert all `*.??h` files in the current directory to FITS files (of corresponding names) and write them in the current directory. Another example of usage from the command line:: readgeis.py "u*" "*" this will convert all `u*.??h` files in the current directory to FITS files (of corresponding names) and write them in the current directory. Note that when using wild cards, it is necessary to put them in quotes. """ # Developed by Science Software Branch, STScI, USA. 
# This version needs pyfits 0.9.6.3 or later # and numpy version 1.0.4 or later from __future__ import division, print_function # confidence high __version__ = "2.2 (18 Feb, 2011), \xa9 AURA" import os, sys from astropy.io import fits import numpy from numpy import memmap from functools import reduce def stsci(hdulist): """For STScI GEIS files, need to do extra steps.""" instrument = hdulist[0].header.get('INSTRUME', '') # Update extension header keywords if instrument in ("WFPC2", "FOC"): rootname = hdulist[0].header.get('ROOTNAME', '') filetype = hdulist[0].header.get('FILETYPE', '') for i in range(1, len(hdulist)): # Add name and extver attributes to match PyFITS data structure hdulist[i].name = filetype hdulist[i]._extver = i # Add extension keywords for this chip to extension hdulist[i].header['EXPNAME'] = (rootname, "9 character exposure identifier") hdulist[i].header['EXTVER']= (i, "extension version number") hdulist[i].header['EXTNAME'] = (filetype, "extension name") hdulist[i].header['INHERIT'] = (True, "inherit the primary header") hdulist[i].header['ROOTNAME'] = (rootname, "rootname of the observation set") def stsci2(hdulist, filename): """For STScI GEIS files, need to do extra steps.""" # Write output file name to the primary header instrument = hdulist[0].header.get('INSTRUME', '') if instrument in ("WFPC2", "FOC"): hdulist[0].header['FILENAME'] = filename def readgeis(input): """Input GEIS files "input" will be read and a HDUList object will be returned. The user can use the writeto method to write the HDUList object to a FITS file. """ global dat cardLen = fits.Card.length # input file(s) must be of the form *.??h and *.??d if input[-1] != 'h' or input[-4] != '.': raise "Illegal input GEIS file name %s" % input data_file = input[:-1]+'d' _os = sys.platform if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin': bytes_per_line = cardLen+1 else: raise "Platform %s is not supported (yet)." 
% _os geis_fmt = {'REAL':'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'} end_card = 'END'+' '* (cardLen-3) # open input file im = open(input) # Generate the primary HDU cards = [] while 1: line = im.read(bytes_per_line)[:cardLen] line = line[:8].upper() + line[8:] if line == end_card: break cards.append(fits.Card.fromstring(line)) phdr = fits.Header(cards) im.close() _naxis0 = phdr.get('NAXIS', 0) _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)] _naxis.insert(0, _naxis0) _bitpix = phdr['BITPIX'] _psize = phdr['PSIZE'] if phdr['DATATYPE'][:4] == 'REAL': _bitpix = -_bitpix if _naxis0 > 0: size = reduce(lambda x,y:x*y, _naxis[1:]) data_size = abs(_bitpix) * size // 8 else: data_size = 0 group_size = data_size + _psize // 8 # decode the group parameter definitions, # group parameters will become extension header groups = phdr['GROUPS'] gcount = phdr['GCOUNT'] pcount = phdr['PCOUNT'] formats = [] bools = [] floats = [] _range = range(1, pcount+1) key = [phdr['PTYPE'+str(j)] for j in _range] comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range] # delete group parameter definition header keywords _list = ['PTYPE'+str(j) for j in _range] + \ ['PDTYPE'+str(j) for j in _range] + \ ['PSIZE'+str(j) for j in _range] + \ ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO'] # Construct record array formats for the group parameters # as interpreted from the Primary header file for i in range(1, pcount+1): ptype = key[i-1] pdtype = phdr['PDTYPE'+str(i)] star = pdtype.find('*') _type = pdtype[:star] _bytes = pdtype[star+1:] # collect boolean keywords since they need special attention later if _type == 'LOGICAL': bools.append(i) if pdtype == 'REAL*4': floats.append(i) fmt = geis_fmt[_type] + _bytes formats.append((ptype,fmt)) _shape = _naxis[1:] _shape.reverse() _code = fits.BITPIX2DTYPE[_bitpix] _bscale = phdr.get('BSCALE', 1) _bzero = phdr.get('BZERO', 0) if phdr['DATATYPE'][:10] == 'UNSIGNED*2': _uint16 = 1 _bzero = 32768 else: _uint16 = 0 # delete from the end, so it will not conflict with previous delete for i in range(len(phdr)-1, -1, -1): if phdr.cards[i].keyword in _list: del phdr[i] # clean up other primary header keywords phdr['SIMPLE'] = True phdr['BITPIX'] = 16 phdr['GROUPS'] = False _after = 'NAXIS' if _naxis0 > 0: _after += str(_naxis0) phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after) phdr.set('NEXTEND', value=gcount, comment="Number of standard extensions") hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=None)]) # Use copy-on-write for all data types since byteswap may be needed # in some platforms. f1 = open(data_file, mode='rb') dat = f1.read() # dat = memmap(data_file, mode='c') hdulist.mmobject = dat errormsg = "" loc = 0 for k in range(gcount): ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code) ext_dat = ext_dat.reshape(_shape) if _uint16: ext_dat += _bzero # Check to see whether there are any NaN's or infs which might indicate # a byte-swapping problem, such as being written out on little-endian # and being read in on big-endian or vice-versa. if _code.find('float') >= 0 and \ (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))): errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had floating point data values =\n" errormsg += "= of NaN and/or Inf. 
=\n" errormsg += "===================================\n" elif _code.find('int') >= 0: # Check INT data for max values ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat) if ext_dat_exp.max() == int(_bitpix) - 1: # Potential problems with byteswapping errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had integer data values =\n" errormsg += "= with maximum bitvalues. =\n" errormsg += "===================================\n" ext_hdu = fits.ImageHDU(data=ext_dat) rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats) loc += group_size # Create separate PyFITS Card objects for each entry in 'rec' for i in range(1, pcount+1): #val = rec.field(i-1)[0] val = rec[0][i-1] if val.dtype.kind == 'S': val = val.decode('ascii') if i in bools: if val: val = True else: val = False if i in floats: # use fromstring, format in Card is deprecated in pyfits 0.9 _str = '%-8s= %20.7G / %s' % (key[i-1], val, comm[i-1]) _card = fits.Card.fromstring(_str) else: _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1]) ext_hdu.header.append(_card) # deal with bscale/bzero if (_bscale != 1 or _bzero != 0): ext_hdu.header['BSCALE'] = _bscale ext_hdu.header['BZERO'] = _bzero hdulist.append(ext_hdu) if errormsg != "": errormsg += "===================================\n" errormsg += "= This file may have been =\n" errormsg += "= written out on a platform =\n" errormsg += "= with a different byte-order. =\n" errormsg += "= =\n" errormsg += "= Please verify that the values =\n" errormsg += "= are correct or apply the =\n" errormsg += "= '.byteswap()' method. =\n" errormsg += "===================================\n" print(errormsg) f1.close() stsci(hdulist) return hdulist def parse_path(f1, f2): """Parse two input arguments and return two lists of file names""" import glob # if second argument is missing or is a wild card, point it # to the current directory f2 = f2.strip() if f2 == '' or f2 == '*': f2 = './' # if the first argument is a directory, use all GEIS files if os.path.isdir(f1): f1 = os.path.join(f1, '*.??h') list1 = glob.glob(f1) list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.'] # if the second argument is a directory, use file names in the # first argument to construct file names, i.e. # abc.xyh will be converted to abc_xyf.fits if os.path.isdir(f2): list2 = [] for file in list1: name = os.path.split(file)[-1] fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits' list2.append(os.path.join(f2, fitsname)) else: list2 = [s.strip() for s in f2.split(",")] if list1 == [] or list2 == []: err_msg = "" if list1 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f1) if list2 == []: err_msg += "Input files `{:s}` not usable/available. 
".format(f2) raise IOError(err_msg) else: return list1, list2 #------------------------------------------------------------------------------- # special initialization when this is the main program if __name__ == "__main__": import getopt try: optlist, args = getopt.getopt(sys.argv[1:], 'h') except getopt.error as e: print(str(e)) print(__doc__) print("\t", __version__) # initialize default values help = 0 # read options for opt, value in optlist: if opt == "-h": help = 1 if (help): print(__doc__) print("\t", __version__) else: if len(args) == 1: args.append('') list1, list2 = parse_path (args[0], args[1]) npairs = min (len(list1), len(list2)) for i in range(npairs): if os.path.exists(list2[i]): print("Output file %s already exists, skip." % list2[i]) break try: hdulist = readgeis(list1[i]) stsci2(hdulist, list2[i]) hdulist.writeto(list2[i]) hdulist.close() print("%s -> %s" % (list1[i], list2[i])) except Exception as e : print("Conversion fails for %s: %s" % (list1[i], str(e))) break """ Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of AURA and its representatives may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ stsci.tools-3.4.12/lib/stsci/tools/stash.py0000644001120100020070000000123413006721301022305 0ustar jhunkSTSCI\science00000000000000'''This is a tool for stashing certain information used by the continuous integration system at STScI. It is not intended for, or even expected to work, in any other application. -- use this in shell scripts: d=`python -m stsci.tools.stash` cp file $d ''' from __future__ import print_function import sys import os # use os.path.join because the file name may be used outside of # python and we need it to be right on Windows. stash_dir = os.path.join(os.path.dirname(__file__),'stash') try : os.mkdir(stash_dir) except OSError : pass if __name__ == '__main__' : print(stash_dir) if not os.path.exists(stash_dir) : sys.exit(1) stsci.tools-3.4.12/lib/stsci/tools/stpyfits.py0000644001120100020070000003117413241163620023063 0ustar jhunkSTSCI\science00000000000000# $Id$ """ The stpyfits module is an extension to the `astropy.io.fits` module which offers additional features specific to STScI. These features include the handling of Constant Data Value Arrays. 
""" from __future__ import division import functools import sys import numpy as np import astropy from astropy.io import fits # A few imports for backward compatibility; in the earlier stpyfits these were # overridden, but with fits's new extension system it's not necessary from astropy.io.fits.util import _is_int from astropy.utils import lazyproperty from distutils.version import LooseVersion PY3K = sys.version_info[0] > 2 ASTROPY_VER_GE20 = LooseVersion(astropy.__version__) >= LooseVersion('2.0') STPYFITS_ENABLED = False # Not threadsafe TODO: (should it be?) # Register the extension classes; simply importing stpyfits does not # automatically enable it. Instead, it can be enabled/disabled using these # functions. def enable_stpyfits(): global STPYFITS_ENABLED if not STPYFITS_ENABLED: fits.register_hdu(ConstantValuePrimaryHDU) fits.register_hdu(ConstantValueImageHDU) STPYFITS_ENABLED = True def disable_stpyfits(): global STPYFITS_ENABLED if STPYFITS_ENABLED: fits.unregister_hdu(ConstantValuePrimaryHDU) fits.unregister_hdu(ConstantValueImageHDU) STPYFITS_ENABLED = False def with_stpyfits(func): @functools.wraps(func) def wrapped_with_stpyfits(*args, **kwargs): global STPYFITS_ENABLED was_enabled = STPYFITS_ENABLED enable_stpyfits() try: # BUG: Forcefully disable lazy loading. # Lazy loading breaks ability to initialize ConstantValueHDUs # TODO: Investigate the cause upstream (astropy.io.fits) if 'write' not in func.__name__: kwargs['lazy_load_hdus'] = False retval = func(*args, **kwargs) finally: # Only disable stpyfits if it wasn't already enabled if not was_enabled: disable_stpyfits() return retval return wrapped_with_stpyfits class _ConstantValueImageBaseHDU(fits.hdu.image._ImageBaseHDU): """ A class that extends the `astropy.io.fits.hdu.base._BaseHDU` class to extend its behavior to implement STScI specific extensions to `astropy.io.fits`. The `astropy.io.fits.hdu.base._BaseHDU class` is: """ __doc__ += fits.hdu.image._ImageBaseHDU.__doc__ def __init__(self, data=None, header=None, do_not_scale_image_data=False, uint=False, **kwargs): if header and 'PIXVALUE' in header and header['NAXIS'] == 0: header = header.copy() # Add NAXISn keywords for each NPIXn keyword in the header and # remove the NPIXn keywords naxis = 0 for card in reversed(header['NPIX*'].cards): try: idx = int(card.keyword[len('NPIX'):]) except ValueError: continue hdrlen = len(header) header.set('NAXIS' + str(idx), card.value, card.comment, after='NAXIS') del header[card.keyword] if len(header) < hdrlen: # A blank card was used when updating the header; add the # blank back in. # TODO: Fix header.set so that it has an option not to # use a blank card--this is a detail that we really # shouldn't have to worry about otherwise header.append() # Presumably the NPIX keywords are in order of their axis, but # just in case somehow they're not... naxis = max(naxis, idx) # Update the NAXIS keyword with the correct number of axes header['NAXIS'] = naxis elif header and 'PIXVALUE' in header: pixval = header['PIXVALUE'] if header['BITPIX'] > 0: if PY3K: pixval = int(pixval) else: pixval = long(pixval) arrayval = self._check_constant_value_data(data) if arrayval is not None: header = header.copy() # Update the PIXVALUE keyword if necessary if arrayval != pixval: header['PIXVALUE'] = arrayval else: header = header.copy() # There is a PIXVALUE keyword but NAXIS is not 0 and the data # does not match the PIXVALUE. # Must remove the PIXVALUE and NPIXn keywords so we recognize # that there is non-constant data in the file. 
del header['PIXVALUE'] for card in header['NPIX*'].cards: try: idx = int(card.keyword[len('NPIX'):]) except ValueError: continue del header[card.keyword] # Make sure to pass any arguments other than data and header as # keyword arguments, because PrimaryHDU and ImageHDU have stupidly # different signatures for __init__ super(_ConstantValueImageBaseHDU, self).__init__( data, header, do_not_scale_image_data=do_not_scale_image_data, uint=uint) @property def size(self): """ The HDU's size should always come up as zero so long as there's no actual data in it other than the constant value array. """ if 'PIXVALUE' in self._header: return 0 else: return super(_ConstantValueImageBaseHDU, self).size @lazyproperty def data(self): if ('PIXVALUE' in self._header and 'NPIX1' not in self._header and self._header['NAXIS'] > 0): bitpix = self._header['BITPIX'] dims = self.shape # Special case where the pixvalue can be present but all the NPIXn # keywords are zero. if sum(dims) == 0: return None code = BITPIX2DTYPE[bitpix] pixval = self._header['PIXVALUE'] if code in ['uint8', 'int16', 'int32', 'int64']: if PY3K: pixval = int(pixval) else: pixval = long(pixval) raw_data = np.zeros(shape=dims, dtype=code) + pixval if raw_data.dtype.str[0] != '>': raw_data = raw_data.byteswap(True) raw_data.dtype = raw_data.dtype.newbyteorder('>') if self._bzero != 0 or self._bscale != 1: if bitpix > 16: # scale integers to Float64 data = np.array(raw_data, dtype=np.float64) elif bitpix > 0: # scale integers to Float32 data = np.array(raw_data, dtype=np.float32) else: # floating point cases data = raw_data if self._bscale != 1: np.multiply(data, self._bscale, data) if self._bzero != 0: data += self._bzero # delete the keywords BSCALE and BZERO after scaling del self._header['BSCALE'] del self._header['BZERO'] self._header['BITPIX'] = DTYPE2BITPIX[data.dtype.name] else: data = raw_data return data else: return super(_ConstantValueImageBaseHDU, self).data @data.setter def data(self, data): self.__dict__['data'] = data self._modified = True if self.data is not None and not isinstance(data, np.ndarray): # Try to coerce the data into a numpy array--this will work, on # some level, for most objects try: data = np.array(data) except: raise TypeError('data object %r could not be coerced into an ' 'ndarray' % data) if isinstance(data, np.ndarray): self._bitpix = DTYPE2BITPIX[data.dtype.name] self._axes = list(data.shape) self._axes.reverse() elif self.data is None: self._axes = [] else: raise ValueError('not a valid data array') self.update_header() @classmethod def match_header(cls, header): """A constant value HDU will only be recognized as such if the header contains a valid PIXVALUE and NAXIS == 0. """ pixvalue = header.get('PIXVALUE') naxis = header.get('NAXIS', 0) return (super(_ConstantValueImageBaseHDU, cls).match_header(header) and (isinstance(pixvalue, float) or _is_int(pixvalue)) and naxis == 0) def update_header(self): if (not self._modified and not self._header._modified and (self._has_data and self.shape == self.data.shape)): # Not likely that anything needs updating return super(_ConstantValueImageBaseHDU, self).update_header() if 'PIXVALUE' in self._header and self._header['NAXIS'] > 0: # This is a Constant Value Data Array. 
Verify that the data # actually matches the PIXVALUE pixval = self._header['PIXVALUE'] if self._header['BITPIX'] > 0: if PY3K: pixval = int(pixval) else: pixval = long(pixval) if self.data is None or self.data.nbytes == 0: # Empty data array; just keep the existing PIXVALUE arrayval = self._header['PIXVALUE'] else: arrayval = self._check_constant_value_data(self.data) if arrayval is not None: if arrayval != pixval: self._header['PIXVALUE'] = arrayval naxis = self._header['NAXIS'] self._header['NAXIS'] = 0 for idx in range(naxis, 0, -1): axisval = self._header['NAXIS%d' % idx] self._header.set('NPIX%d' % idx, axisval, 'length of constant array axis %d' % idx, after='PIXVALUE') del self._header['NAXIS%d' % idx] else: # No longer a constant value array; remove any remaining # NPIX or PIXVALUE keywords try: del self._header['PIXVALUE'] except KeyError: pass try: del self._header['NPIX*'] except KeyError: pass def _summary(self): summ = super(_ConstantValueImageBaseHDU, self)._summary() if ASTROPY_VER_GE20: outsumm = ((summ[0], summ[1], summ[2].replace('ConstantValue', '')) + summ[3:]) else: outsumm = ((summ[0], summ[1].replace('ConstantValue', '')) + summ[2:]) return outsumm def _writedata_internal(self, fileobj): if 'PIXVALUE' in self._header: # This is a Constant Value Data Array, so no data is written return 0 else: return super(_ConstantValueImageBaseHDU, self)._writedata_internal(fileobj) def _check_constant_value_data(self, data): """Verify that the HDU's data is a constant value array.""" arrayval = data.flat[0] if np.all(data == arrayval): return arrayval return None class ConstantValuePrimaryHDU(_ConstantValueImageBaseHDU, fits.hdu.PrimaryHDU): """Primary HDUs with constant value arrays.""" class ConstantValueImageHDU(_ConstantValueImageBaseHDU, fits.hdu.ImageHDU): """Image extension HDUs with constant value arrays.""" # Import the rest of the astropy.io.fits module from astropy.io.fits import * # noqa # For backward-compatibility with older code that thinks PrimaryHDU and # ImageHDU should support the ConstantValue features PrimaryHDU = ConstantValuePrimaryHDU ImageHDU = ConstantValueImageHDU # Override the other "convenience" functions to use stpyfits open = fitsopen = with_stpyfits(fits.open) info = with_stpyfits(fits.info) append = with_stpyfits(fits.append) writeto = with_stpyfits(fits.writeto) update = with_stpyfits(fits.update) getheader = with_stpyfits(fits.getheader) getdata = with_stpyfits(fits.getdata) getval = with_stpyfits(fits.getval) setval = with_stpyfits(fits.setval) delval = with_stpyfits(fits.delval) __all__ = fits.__all__ + ['enable_stpyfits', 'disable_stpyfits', 'with_stpyfits', 'ConstantValuePrimaryHDU', 'ConstantValueImageHDU'] stsci.tools-3.4.12/lib/stsci/tools/swapgeis.py0000644001120100020070000005120113241163620023011 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python # $Id: readgeis.py 10520 2010-10-11 16:39:49Z hack $ """ swapgeis: Read GEIS file, byteswap it and write out to a new GEIS file. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE Usage: swapgeis.py [options] GEISname newGEISname GEISname is the input GEIS file in GEIS format, and FITSname is the output file in FITS format. GEISname can be a directory name. In this case, it will try to use all `*.??h` files as input file names. 
If newGEISname is omitted or is a directory name, this task will try to
construct the output names from the input names, i.e.:

        abc.xyh will have an output name of abc_swap.xyh

:Options:

-h     print the help (this text)

-n     do NOT clobber pre-existing output files

:Example:

If used in a Python script, a user can, e.g.::

    >>> import swapgeis
    >>> swapgeis.byteswap(GEISFileName)

The most basic usage from the command line::

    swapgeis.py test1.hhh test1_swap.hhh

This command will convert the input GEIS file test1.hhh written out on one
platform (e.g. Solaris) to a byteswapped version test1_swap.hhh.

From the command line::

    swapgeis.py .

this will byteswap all `*.??h` files in the current directory
to GEIS files (of corresponding names) and write them in the
current directory.

Another example of usage from the command line::

    swapgeis.py "u*"

this will convert all `u*.??h` files in the current directory
to byteswapped files (of corresponding names) and write them in the
current directory.

Note that when using wild cards, it is necessary to
put them in quotes.

"""

# Developed by Science Software Branch, STScI, USA.
# This version needs pyfits 0.9.6.3 or later
# and numpy version 1.0.4 or later

from __future__ import division, print_function # confidence high

__version__ = "1.0 (25 Feb, 2011), \xa9 AURA"

import os, sys, string, shutil
from astropy.io import fits
import numpy
from functools import reduce

dat = None

# definitions used to convert GEIS record into numpy objects
geis_fmt = {'REAL':'f', 'DOUBLE': 'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'}

# definitions used to convert data into numpy array for use in `astropy.io.fits.Column`
cols_fmt = {'REAL':'float', 'DOUBLE':'float', 'INTEGER':'int', 'LOGICAL':'S', 'CHARACTER': 'S'}

# definitions used to define print format for `astropy.io.fits.Column`
cols_pfmt = {'REAL':'E', 'DOUBLE': 'D', 'INTEGER': 'J', 'LOGICAL':'A', 'CHARACTER': 'A'}

# Keywords which require special unit conversion;
# these keywords are output as long-floats without using exponential formatting
kw_DOUBLE = ['CRVAL1','CRVAL2','FPKTTIME','LPKTTIME']


def byteswap(input, output=None, clobber=True):
    """Input GEIS file "input" will be read and converted to a new GEIS
    file whose byte-order has been swapped from its original state.

    Parameters
    ----------
    input : str
        Full filename with path of input GEIS image header file

    output : str
        Full filename with path of output GEIS image header file.
        If None, a default name will be created as input_swap.??h

    clobber : bool
        Overwrite any pre-existing output file? [Default: True]

    Notes
    -----
    This function will automatically read and write out the data file
    using the GEIS image naming conventions.
    """
    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise ValueError("Illegal input GEIS file name %s" % input)

    data_file = input[:-1]+'d'

    # Create default output name if no output name was specified by the user
    if output is None:
        output = input.replace('.', '_swap.')
    out_data = output[:-1]+'d'

    if os.path.exists(output) and not clobber:
        errstr = 'Output file already exists! Please remove or rename and start again...'
        raise IOError(errstr)

    _os = sys.platform
    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen+1
    else:
        raise ValueError("Platform %s is not supported (yet)." % _os)

    end_card = 'END'+' '*(cardLen-3)

    # open input file
    im = open(input)

    # Generate the primary HDU so we can have access to keywords which describe
    # the number of groups and shape of each group's array
    #
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x, y: x*y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension header
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []

    _range = list(range(1, pcount+1))
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]

        # collect boolean keywords since they need special attention later
        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype, fmt))

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.BITPIX2DTYPE[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)

    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    f1.close()
    errormsg = ""

    loc = 0
    outdat = b''

    for k in range(gcount):
        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
        ext_dat = ext_dat.reshape(_shape).byteswap()
        outdat += ext_dat.tostring()

        ext_hdu = fits.hdu.ImageHDU(data=ext_dat)

        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats).byteswap()
        outdat += rec.tostring()

        loc += group_size

    if os.path.exists(output):
        os.remove(output)
    if os.path.exists(out_data):
        os.remove(out_data)

    shutil.copy(input, output)
    outfile = open(out_data, mode='wb')
    outfile.write(outdat)
    outfile.close()

    print('Finished byte-swapping ', input, ' to ', output)

#-------------------------------------------------------------------------------
"""Input GEIS files "input" will be read and a HDUList object will be
returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.

The user can use the writeto method to write the HDUList object to a FITS file.
"""

# global dat   # !!!
# (looks like this is a function missing its head; the following module-level
# block appears to be an orphaned function body and is preserved as found)
cardLen = fits.Card.length

# input file(s) must be of the form *.??h and *.??d
if input[-1] != 'h' or input[-4] != '.':
    raise ValueError("Illegal input GEIS file name %s" % input)

data_file = input[:-1]+'d'

_os = sys.platform
if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
    bytes_per_line = cardLen+1
else:
    raise ValueError("Platform %s is not supported (yet)." % _os)

end_card = 'END'+' '*(cardLen-3)

# open input file
im = open(input)

# Generate the primary HDU
cards = []
while 1:
    line = im.read(bytes_per_line)[:cardLen]
    line = line[:8].upper() + line[8:]
    if line == end_card:
        break
    cards.append(fits.Card.fromstring(line))

phdr = fits.Header(cards)
im.close()

phdr.set('FILENAME', value=input, after='DATE')

# Determine starting point for adding Group Parameter Block keywords to Primary header
phdr_indx = phdr.index('PSIZE')

_naxis0 = phdr.get('NAXIS', 0)
_naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
_naxis.insert(0, _naxis0)
_bitpix = phdr['BITPIX']
_psize = phdr['PSIZE']
if phdr['DATATYPE'][:4] == 'REAL':
    _bitpix = -_bitpix
if _naxis0 > 0:
    size = reduce(lambda x, y: x*y, _naxis[1:])
    data_size = abs(_bitpix) * size // 8
else:
    data_size = 0
group_size = data_size + _psize // 8

# decode the group parameter definitions,
# group parameters will become extension table
groups = phdr['GROUPS']
gcount = phdr['GCOUNT']
pcount = phdr['PCOUNT']

formats = []
bools = []
floats = []
cols = []       # column definitions used for extension table
cols_dict = {}  # provides name access to Column defs

_range = list(range(1, pcount+1))
key = [phdr['PTYPE'+str(j)] for j in _range]
comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

# delete group parameter definition header keywords
_list = ['PTYPE'+str(j) for j in _range] + \
        ['PDTYPE'+str(j) for j in _range] + \
        ['PSIZE'+str(j) for j in _range] + \
        ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

# Construct record array formats for the group parameters
# as interpreted from the Primary header file
for i in range(1, pcount+1):
    ptype = key[i-1]
    pdtype = phdr['PDTYPE'+str(i)]
    star = pdtype.find('*')
    _type = pdtype[:star]
    _bytes = pdtype[star+1:]

    # collect boolean keywords since they need special attention later
    if _type == 'LOGICAL':
        bools.append(i)
    if pdtype == 'REAL*4':
        floats.append(i)

    # identify keywords which require conversion to special units
    if ptype in kw_DOUBLE:
        _type = 'DOUBLE'

    fmt = geis_fmt[_type] + _bytes
    formats.append((ptype, fmt))

    # Set up definitions for use in creating the group-parameter block table
    nrpt = ''
    nbits = str(int(_bytes)*8)
    if 'CHAR' in _type:
        nrpt = _bytes
        nbits = _bytes

    afmt = cols_fmt[_type] + nbits
    if 'LOGICAL' in _type:
        afmt = cols_fmt[_type]
    cfmt = cols_pfmt[_type]+nrpt
    #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
    cols_dict[ptype] = fits.Column(name=ptype, format=cfmt, array=numpy.zeros(gcount, dtype=afmt))
    cols.append(cols_dict[ptype])  # This keeps the columns in order

_shape = _naxis[1:]
_shape.reverse()
_code = fits.BITPIX2DTYPE[_bitpix]
_bscale = phdr.get('BSCALE', 1)
_bzero = phdr.get('BZERO', 0)

if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
    _uint16 = 1
    _bzero = 32768
else:
    _uint16 = 0

# delete from the end, so it will not conflict with previous delete
for i in range(len(phdr)-1, -1, -1):
    if phdr.cards[i].keyword in _list:
        del phdr[i]

# clean up other primary header keywords
phdr['SIMPLE'] = True
phdr['GROUPS'] = False
_after = 'NAXIS'
if _naxis0 > 0:
    _after += str(_naxis0)
phdr.set('EXTEND',
value=True, comment="FITS dataset may contain extensions", after=_after) # Use copy-on-write for all data types since byteswap may be needed # in some platforms. f1 = open(data_file, mode='rb') dat = f1.read() errormsg = "" # Define data array for all groups arr_shape = _naxis[:] arr_shape[0] = gcount arr_stack = numpy.zeros(arr_shape,dtype=_code) loc = 0 for k in range(gcount): ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code) ext_dat = ext_dat.reshape(_shape) if _uint16: ext_dat += _bzero # Check to see whether there are any NaN's or infs which might indicate # a byte-swapping problem, such as being written out on little-endian # and being read in on big-endian or vice-versa. if _code.find('float') >= 0 and \ (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))): errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had floating point data values =\n" errormsg += "= of NaN and/or Inf. =\n" errormsg += "===================================\n" elif _code.find('int') >= 0: # Check INT data for max values ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat) if ext_dat_exp.max() == int(_bitpix) - 1: # Potential problems with byteswapping errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had integer data values =\n" errormsg += "= with maximum bitvalues. =\n" errormsg += "===================================\n" arr_stack[k] = ext_dat rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats) loc += group_size # Add data from this GPB to table for i in range(1, pcount+1): val = rec[0][i-1] if i in bools: if val: val = 'T' else: val = 'F' cols[i-1].array[k] = val # Based on the first group, add GPB keywords to PRIMARY header if k == 0: # Create separate PyFITS Card objects for each entry in 'rec' # and update Primary HDU with these keywords after PSIZE for i in range(1, pcount+1): #val = rec.field(i-1)[0] val = rec[0][i-1] if val.dtype.kind == 'S': val = val.decode('ascii') if i in bools: if val: val = True else: val = False if i in floats: # use fromstring, format in Card is deprecated in pyfits 0.9 _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1]) _card = fits.Card.fromstring(_str) else: _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1]) phdr.insert(phdr_indx+i, _card) # deal with bscale/bzero if (_bscale != 1 or _bzero != 0): phdr['BSCALE'] = _bscale phdr['BZERO'] = _bzero #hdulist.append(ext_hdu) # Define new table based on Column definitions ext_table = fits.TableHDU.from_columns(cols) ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS') # Add column descriptions to header of table extension to match stwfits output for i in range(len(key)): ext_table.header.append(fits.Card(keyword=key[i], value=comm[i])) if errormsg != "": errormsg += "===================================\n" errormsg += "= This file may have been =\n" errormsg += "= written out on a platform =\n" errormsg += "= with a different byte-order. =\n" errormsg += "= =\n" errormsg += "= Please verify that the values =\n" errormsg += "= are correct or apply the =\n" errormsg += "= '.byteswap()' method. 
=\n" errormsg += "===================================\n" print(errormsg) f1.close() hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)]) hdulist.append(ext_table) return hdulist def parse_path(f1, f2): """Parse two input arguments and return two lists of file names""" import glob # if second argument is missing or is a wild card, point it # to the current directory f2 = f2.strip() if f2 == '' or f2 == '*': f2 = './' # if the first argument is a directory, use all GEIS files if os.path.isdir(f1): f1 = os.path.join(f1, '*.??h') list1 = glob.glob(f1) list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.'] # if the second argument is a directory, use file names in the # first argument to construct file names, i.e. # abc.xyh will be converted to abc_xyf.fits if os.path.isdir(f2): list2 = [] for file in list1: name = os.path.split(file)[-1] fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits' list2.append(os.path.join(f2, fitsname)) else: list2 = [s.strip() for s in f2.split(",")] if list1 == [] or list2 == []: err_msg = "" if list1 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f1) if list2 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f2) raise IOError(err_msg) else: return list1, list2 #------------------------------------------------------------------------------- # special initialization when this is the main program if __name__ == "__main__": import getopt try: optlist, args = getopt.getopt(sys.argv[1:], 'hn') except getopt.error as e: print(str(e)) print(__doc__) print("\t", __version__) # initialize default values help = 0 clobber = True # read options for opt, value in optlist: if opt == "-h": help = 1 if opt == '-n': clobber = False if (help): print(__doc__) print("\t", __version__) else: if len(args) == 1: args.append('') list1, list2 = parse_path (args[0], args[1]) npairs = min (len(list1), len(list2)) for i in range(npairs): try: byteswap(list1[i],list2[i],clobber=clobber) print("%s -> %s" % (list1[i], list2[i])) except Exception as e: print("Conversion fails for %s: %s" % (list1[i], str(e))) break """ Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of AURA and its representatives may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" stsci.tools-3.4.12/lib/stsci/tools/taskpars.py0000644001120100020070000000517713006721301023025 0ustar jhunkSTSCI\science00000000000000""" Contains the TaskPars class and any related functions. $Id$ """ from __future__ import division # confidence high class NoExecError(Exception): pass class TaskPars: """ This represents a task's collection of configurable parameters. This class is meant to be mostly abstract, though there is some functionality included which could be common to most derived classes. This also serves to document the interface which must be met for EPAR. """ def getName(self, *args, **kw): """ Returns the string name of the task. """ raise NotImplementedError("class TaskPars is not to be used directly") def getPkgname(self, *args, **kw): """ Returns the string name of the package, if applicable. """ raise NotImplementedError("class TaskPars is not to be used directly") def getParList(self, *args, **kw): """ Returns a list of parameter objects. """ raise NotImplementedError("class TaskPars is not to be used directly") def getDefaultParList(self, *args, **kw): """ Returns a list of parameter objects with default values set. """ raise NotImplementedError("class TaskPars is not to be used directly") def setParam(self, *args, **kw): """ Allows one to set the value of a single parameter. Initial signature is setParam(name, value, scope='', check=1) """ raise NotImplementedError("class TaskPars is not to be used directly") def getFilename(self, *args, **kw): """ Returns the string name of any associated config/parameter file. """ raise NotImplementedError("class TaskPars is not to be used directly") def saveParList(self, *args, **kw): """ Allows one to save the entire set to a file. """ raise NotImplementedError("class TaskPars is not to be used directly") def run(self, *args, **kw): """ Runs the task with the known parameters. """ raise NoExecError("Bug: class TaskPars is not to be used directly") def canPerformValidation(self): """ Returns bool. If True, expect tryValue() to be called next. """ return False def knowAsNative(self): """ Returns bool. Return true if the class prefers in-memory objects to keep (know) their parameter values in native format instead of as strings. """ return False def getHelpAsString(self): """ Meant to be overridden - return a task specific help string. """ return 'No help string available for task "'+self.getName()+'".\n '+ \ 'Implement getHelpAsString() in your TaskPars sub-class.' # also, eparam, lParam, tParam, dParam, tryValue ? stsci.tools-3.4.12/lib/stsci/tools/teal.py0000644001120100020070000014543513112074217022131 0ustar jhunkSTSCI\science00000000000000""" Main module for the ConfigObj version of the parameter task editor: TEAL. $Id$ """ from __future__ import absolute_import, division, print_function # confidence high import os, sys, traceback from . import configobj, cfgpars, editpar, vtor_checks from .cfgpars import APP_NAME from .irafutils import printColsAuto, rglob, setWritePrivs from . import capable PY3K = sys.version_info[0] > 2 if capable.OF_GRAPHICS: if PY3K: from tkinter.filedialog import askopenfilename from tkinter.messagebox import showerror, showwarning else: from tkFileDialog import askopenfilename from tkMessageBox import showerror, showwarning # tool help tealHelpString = """\ The TEAL (Task Editor And Launcher) GUI is used to edit task parameters in a parameter-dependent way. After editing, it allows the user to launch (execute) the task. 
It also allows the user to view task help in a separate window that remains
accessible while the parameters are being edited.


Editing Parameters
--------------------

Parameter values are modified using various GUI widgets that depend on the
parameter properties.  It is possible to edit parameters using either the mouse
or the keyboard.  Most parameters have a context-dependent menu accessible via
right-clicking that enables resetting the parameter (restoring its value to
the task default), clearing the value, or even activating a file browser that
allows a filename to be selected and entered into the parameter field.  Some
items on the right-click pop-up menu may be disabled depending on the parameter
type (e.g. the file browser cannot be used for numeric parameters.)

The mouse-editing behavior should be intuitive, so the notes below focus on
keyboard-editing.  When the editor starts, the first parameter is selected.  To
select another parameter, use the Tab key (Shift-Tab to go backwards) or Return
to move the focus from item to item.  The Up and Down arrow keys also move
between fields.  The toolbar buttons can also be selected with Tab.  Use the
space bar to "push" buttons or activate menus.

Enumerated Parameters
        Parameters that have a list of choices use a drop-down menu.  The space
        bar causes the menu to appear; once it is present, the up/down arrow
        keys can be used to select different items.  Items in the list have
        accelerators (underlined, generally the first letter) that can be typed
        to jump directly to that item.  When editing is complete, hit Return or
        Tab to accept the changes, or type Escape to close the menu without
        changing the current parameter value.

Boolean Parameters
        Boolean parameters appear as Yes/No radio buttons.  Hitting the space
        bar toggles the setting, while 'y' and 'n' can be typed to select the
        desired value.

Text Entry Fields
        Strings, integers, floats, etc. appear as text-entry fields.  Values
        are verified to be legal before being stored in the parameter.  If an
        attempt is made to set a parameter to an illegal value, the program
        beeps and a warning message appears in the status bar at the bottom of
        the window.

        To see the value of a string that is longer than the entry widget,
        either use the left mouse button to do a slow "scroll" through the
        entry or use the middle mouse button to "pull" the value in the entry
        back and forth quickly.  In either case, just click in the entry widget
        with the mouse and then drag to the left or right.  If there is a
        selection highlighted, the middle mouse button may paste it in when
        clicked.  It may be necessary to click once with the left mouse button
        to undo the selection before using the middle button.

        You can also use the left and right arrow keys to scroll through the
        selection.  Control-A jumps to the beginning of the entry, and
        Control-E jumps to the end of the entry.


The Menu Bar
--------------

File menu:

    Execute
             Start the task running with the currently edited parameter values.
             If the Option "Save and Close on Execute" is set, this will save
             all the parameters and close the editor window.
    Save
             Save the parameters to the file named in the title bar.  This
             does not close the editor window, nor does it execute the task.
             If however, this button appears as "Save & Quit", then it will
             in fact close the editor window after saving.
    Save As...
             Save the parameters to a user-specified file.  This does not
             close the editor window, nor does it execute the task.
    Defaults
             Reset all parameters to the system default values for this task.
Note that individual parameters can be reset using the menu shown by right-clicking on the parameter entry. Close Close the parameter editor. If there are unsaved changes, the user is prompted to save them. Either way, this action returns to the calling routine a Python dict of the currently selected parameter values. Cancel Cancel the editing session by exiting the parameter editor. All recent changes that were made to the parameters are lost (going back until the last Save or Save As). This action returns a Python None to the calling routine. Open... menu: Load and edit parameters from any applicable file found for the current task. This changes the current file being edited (see the name listed in the title bar) to the one selected to be opened. If no such files are found, this menu is not shown. Options menu: Display Task Help in a Window Help on the task is available through the Help menu. If this option is selected, the help text is displayed in a pop-up window. This is the default behavior. Display Task Help in a Browser If this option is selected, instead of a pop-up window, help is displayed in the user's web browser. This requires access to the internet and is a somewhat experimental feature. Any HTML version of the task's help need to be provided by the task. Save and Close on Execute If this option is selected, the parameter editing window will be closed right before task execution as if the Close button had been clicked. This is the default behavior. For short-running tasks, it may be interesting to leave TEAL open and continue to execute while tweaking certain parameter values. Help menu: Task Help Display help on the task whose parameters are being edited. By default the help pops up in a new window, but the help can also be displayed in a web browser by modifying the Options. TEAL Help Display this help. Show Log Display the historical log of all the status messages that so far have been displayed in the status area at the very bottom of the user interface. Toolbar Buttons ----------------- The Toolbar contains a set of buttons that provide shortcuts for the most common menu bar actions. Their names are the same as the menu items given above: Execute, Save (or Save & Quit), Close, Cancel, and Defaults. Note that the toolbar buttons are accessible from the keyboard using the Tab and Shift-Tab keys. They are located in sequence before the first parameter. If the first parameter is selected, Shift-Tab backs up to the "Task Help" button, and if the last parameter is selected then Tab wraps around and selects the "Execute" button. """ # Starts a GUI session, or simply loads a file def teal(theTask, parent=None, loadOnly=False, returnAs="dict", canExecute=True, strict=False, errorsToTerm=False, autoClose=True, defaults=False): # overrides=None): """ Start the GUI session, or simply load a task's ConfigObj. """ if loadOnly: # this forces returnAs="dict" obj = None try: obj = cfgpars.getObjectFromTaskArg(theTask, strict, defaults) # obj.strictUpdate(overrides) # ! would need to re-verify after this ! except Exception as re: # catches RuntimeError and KeyError and ... 
# Since we are loadOnly, don't pop up the GUI for this if strict: raise else: print(re.message.replace('\n\n','\n')) return obj else: assert returnAs in ("dict", "status", None), \ "Invalid value for returnAs arg: "+str(returnAs) dlg = None try: # if setting to all defaults, go ahead and load it here, pre-GUI if defaults: theTask = cfgpars.getObjectFromTaskArg(theTask, strict, True) # now create/run the dialog dlg = ConfigObjEparDialog(theTask, parent=parent, autoClose=autoClose, strict=strict, canExecute=canExecute) # overrides=overrides) except cfgpars.NoCfgFileError as ncf: log_last_error() if errorsToTerm: print(str(ncf).replace('\n\n','\n')) else: popUpErr(parent=parent,message=str(ncf),title="Unfound Task") except Exception as re: # catches RuntimeError and KeyError and ... log_last_error() if errorsToTerm: print(re.message.replace('\n\n','\n')) else: popUpErr(parent=parent, message=re.message, title="Bad Parameters") # Return, depending on the mode in which we are operating if returnAs is None: return if returnAs == "dict": if dlg is None or dlg.canceled(): return None else: return dlg.getTaskParsObj() # else, returnAs == "status" if dlg is None or dlg.canceled(): return -1 if dlg.executed(): return 1 return 0 # save/closed # Note that you should be careful not to use "status" and # autoClose=False, because the user can Save then Cancel def load(theTask, canExecute=True, strict=True, defaults=False): """ Shortcut to load TEAL .cfg files for non-GUI access where loadOnly=True. """ return teal(theTask, parent=None, loadOnly=True, returnAs="dict", canExecute=canExecute, strict=strict, errorsToTerm=True, defaults=defaults) def log_last_error(): import time f = open(cfgpars.getAppDir()+os.sep+'last_error.txt','w') f.write(time.asctime()+'\n\n') f.write(traceback.format_exc()+'\n') f.close() def unlearn(taskPkgName, deleteAll=False): """ Find the task named taskPkgName, and delete any/all user-owned .cfg files in the user's resource directory which apply to that task. Like a unix utility, this returns 0 on success (no files found or only 1 found but deleted). For multiple files found, this uses deleteAll, returning the file-name-list if deleteAll is False (to indicate the problem) and without deleting any files. MUST check return value. This does not prompt the user or print to the screen. """ # this WILL throw an exception if the taskPkgName isn't found flist = cfgpars.getUsrCfgFilesForPyPkg(taskPkgName) # can raise if flist is None or len(flist) == 0: return 0 if len(flist) == 1: os.remove(flist[0]) return 0 # at this point, we know more than one matching file was found if deleteAll: for f in flist: os.remove(f) return 0 else: return flist # let the caller know this is an issue def diffFromDefaults(theTask, report=False): """ Load the given file (or existing object), and return a dict of its values which are different from the default values. If report is set, print to stdout the differences. 
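    Examples
    --------
    A hypothetical session; 'mytask' stands in for any TEAL-enabled task::

        >>> from stsci.tools import teal
        >>> diffs = teal.diffFromDefaults('mytask', report=True)

    The return value is a flat dict mapping parameter names to their
    current, non-default values.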
""" # get the 2 dicts (trees: dicts of dicts) defaultTree = load(theTask, canExecute=False, strict=True, defaults=True) thisTree = load(theTask, canExecute=False, strict=True, defaults=False) # they must be flattenable defaultFlat = cfgpars.flattenDictTree(defaultTree) thisFlat = cfgpars.flattenDictTree(thisTree) # use the "set" operations till there is a dict.diff() # thanks to: http://stackoverflow.com/questions/715234 diffFlat = dict( set(thisFlat.items()) - \ set(defaultFlat.items()) ) if report: defaults_of_diffs_only = {} # { k:defaultFlat[k] for k in diffFlat.keys() } for k in diffFlat: defaults_of_diffs_only[k] = defaultFlat[k] msg = 'Non-default values of "'+str(theTask)+'":\n'+ \ _flat2str(diffFlat)+ \ '\n\nDefault values:\n'+ \ _flat2str(defaults_of_diffs_only) print(msg) return diffFlat def _flat2str(fd): # waiting for a nice pretty-print rv = '{\n' for k in fd.keys(): rv += repr(k)+': '+repr(fd[k])+'\n' return rv+'}' def _isInstalled(fullFname): """ Return True if the given file name is located in an installed area (versus a user-owned file) """ if not fullFname: return False if not os.path.exists(fullFname): return False instAreas = [] try: import site instAreas = site.getsitepackages() except: pass # python 2.6 and lower don't have site.getsitepackages() if len(instAreas) < 1: instAreas = [ os.path.dirname(os.__file__) ] for ia in instAreas: if fullFname.find(ia) >= 0: return True return False def popUpErr(parent=None, message="", title="Error"): # withdraw root, could standardize w/ EditParDialog.__init__() if parent is None: if PY3K: import tkinter root = tkinter.Tk() else: import Tkinter root = Tkinter.Tk() # root.lift() root.after_idle(root.withdraw) showerror(message=message, title=title, parent=parent) # We'd love to somehow force the dialog to the front here in popUpErr (on OSX) # but cannot since the Python process started from the Terminal is not an # Aqua app (unless it became so within PyRAF). This thread # http://objectmix.com/python/350288-tkinter-osx-lift.html # describes it well. def execEmbCode(SCOPE, NAME, VAL, TEAL, codeStr): """ .cfgspc embedded code execution is done here, in a relatively confined space. The variables available to the code to be executed are: SCOPE, NAME, VAL, PARENT, TEAL The code string itself is expected to set a var named OUT """ # This was all we needed in Python 2.x # OUT = None # exec codeStr # return OUT # In Python 3 (& 2.x) be more explicit: http://bugs.python.org/issue4831 PARENT = None if TEAL: PARENT = TEAL.top OUT = None ldict = locals() # will have OUT in it exec(codeStr, globals(), ldict) return ldict['OUT'] def print_tasknames(pkgName, aDir, term_width=80, always=False, hidden=None): """ Print a message listing TEAL-enabled tasks available under a given installation directory (where pkgName resides). If always is True, this will always print when tasks are found; otherwise it will only print found tasks when in interactive mode. The parameter 'hidden' supports a list of input tasknames that should not be reported even though they still exist. 
""" # See if we can bail out early if not always: # We can't use the sys.ps1 check if in PyRAF since it changes sys if 'pyraf' not in sys.modules: # sys.ps1 is only defined in interactive mode if not hasattr(sys, 'ps1'): return # leave here, we're in someone's script # Check for tasks taskDict = cfgpars.findAllCfgTasksUnderDir(aDir) tasks = [x for x in taskDict.values() if len(x) > 0] if hidden: # could even account for a single taskname as input here if needed for x in hidden: if x in tasks: tasks.remove(x) # only be verbose if there something found if len(tasks) > 0: sortedUniqTasks = sorted(set(tasks)) if len(sortedUniqTasks) == 1: tlines = 'The following task in the '+pkgName+\ ' package can be run with TEAL:\n' else: tlines = 'The following tasks in the '+pkgName+\ ' package can be run with TEAL:\n' tlines += printColsAuto(sortedUniqTasks, term_width=term_width, min_pad=2) print(tlines) def getHelpFileAsString(taskname,taskpath): """ This functions will return useful help as a string read from a file in the task's installed directory called ".help". If no such file can be found, it will simply return an empty string. Notes ----- The location of the actual help file will be found under the task's installed directory using 'irafutils.rglob' to search all sub-dirs to find the file. This allows the help file to be either in the tasks installed directory or in any sub-directory, such as a "help/" directory. Parameters ---------- taskname: string Value of `__taskname__` for a module/task taskpath: string Value of `__file__` for an installed module which defines the task Returns ------- helpString: string multi-line string read from the file '.help' """ #get the local library directory where the code is stored pathsplit=os.path.split(taskpath) # taskpath should be task's __file__ if taskname.find('.') > -1: # if taskname is given as package.taskname... helpname=taskname.split(".")[1] # taskname should be __taskname__ from task's module else: helpname = taskname localdir = pathsplit[0] if localdir == '': localdir = '.' helpfile=rglob(localdir,helpname+".help")[0] if os.access(helpfile,os.R_OK): fh=open(helpfile,'r') ss=fh.readlines() fh.close() helpString="" for line in ss: helpString+=line else: helpString= '' return helpString def cfgGetBool(theObj, name, dflt): """ Get a stringified val from a ConfigObj obj and return it as bool """ strval = theObj.get(name, None) if strval is None: return dflt return strval.lower().strip() == 'true' # Main class class ConfigObjEparDialog(editpar.EditParDialog): # i.e. TEAL """ The TEAL GUI. """ FALSEVALS = (None, False, '', 0, 0.0, '0', '0.0', 'OFF', 'Off', 'off', 'NO', 'No', 'no', 'N', 'n', 'FALSE', 'False', 'false') def __init__(self, theTask, parent=None, title=APP_NAME, isChild=0, childList=None, autoClose=False, strict=False, canExecute=True): # overrides=None, self._do_usac = autoClose # Keep track of any passed-in args before creating the _taskParsObj # self._overrides = overrides self._canExecute = canExecute self._strict = strict # Init base - calls _setTaskParsObj(), sets self.taskName, etc # Note that this calls _overrideMasterSettings() editpar.EditParDialog.__init__(self, theTask, parent, isChild, title, childList, resourceDir=cfgpars.getAppDir()) # We don't return from this until the GUI is closed def _overrideMasterSettings(self): """ Override so that we can run in a different mode. 
""" # config-obj dict of defaults cod = self._getGuiSettings() # our own GUI setup self._appName = APP_NAME self._appHelpString = tealHelpString self._useSimpleAutoClose = self._do_usac self._showExtraHelpButton = False self._saveAndCloseOnExec = cfgGetBool(cod, 'saveAndCloseOnExec', True) self._showHelpInBrowser = cfgGetBool(cod, 'showHelpInBrowser', False) self._writeProtectOnSaveAs = cfgGetBool(cod, 'writeProtectOnSaveAsOpt', True) self._flagNonDefaultVals = cfgGetBool(cod, 'flagNonDefaultVals', None) self._optFile = APP_NAME.lower()+".optionDB" # our own colors # prmdrss teal: #00ffaa, pure cyan (teal) #00ffff (darker) #008080 # "#aaaaee" is a darker but good blue, but "#bbbbff" pops ltblu = "#ccccff" # light blue drktl = "#008888" # darkish teal self._frmeColor = cod.get('frameColor', drktl) self._taskColor = cod.get('taskBoxColor', ltblu) self._bboxColor = cod.get('buttonBoxColor', ltblu) self._entsColor = cod.get('entriesColor', ltblu) self._flagColor = cod.get('flaggedColor', 'brown') # double check _canExecute, but only if it is still set to the default if self._canExecute and self._taskParsObj: # default _canExecute=True self._canExecute = self._taskParsObj.canExecute() self._showExecuteButton = self._canExecute # check on the help string - just to see if it is HTML # (could use HTMLParser here if need be, be quick and simple tho) hhh = self.getHelpString(self.pkgName+'.'+self.taskName) if hhh: hhh = hhh.lower() if hhh.find('= 0 or hhh.find('') > 0: self._knowTaskHelpIsHtml = True elif hhh.startswith('http:') or hhh.startswith('https:'): self._knowTaskHelpIsHtml = True elif hhh.startswith('file:') and \ (hhh.endswith('.htm') or hhh.endswith('.html')): self._knowTaskHelpIsHtml = True def _preMainLoop(self): """ Override so that we can do some things right before activating. """ # Put the fname in the title. EditParDialog doesn't do this by default self.updateTitle(self._taskParsObj.filename) def _doActualSave(self, fname, comment, set_ro=False, overwriteRO=False): """ Override this so we can handle case of file not writable, as well as to make our _lastSavedState copy. """ self.debug('Saving, file name given: '+str(fname)+', set_ro: '+\ str(set_ro)+', overwriteRO: '+str(overwriteRO)) cantWrite = False inInstArea = False if fname in (None, ''): fname = self._taskParsObj.getFilename() # now do some final checks then save try: if _isInstalled(fname): # check: may be installed but not read-only inInstArea = cantWrite = True else: # in case of save-as, allow overwrite of read-only file if overwriteRO and os.path.exists(fname): setWritePrivs(fname, True, True) # try make writable # do the save rv=self._taskParsObj.saveParList(filename=fname,comment=comment) except IOError: cantWrite = True # User does not have privs to write to this file. Get name of local # choice and try to use that. if cantWrite: fname = self._taskParsObj.getDefaultSaveFilename() # Tell them the context is changing, and where we are saving msg = 'Read-only config file for task "' if inInstArea: msg = 'Installed config file for task "' msg += self._taskParsObj.getName()+'" is not to be overwritten.'+\ ' Values will be saved to: \n\n\t"'+fname+'".' 
showwarning(message=msg, title="Will not overwrite!") # Try saving to their local copy rv=self._taskParsObj.saveParList(filename=fname, comment=comment) # Treat like a save-as (update title for ALL save ops) self._saveAsPostSave_Hook(fname) # Limit write privs if requested (only if not in the rc dir) if set_ro and os.path.dirname(os.path.abspath(fname)) != \ os.path.abspath(self._rcDir): cfgpars.checkSetReadOnly(fname) # Before returning, make a copy so we know what was last saved. # The dict() method returns a deep-copy dict of the keyvals. self._lastSavedState = self._taskParsObj.dict() return rv def _saveAsPostSave_Hook(self, fnameToBeUsed_UNUSED): """ Override this so we can update the title bar. """ self.updateTitle(self._taskParsObj.filename) # _taskParsObj is correct def hasUnsavedChanges(self): """ Determine if there are any edits in the GUI that have not yet been saved (e.g. to a file). """ # Sanity check - this case shouldn't occur assert self._lastSavedState is not None, \ "BUG: Please report this as it should never occur." # Force the current GUI values into our model in memory, but don't # change anything. Don't save to file, don't even convert bad # values to their previous state in the gui. Note that this can # leave the GUI in a half-saved state, but since we are about to exit # this is OK. We only want prompting to occur if they decide to save. badList = self.checkSetSaveEntries(doSave=False, fleeOnBadVals=True, allowGuiChanges=False) if badList: return True # Then compare our data to the last known saved state. MAKE SURE # the LHS is the actual dict (and not 'self') to invoke the dict # comparison only. return self._lastSavedState != self._taskParsObj # Employ an edited callback for a given item? def _defineEditedCallbackObjectFor(self, parScope, parName): """ Override to allow us to use an edited callback. """ # We know that the _taskParsObj is a ConfigObjPars triggerStrs = self._taskParsObj.getTriggerStrings(parScope, parName) # Some items will have a trigger, but likely most won't if triggerStrs and len(triggerStrs) > 0: return self else: return None def _nonStandardEparOptionFor(self, paramTypeStr): """ Override to allow use of TealActionParButton. Return None or a class which derives from EparOption. """ if paramTypeStr == 'z': from . import teal_bttn return teal_bttn.TealActionParButton else: return None def updateTitle(self, atitle): """ Override so we can append read-only status. """ if atitle and os.path.exists(atitle): if _isInstalled(atitle): atitle += ' [installed]' elif not os.access(atitle, os.W_OK): atitle += ' [read only]' super(ConfigObjEparDialog, self).updateTitle(atitle) def edited(self, scope, name, lastSavedVal, newVal, action): """ This is the callback function invoked when an item is edited. This is only called for those items which were previously specified to use this mechanism. We do not turn this on for all items because the performance might be prohibitive. This kicks off any previously registered triggers. """ # Get name(s) of any triggers that this par triggers triggerNamesTup = self._taskParsObj.getTriggerStrings(scope, name) assert triggerNamesTup is not None and len(triggerNamesTup) > 0, \ 'Empty trigger name for: "'+name+'", consult the .cfgspc file.' # Loop through all trigger names - each one is a trigger to kick off - # in the order that they appear in the tuple we got. Most cases will # probably only have a single trigger in the tuple. 
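        # For example, triggerNamesTup may be ('_section_switch_',) for a
        # simple section toggle, or ('_rule1_',) for a rule whose embedded
        # code lives in the .cfgspc '_RULES_' section; both cases are
        # handled in the loop below.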
for triggerName in triggerNamesTup: # First handle the known/canned trigger names # print (scope, name, newVal, action, triggerName) # DBG: debug line # _section_switch_ if triggerName == '_section_switch_': # Try to uniformly handle all possible par types here, not # just boolean (e.g. str, int, float, etc.) # Also, see logic in _BooleanMixin._coerceOneValue() state = newVal not in self.FALSEVALS self._toggleSectionActiveState(scope, state, (name,)) continue # _2_section_switch_ (see notes above in _section_switch_) if triggerName == '_2_section_switch_': state = newVal not in self.FALSEVALS # toggle most of 1st section (as usual) and ALL of next section self._toggleSectionActiveState(scope, state, (name,)) # get first par of next section (fpons) - is a tuple fpons = self.findNextSection(scope, name) nextSectScope = fpons[0] if nextSectScope: self._toggleSectionActiveState(nextSectScope, state, None) continue # Now handle rules with embedded code (eg. triggerName=='_rule1_') if '_RULES_' in self._taskParsObj and \ triggerName in self._taskParsObj['_RULES_'].configspec: # Get codeStr to execute it, but before we do so, check 'when' - # make sure this is an action that is allowed to cause a trigger ruleSig = self._taskParsObj['_RULES_'].configspec[triggerName] chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig) codeStr = chkArgsDict.get('code') # or None if didn't specify when2run = chkArgsDict.get('when') # or None if didn't specify greenlight = False # do we have a green light to eval the rule? if when2run is None: greenlight = True # means run rule for any possible action else: # 'when' was set to something so we need to check action # check value of action (poor man's enum) assert action in editpar.GROUP_ACTIONS, \ "Unknown action: "+str(action)+', expected one of: '+ \ str(editpar.GROUP_ACTIONS) # check value of 'when' (allow them to use comma-sep'd str) # (readers be aware that values must be those possible for # 'action', and 'always' is also allowed) whenlist = when2run.split(',') # warn for invalid values for w in whenlist: if not w in editpar.GROUP_ACTIONS and w != 'always': print('WARNING: skipping bad value for when kwd: "'+\ w+'" in trigger/rule: '+triggerName) # finally, do the correlation greenlight = 'always' in whenlist or action in whenlist # SECURITY NOTE: because this part executes arbitrary code, that # code string must always be found only in the configspec file, # which is intended to only ever be root-installed w/ the pkg. if codeStr: if not greenlight: continue # not an error, just skip this one self.showStatus("Evaluating "+triggerName+' ...') #dont keep self.top.update_idletasks() #allow msg to draw prior to exec # execute it and retrieve the outcome try: outval = execEmbCode(scope, name, newVal, self, codeStr) except Exception as ex: outval = 'ERROR in '+triggerName+': '+str(ex) print(outval) msg = outval+':\n'+('-'*99)+'\n'+traceback.format_exc() msg += 'CODE: '+codeStr+'\n'+'-'*99+'\n' self.debug(msg) self.showStatus(outval, keep=1) # Leave this debug line in until it annoys someone msg = 'Value of "'+name+'" triggered "'+triggerName+'"' stroutval = str(outval) if len(stroutval) < 30: msg += ' --> "'+stroutval+'"' self.showStatus(msg, keep=0) # Now that we have triggerName evaluated to outval, we need # to look through all the parameters and see if there are # any items to be affected by triggerName (e.g. 
'_rule1_') self._applyTriggerValue(triggerName, outval) continue # If we get here, we have an unknown/unusable trigger raise RuntimeError('Unknown trigger for: "'+name+'", named: "'+ \ str(triggerName)+'". Please consult the .cfgspc file.') def findNextSection(self, scope, name): """ Starts with given par (scope+name) and looks further down the list of parameters until one of a different non-null scope is found. Upon success, returns the (scope, name) tuple, otherwise (None, None). """ # first find index of starting point plist = self._taskParsObj.getParList() start = 0 for i in range(len(plist)): if scope == plist[i].scope and name == plist[i].name: start = i break else: print('WARNING: could not find starting par: '+scope+'.'+name) return (None, None) # now find first different (non-null) scope in a par, after start for i in range(start, len(plist)): if len(plist[i].scope) > 0 and plist[i].scope != scope: return (plist[i].scope, plist[i].name) # else didn't find it return (None, None) def _setTaskParsObj(self, theTask): """ Overridden version for ConfigObj. theTask can be either a .cfg file name or a ConfigObjPars object. """ # Create the ConfigObjPars obj self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask, self._strict, False) # Tell it that we can be used for catching debug lines self._taskParsObj.setDebugLogger(self) # Immediately make a copy of it's un-tampered internal dict. # The dict() method returns a deep-copy dict of the keyvals. self._lastSavedState = self._taskParsObj.dict() # do this here ??!! or before _lastSavedState ??!! # self._taskParsObj.strictUpdate(self._overrides) def _getSaveAsFilter(self): """ Return a string to be used as the filter arg to the save file dialog during Save-As. """ # figure the dir to use, start with the one from the file absRcDir = os.path.abspath(self._rcDir) thedir = os.path.abspath(os.path.dirname(self._taskParsObj.filename)) # skip if not writeable, or if is _rcDir if thedir == absRcDir or not os.access(thedir, os.W_OK): thedir = os.path.abspath(os.path.curdir) # create save-as filter string filt = thedir+'/*.cfg' envVarName = APP_NAME.upper()+'_CFG' if envVarName in os.environ: upx = os.environ[envVarName] if len(upx) > 0: filt = upx+"/*.cfg" # done return filt def _getOpenChoices(self): """ Go through all possible sites to find applicable .cfg files. Return as an iterable. """ tsk = self._taskParsObj.getName() taskFiles = set() dirsSoFar = [] # this helps speed this up (skip unneeded globs) # last dir aDir = os.path.dirname(self._taskParsObj.filename) if len(aDir) < 1: aDir = os.curdir dirsSoFar.append(aDir) taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk)) # current dir aDir = os.getcwd() if aDir not in dirsSoFar: dirsSoFar.append(aDir) taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk)) # task's python pkg dir (if tsk == python pkg name) try: x, pkgf = cfgpars.findCfgFileForPkg(tsk, '.cfg', taskName=tsk, pkgObj=self._taskParsObj.getAssocPkg()) taskFiles.update( (pkgf,) ) except cfgpars.NoCfgFileError: pass # no big deal - maybe there is no python package # user's own resourceDir aDir = self._rcDir if aDir not in dirsSoFar: dirsSoFar.append(aDir) taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk)) # extra loc - see if they used the app's env. 
var aDir = dirsSoFar[0] # flag to skip this if no env var found envVarName = APP_NAME.upper()+'_CFG' if envVarName in os.environ: aDir = os.environ[envVarName] if aDir not in dirsSoFar: dirsSoFar.append(aDir) taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk)) # At the very end, add an option which we will later interpret to mean # to open the file dialog. taskFiles = list(taskFiles) # so as to keep next item at end of seq taskFiles.sort() taskFiles.append("Other ...") return taskFiles # OPEN: load parameter settings from a user-specified file def pfopen(self, event=None): """ Load the parameter settings from a user-specified file. """ # Get the selected file name fname = self._openMenuChoice.get() # Also allow them to simply find any file - do not check _task_name_... # (could use tkinter's FileDialog, but this one is prettier) if fname[-3:] == '...': if capable.OF_TKFD_IN_EPAR: fname = askopenfilename(title="Load Config File", parent=self.top) else: from . import filedlg fd = filedlg.PersistLoadFileDialog(self.top, "Load Config File", self._getSaveAsFilter()) if fd.Show() != 1: fd.DialogCleanup() return fname = fd.GetFileName() fd.DialogCleanup() if not fname: return # canceled self.debug('Loading from: '+fname) # load it into a tmp object (use associatedPkg if we have one) try: tmpObj = cfgpars.ConfigObjPars(fname, associatedPkg=\ self._taskParsObj.getAssocPkg(), strict=self._strict) except Exception as ex: showerror(message=ex.message, title='Error in '+os.path.basename(fname)) self.debug('Error in '+os.path.basename(fname)) self.debug(traceback.format_exc()) return # check it to make sure it is a match if not self._taskParsObj.isSameTaskAs(tmpObj): msg = 'The current task is "'+self._taskParsObj.getName()+ \ '", but the selected file is for task "'+ \ str(tmpObj.getName())+'". This file was not loaded.' showerror(message=msg, title="Error in "+os.path.basename(fname)) self.debug(msg) self.debug(traceback.format_exc()) return # Set the GUI entries to these values (let the user Save after) newParList = tmpObj.getParList() try: self.setAllEntriesFromParList(newParList, updateModel=True) # go ahead and updateModel, even though it will take longer, # we need it updated for the copy of the dict we make below except editpar.UnfoundParamError as pe: showwarning(message=str(pe), title="Error in "+os.path.basename(fname)) # trip any triggers self.checkAllTriggers('fopen') # This new fname is our current context self.updateTitle(fname) self._taskParsObj.filename = fname # !! maybe try setCurrentContext() ? self.freshenFocus() self.showStatus("Loaded values from: "+fname, keep=2) # Since we are in a new context (and have made no changes yet), make # a copy so we know what the last state was. # The dict() method returns a deep-copy dict of the keyvals. self._lastSavedState = self._taskParsObj.dict() def unlearn(self, event=None): """ Override this so that we can set to default values our way. """ self.debug('Clicked defaults') self._setToDefaults() self.freshenFocus() def _handleParListMismatch(self, probStr, extra=False): """ Override to include ConfigObj filename and specific errors. Note that this only handles "missing" pars and "extra" pars, not wrong-type pars. So it isn't that big of a deal. 
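        A typical trigger for this: a .cfg file saved by an older version of
        a task no longer matches the task's current parameter list; the
        printed message then points the user at the stale file to edit or
        delete.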
""" # keep down the duplicate errors if extra: return True # the base class is already stating it will be ignored # find the actual errors, and then add that to the generic message errmsg = 'Warning: ' if self._strict: errmsg = 'ERROR: ' errmsg = errmsg+'mismatch between default and current par lists ' + \ 'for task "'+self.taskName+'".' if probStr: errmsg += '\n\t'+probStr errmsg += '\nTry editing/deleting: "' + \ self._taskParsObj.filename+'" (or, if in PyRAF: "unlearn ' + \ self.taskName+'").' print(errmsg) return True # as we said, not that big a deal def _setToDefaults(self): """ Load the default parameter settings into the GUI. """ # Create an empty object, where every item is set to it's default value try: tmpObj = cfgpars.ConfigObjPars(self._taskParsObj.filename, associatedPkg=\ self._taskParsObj.getAssocPkg(), setAllToDefaults=self.taskName, strict=False) except Exception as ex: msg = "Error Determining Defaults" showerror(message=msg+'\n\n'+ex.message, title="Error Determining Defaults") return # Set the GUI entries to these values (let the user Save after) tmpObj.filename = self._taskParsObj.filename = '' # name it later newParList = tmpObj.getParList() try: self.setAllEntriesFromParList(newParList) # needn't updateModel yet self.checkAllTriggers('defaults') self.updateTitle('') self.showStatus("Loaded default "+self.taskName+" values via: "+ \ os.path.basename(tmpObj._original_configspec), keep=1) except editpar.UnfoundParamError as pe: showerror(message=str(pe), title="Error Setting to Default Values") def getDict(self): """ Retrieve the current parameter settings from the GUI.""" # We are going to have to return the dict so let's # first make sure all of our models are up to date with the values in # the GUI right now. badList = self.checkSetSaveEntries(doSave=False) if badList: self.processBadEntries(badList, self.taskName, canCancel=False) return self._taskParsObj.dict() def loadDict(self, theDict): """ Load the parameter settings from a given dict into the GUI. """ # We are going to have to merge this info into ourselves so let's # first make sure all of our models are up to date with the values in # the GUI right now. badList = self.checkSetSaveEntries(doSave=False) if badList: if not self.processBadEntries(badList, self.taskName): return # now, self._taskParsObj is up-to-date # So now we update _taskParsObj with the input dict cfgpars.mergeConfigObj(self._taskParsObj, theDict) # now sync the _taskParsObj dict with its par list model # '\n'.join([str(jj) for jj in self._taskParsObj.getParList()]) self._taskParsObj.syncParamList(False) # Set the GUI entries to these values (let the user Save after) try: self.setAllEntriesFromParList(self._taskParsObj.getParList(), updateModel=True) self.checkAllTriggers('fopen') self.freshenFocus() self.showStatus('Loaded '+str(len(theDict))+ \ ' user par values for: '+self.taskName, keep=1) except Exception as ex: showerror(message=ex.message, title="Error Setting to Loaded Values") def _getGuiSettings(self): """ Return a dict (ConfigObj) of all user settings found in rcFile. 
""" # Put the settings into a ConfigObj dict (don't use a config-spec) rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg' if os.path.exists(rcFile): try: return configobj.ConfigObj(rcFile) except: raise RuntimeError('Error parsing: '+os.path.realpath(rcFile)) # tho, for simple types, unrepr=True eliminates need for .cfgspc # also, if we turn unrepr on, we don't need cfgGetBool else: return {} def _saveGuiSettings(self): """ The base class doesn't implement this, so we will - save settings (only GUI stuff, not task related) to a file. """ # Put the settings into a ConfigObj dict (don't use a config-spec) rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg' # if os.path.exists(rcFile): os.remove(rcFile) co = configobj.ConfigObj(rcFile) # can skip try-block, won't read file co['showHelpInBrowser'] = self._showHelpInBrowser co['saveAndCloseOnExec'] = self._saveAndCloseOnExec co['writeProtectOnSaveAsOpt'] = self._writeProtectOnSaveAs co['flagNonDefaultVals'] = self._flagNonDefaultVals co['frameColor'] = self._frmeColor co['taskBoxColor'] = self._taskColor co['buttonBoxColor'] = self._bboxColor co['entriesColor'] = self._entsColor co['flaggedColor'] = self._flagColor co.initial_comment = ['Automatically generated by '+\ APP_NAME+'. All edits will eventually be overwritten.'] co.initial_comment.append('To use platform default colors, delete each color line below.') co.final_comment = [''] # ensure \n at EOF co.write() def _applyTriggerValue(self, triggerName, outval): """ Here we look through the entire .cfgspc to see if any parameters are affected by this trigger. For those that are, we apply the action to the GUI widget. The action is specified by depType. """ # First find which items are dependent upon this trigger (cached) # e.g. { scope1.name1 : dep'cy-type, scope2.name2 : dep'cy-type, ... } depParsDict = self._taskParsObj.getParsWhoDependOn(triggerName) if not depParsDict: return if 0: print("Dependent parameters:\n"+str(depParsDict)+"\n") # Get model data, the list of pars theParamList = self._taskParsObj.getParList() # Then go through the dependent pars and apply the trigger to them settingMsg = '' for absName in depParsDict: used = False # For each dep par, loop to find the widget for that scope.name for i in range(self.numParams): scopedName = theParamList[i].scope+'.'+theParamList[i].name # diff from makeFullName!! if absName == scopedName: # a match was found depType = depParsDict[absName] if depType == 'active_if': self.entryNo[i].setActiveState(outval) elif depType == 'inactive_if': self.entryNo[i].setActiveState(not outval) elif depType == 'is_set_by': self.entryNo[i].forceValue(outval, noteEdited=True) # WARNING! noteEdited=True may start recursion! if len(settingMsg) > 0: settingMsg += ", " settingMsg += '"'+theParamList[i].name+'" to "'+\ outval+'"' elif depType in ('set_yes_if', 'set_no_if'): if bool(outval): newval = 'yes' if depType == 'set_no_if': newval = 'no' self.entryNo[i].forceValue(newval, noteEdited=True) # WARNING! noteEdited=True may start recursion! 
    def _applyTriggerValue(self, triggerName, outval):
        """ Here we look through the entire .cfgspc to see if any parameters
        are affected by this trigger. For those that are, we apply the action
        to the GUI widget.  The action is specified by depType. """
        # First find which items are dependent upon this trigger (cached)
        # e.g. { scope1.name1 : dep'cy-type, scope2.name2 : dep'cy-type, ... }
        depParsDict = self._taskParsObj.getParsWhoDependOn(triggerName)
        if not depParsDict:
            return
        if 0: print("Dependent parameters:\n"+str(depParsDict)+"\n")

        # Get model data, the list of pars
        theParamList = self._taskParsObj.getParList()

        # Then go through the dependent pars and apply the trigger to them
        settingMsg = ''
        for absName in depParsDict:
            used = False
            # For each dep par, loop to find the widget for that scope.name
            for i in range(self.numParams):
                scopedName = theParamList[i].scope+'.'+theParamList[i].name # diff from makeFullName!!
                if absName == scopedName: # a match was found
                    depType = depParsDict[absName]
                    if depType == 'active_if':
                        self.entryNo[i].setActiveState(outval)
                    elif depType == 'inactive_if':
                        self.entryNo[i].setActiveState(not outval)
                    elif depType == 'is_set_by':
                        self.entryNo[i].forceValue(outval, noteEdited=True)
                        # WARNING! noteEdited=True may start recursion!
                        if len(settingMsg) > 0:
                            settingMsg += ", "
                        settingMsg += '"'+theParamList[i].name+'" to "'+\
                                      outval+'"'
                    elif depType in ('set_yes_if', 'set_no_if'):
                        if bool(outval):
                            newval = 'yes'
                            if depType == 'set_no_if':
                                newval = 'no'
                            self.entryNo[i].forceValue(newval, noteEdited=True)
                            # WARNING! noteEdited=True may start recursion!
                            if len(settingMsg) > 0:
                                settingMsg += ", "
                            settingMsg += '"'+theParamList[i].name+'" to "'+\
                                          newval+'"'
                        else:
                            if len(settingMsg) > 0:
                                settingMsg += ", "
                            settingMsg += '"'+theParamList[i].name+\
                                          '" (no change)'
                    elif depType == 'is_disabled_by':
                        # this one is only used with boolean types
                        on = self.entryNo[i].convertToNative(outval)
                        if on:
                            # do not activate whole section or change
                            # any values, only activate this one
                            self.entryNo[i].setActiveState(True)
                        else:
                            # for off, set the bool par AND grey WHOLE section
                            self.entryNo[i].forceValue(outval, noteEdited=True)
                            self.entryNo[i].setActiveState(False)
                            # we'd need this if the par had no _section_switch_
                            # self._toggleSectionActiveState(
                            #     theParamList[i].scope, False, None)
                            if len(settingMsg) > 0:
                                settingMsg += ", "
                            settingMsg += '"'+theParamList[i].name+'" to "'+\
                                          outval+'"'
                    else:
                        raise RuntimeError('Unknown dependency: "'+depType+ \
                                           '" for par: "'+scopedName+'"')
                    used = True
                    break

            # Or maybe it is a whole section
            if absName.endswith('._section_'):
                scope = absName[:-10]
                depType = depParsDict[absName]
                if depType == 'active_if':
                    self._toggleSectionActiveState(scope, outval, None)
                elif depType == 'inactive_if':
                    self._toggleSectionActiveState(scope, not outval, None)
                used = True

            # Help to debug the .cfgspc rules
            if not used:
                raise RuntimeError('UNUSED "'+triggerName+'" dependency: '+ \
                                   str({absName: depParsDict[absName]}))

        if len(settingMsg) > 0:
            # why ?!
            self.freshenFocus()
            self.showStatus('Automatically set '+settingMsg, keep=1)
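# Illustrative aside, not from the original source: the depType strings
# handled above are the dependency rules a task's .cfgspc can attach to a
# parameter.  In summary (parameter and trigger names come from the task):
#
#     active_if      -> enable the dependent widget when the trigger is true
#     inactive_if    -> the inverse of active_if
#     is_set_by      -> copy the trigger's output value into the dependent par
#     set_yes_if     -> force a boolean par to 'yes' when the trigger is true
#     set_no_if      -> force a boolean par to 'no' when the trigger is true
#     is_disabled_by -> enable the boolean par while the trigger is true;
#                       otherwise set it and grey out its whole section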
stsci.tools-3.4.12/lib/stsci/tools/teal_bttn.py0000644001120100020070000000542013006721301023140 0ustar jhunkSTSCI\science00000000000000"""teal_bttn.py: for defining the action "parameter" button widget to be
used in TEAL.

$Id$
"""
from __future__ import division, print_function # confidence high

import traceback
from . import eparoption, vtor_checks


class TealActionParButton(eparoption.ActionEparButton):

    def getButtonLabel(self):
        """ Return string to be used as the button label - the "value" of the par. """
        # If the value has a comma, return the 2nd part, else use whole thing
        return self.value.split(',')[-1].strip()

    def getShowName(self):
        """ Return string to be used on LHS of button - "name" of par. """
        # If the value has a comma, return the 1st part, else leave empty
        if self.value.find(',') >= 0:
            return self.value.split(',')[0]
        else:
            return ''

    def flagThisPar(self, currentVal, force):
        """ Override this to do nothing - the value of this par will never
        be wrong and thus never need to be flagged. """
        pass

    def clicked(self):
        """ Called when this button is clicked. Execute code from .cfgspc """
        try:
            from . import teal
        except ImportError:
            teal = None
        # define these before the try-block so the error handler below can
        # always refer to them
        tealGui = self._mainGuiObj
        code = ''
        try:
            # start drilling down into the tpo to get the code
            tealGui.showStatus('Clicked "'+self.getButtonLabel()+'"', keep=1)
            pscope = self.paramInfo.scope
            pname = self.paramInfo.name
            tpo = tealGui._taskParsObj
            tup = tpo.getExecuteStrings(pscope, pname)
            if not tup:
                if teal:
                    teal.popUpErr(tealGui.top, "No action to perform",
                                  "Action Button Error")
                return
            for exname in tup:
                if '_RULES_' in tpo and exname in tpo['_RULES_'].configspec:
                    ruleSig = tpo['_RULES_'].configspec[exname]
                    chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
                    code = chkArgsDict.get('code') # a string or None
                    # now go ahead and execute it
                    teal.execEmbCode(pscope, pname, self.getButtonLabel(),
                                     tealGui, code)
            # done
            tealGui.debug('Finished: "'+self.getButtonLabel()+'"')
        except Exception as ex:
            # use str(ex); the Python-2-only ex.message attribute is gone in 3
            msg = 'Error executing: "'+self.getButtonLabel()+'"\n'+str(ex)
            msgFull = msg+'\n'+''.join(traceback.format_exc())
            msgFull += "CODE:\n"+code
            if tealGui:
                if teal:
                    teal.popUpErr(tealGui.top, msg, "Action Button Error")
                tealGui.debug(msgFull)
            else:
                if teal:
                    teal.popUpErr(None, msg, "Action Button Error")
                print(msgFull)
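# Illustrative aside, not from the original file: an action par's value
# string is "<display name>, <button label>".  A hypothetical value of
# 'Scale WCS, Compute' would show 'Scale WCS' to the left of the button
# (getShowName) and 'Compute' on the button itself (getButtonLabel); a value
# with no comma leaves the left-hand side empty and becomes the whole label.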
""" global pytools_tester_active if modname is not None : curdir = sys.modules[modname].__file__ curdir = os.path.abspath(curdir) curdir = os.path.dirname(curdir) else: raise ValueError('name of module to test not given') DIRS = [os.path.join(curdir, testdir) for testdir in ['tests', 'test']] dirname = None for x in DIRS: if os.path.isdir(x) : dirname = x break if dirname is None : print('no tests found in: %s' % repr(DIRS)) return False if mode == 'nose' : print("Testing with nose in %s\n"%dirname) try: import nose except ImportError: print("Nose 0.10.4 or greater is required for running tests.") raise # First arg is blank, since it's skipped by nose # --exe is needed because easy_install sets all .py files as executable for # some reason args = ['', '--exe', '-w', dirname ] result = False try : pytools_tester_active = True result = nose.run(argv=args) except : pytools_tester_active = False raise pytools_tester_active = False return result if mode == 'pytest' : print("Testing with pytest in %s\n"%dirname) try : import pytest except ImportError : print("py.test is required for running tests") raise # do not use --doctest-modules ; it doesn't work right args = [ dirname ] try : import pandokia args = ['-p', 'pandokia.helpers.pytest_plugin' ] + args except ImportError : pass result = False try : pytools_tester_active = True result = pytest.main(args) except : pytools_tester_active = False raise pytools_tester_active = False return result raise ValueError("invalid test specification - mode must be one of 'nose' or 'pytest'") stsci.tools-3.4.12/lib/stsci/tools/tests/0000755001120100020070000000000013241171572021765 5ustar jhunkSTSCI\science00000000000000stsci.tools-3.4.12/lib/stsci/tools/tests/__init__.py0000644001120100020070000000004013006721301024056 0ustar jhunkSTSCI\science00000000000000from __future__ import division stsci.tools-3.4.12/lib/stsci/tools/tests/cdva2.fits0000644001120100020070000000550013006721301023641 0ustar jhunkSTSCI\science00000000000000SIMPLE = T / conforms to FITS standard BITPIX = 32 / array data type NAXIS = 0 / number of array dimensions EXTEND = T PIXVALUE= 1 / Constant Pixel Value NPIX1 = 10 / length of constant array axis 1 NPIX2 = 10 / length of constant array axis 2 END stsci.tools-3.4.12/lib/stsci/tools/tests/o4sp040b0_raw.fits0000644001120100020070000022220013006721301025044 0ustar jhunkSTSCI\science00000000000000SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 0 / Number of axes EXTEND = T / File may contain extensions ORIGIN = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator IRAF-TLM= '14:58:02 (23/02/2007)' / Time of last modification NEXTEND = 6 / Number of standard extensions DATE = '2007-02-23T19:57:58' / date this file was written (yyyy-mm-dd) FILENAME= 'o4sp040b0_raw.fits ' / name of file FILETYPE= 'SCI ' / type of data found in data file TELESCOP= 'HST' / telescope used to acquire data INSTRUME= 'STIS ' / identifier for instrument used to acquire data EQUINOX = 2000.0 / equinox of celestial coord. 
system / DATA DESCRIPTION KEYWORDS ROOTNAME= 'o4sp040b0 ' / rootname of the observation setPRIMESI = 'STIS ' / instrument designated as prime / TARGET INFORMATION TARGNAME= 'HD101998 ' / proposer's target name RA_TARG = 1.761216666667E+02 / right ascension of the target (deg) (J2000) DEC_TARG= 4.851611111111E+01 / declination of the target (deg) (J2000) / PROPOSAL INFORMATION PROPOSID= 7932 / PEP proposal identifier LINENUM = '3.120 ' / proposal logsheet line number PR_INV_L= 'Leitherer ' / last name of principal investigatorPR_INV_F= 'Claus ' / first name of principal investigator PR_INV_M= ' ' / middle name / initial of principal investigat / SUMMARY EXPOSURE INFORMATION TDATEOBS= '1998-04-20' / UT date of start of first exposure in file TTIMEOBS= '18:38:15' / UT start time of first exposure in file TEXPSTRT= 5.092377657113E+04 / start time (MJD) of 1st exposure in file TEXPEND = 50923.77948761 / end time (MJD) of last exposure in the file TEXPTIME= 120. / total exposure time (seconds) / TARGET OFFSETS (POSTARGS) POSTARG1= 0.000000 / POSTARG in axis 1 direction POSTARG2= 0.000000 / POSTARG in axis 2 direction / DIAGNOSTIC KEYWORDS OVERFLOW= 0 / Number of science data overflows CAL_VER = ' ' / CALSTIS code version PROCTIME= 5.279415092593E+04 / Pipeline processing time (MJD) / SCIENCE INSTRUMENT CONFIGURATION CFSTATUS= 'SUPPORTED ' / configuration status (support., avail., eng.) OBSTYPE = 'SPECTROSCOPIC ' / observation type - imaging or spectroscopic OBSMODE = 'ACCUM ' / operating mode PHOTMODE= ' ' / observation conSCLAMP = 'NONE ' / lamp status, NONE or name of lamp which is on LAMPSET = '0.0 ' / spectral cal lamp current value (milliamps) NRPTEXP = 1 / number of repeat exposures in set: default 1 SUBARRAY= F / data from a subarray (T) or full frame (F) DETECTOR= 'CCD ' / detector in use: NUV-MAMA, FUV-MAMA, or CCD OPT_ELEM= 'G750M ' / optical element in use APERTURE= '0.2X0.2 ' / aperture name PROPAPER= '0.2X0.2 ' / proposed aperture name FILTER = 'Clear ' / filter in use APER_FOV= '0.2x0.2 ' / aperture field of view CENWAVE = 8561 / central wavelength of spectrum CRSPLIT = 4 / number of cosmic ray split exposures / ENGINEERING PARAMETERS CCDAMP = 'D ' / CCD amplifier read out (A,B,C,D) CCDGAIN = 4 / commanded gain of CCD CCDOFFST= 3 / commanded CCD bias offset / READOUT DEFINITION PARAMETERS CENTERA1= 532 / subarray axis1 center pt in unbinned dect. pix CENTERA2= 523 / subarray axis2 center pt in unbinned dect. pix SIZAXIS1= 1062 / subarray axis1 size in unbinned detector pixelsSIZAXIS2= 1044 / subarray axis2 size in unbinned detector pixelsBINAXIS1= 1 / axis1 data bin size in unbinned detector pixelsBINAXIS2= 1 / axis2 data bin size in unbinned detector pixels / CALIBRATION SWITCHES: PERFORM, OMIT, COMPLETE DQICORR = 'PERFORM ' / data quality initialization ATODCORR= 'OMIT ' / correct for A to D conversion errors BLEVCORR= 'PERFORM ' / subtract bias level computed from overscan img BIASCORR= 'PERFORM ' / Subtract bias image CRCORR = 'PERFORM ' / combine observations to reject cosmic rays RPTCORR = 'OMIT ' / add individual repeat observations EXPSCORR= 'PERFORM ' / process individual observations after cr-rejectDARKCORR= 'PERFORM ' / Subtract dark image FLATCORR= 'PERFORM ' / flat field data SHADCORR= 'OMIT ' / apply shutter shading correction STATFLAG= T / Calculate statistics? 
WAVECORR= 'PERFORM ' / use wavecal to adjust wavelength zeropoint X1DCORR = 'PERFORM ' / Perform 1-D spectral extraction BACKCORR= 'PERFORM ' / subtract background (sky and interorder) HELCORR = 'PERFORM ' / convert to heliocenttric wavelengths DISPCORR= 'PERFORM ' / apply 2-dimensional dispersion solutions FLUXCORR= 'PERFORM ' / convert to absolute flux units X2DCORR = 'PERFORM ' / rectify 2-D spectral image / CALIBRATION REFERENCE FILES BPIXTAB = 'otab$h1v11475o_bpx.fits' / bad pixel table DARKFILE= 'oref$jce11265o_drk.fits' / dark image file name PFLTFILE= 'oref$k2910265o_pfl.fits' / pixel to pixel flat field file name DFLTFILE= 'N/A ' / delta flat field file name LFLTFILE= ' ' / low order flat PHOTTAB = 'otab$k9f1452qo_pht.fits' / Photometric throughput table APERTAB = 'otab$laf13369o_apt.fits' / relative aperture throughput table CCDTAB = 'otab$k2g1502eo_ccd.fits' / CCD calibration parameters ATODTAB = 'N/A ' / analog to digital correction file BIASFILE= 'oref$k5h1101io_bia.fits' / bias image file name SHADFILE= 'N/A ' / shutter shading correction file CRREJTAB= 'otab$j3m1403io_crr.fits' / cosmic ray rejection parameters WAVECAL = 'o4sp040b0_wav.fits ' / wavecal image file name APDESTAB= 'otab$m9a16591o_apd.fits' / aperture description table SPTRCTAB= 'otab$l2j0137so_1dt.fits' / spectrum trace table DISPTAB = 'otab$l2j0137to_dsp.fits' / dispersion coefficient table INANGTAB= 'otab$h5s11397o_iac.fits' / incidence angle correction table LAMPTAB = 'otab$l421050oo_lmp.fits' / template calibration lamp spectra table SDCTAB = 'otab$laf1336ao_sdc.fits' / 2-D spatial distortion correction table XTRACTAB= 'otab$lb21642oo_1dx.fits' / parameters for 1-D spectral extraction tabPCTAB = 'otab$n2o1817ko_pct.fits' / Photometry correction table WBIAFILE= 'oref$k5h1101io_bia.fits' / associated wavecal bias image file name WCPTAB = 'otab$lag1815lo_wcp.fits' / wavecal parameters table TDSTAB = 'N/A ' / time-dependent sensitivity algorithm used / COSMIC RAY REJECTION ALGORITHM PARAMETERS MEANEXP = 0.000000 / reference exposure time for parameters SCALENSE= 0.000000 / multiplicative scale factor applied to noise INITGUES= ' ' / initial guess method (MIN or MED) SKYSUB = ' ' / sky value subtracted (MODE or NONE) CRSIGMAS= ' ' / statistical rejection criteria CRRADIUS= 0.000000 / rejection propagation radius (pixels) CRTHRESH= 0.000000 / rejection propagation threshold BADINPDQ= 0 / data quality flag bits to reject REJ_RATE= 0.0 / rate at which pixels are affected by cosmic rayCRMASK = F / flag CR-rejected pixels in input files (T/F) / CALIBRATED ENGINEERING PARAMETERS ATODGAIN= 0.000000 / calibrated CCD amplifier gain value READNSE = 0.000000 / calibrated CCD read noise value / TARGET ACQUISITION DATASET IDENTIFIERS ACQNAME = 'o4sp04DHT ' / rootname of acquisition exposure ACQTYPE = ' ' / type of acquisition PEAKNAM1= 'o4sp04EMT ' / rootname of 1st peakup exposure PEAKNAM2= 'o4sp04ENT ' / rootname of 2nd peakup exposure / PATTERN KEYWORDS PATTERN1= 'NONE ' / primary pattern type P1_SHAPE= ' ' / primary pattern shape P1_PURPS= ' ' / primary pattern purpose P1_NPTS = 0 / number of points in primary pattern P1_PSPAC= 0.000000 / point spacing for primary pattern (arc-sec) P1_LSPAC= 0.000000 / line spacing for primary pattern (arc-sec) P1_ANGLE= 0.000000 / angle between sides of parallelogram patt (deg)P1_FRAME= ' ' / coordinate frame of primary pattern P1_ORINT= 0.000000 / orientation of pattern to coordinate frame (degP1_CENTR= ' ' / center pattern relative to pointing (yes/no) / ARCHIVE SEARCH KEYWORDS 
BANDWID = 572.0 / bandwidth of the data SPECRES = 7780.0 / approx. resolving power at central wavelength CENTRWV = 8561.0 / central wavelength of the data MINWAVE = 8275.0 / minimum wavelength in spectrum MAXWAVE = 8847.0 / maximum wavelength in spectrum PLATESC = 0.05 / plate scale (arcsec/pixel) / PAPER PRODUCT SUPPORT KEYWORDS PROPTTL1= 'Spectral Purity and slit throughputs for the First Order Spectroscop'PROPTTL2= 'ic Modes 'OBSET_ID= '04' / observation set id TARDESCR= 'STAR 'MTFLAG = ' ' / moving target flag; T if it is a moving target PARALLAX= 0.000000000000E+00 / target parallax from proposal MU_RA = 0.000000000000E+00 / target proper motion from proposal (degrees RA)MU_DEC = 0.000000000000E+00 / target proper motion from proposal (deg. DEC) MU_EPOCH= 'J2000.0' / epoch of proper motion from proposal / ASSOCIATION KEYWORDS ASN_ID = 'O4SP040B0 ' / unique identifier assigned to association ASN_TAB = 'o4sp040b0_asn.fits ' / name of the association table LRC_XSTS= F LRC_FAIL= F HISTORY Copied from o4sp040b0_raw.fits END XTENSION= 'IMAGE ' / Image extension BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 62 / Axis length NAXIS2 = 44 / Axis length PCOUNT = 0 / No 'random' parameters GCOUNT = 1 / Only one group ORIGIN = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator EXTNAME = 'SCI ' / Extension name EXTVER = 1 / Extension version INHERIT = F / Inherits global header DATE = '2007-02-23T19:57:58' / Date FITS file was generated IRAF-TLM= '14:57:58 (23/02/2007)' / Time of last modification ROOTNAME= 'o4sp040b0 ' / rootname of the observation setEXPNAME = 'o4sp04ezq ' / exposure identifier BUNIT = 'COUNTS ' / brightness units ASN_MTYP= 'CRSPLIT ' / Role of the Member in the Association / World Coordinate System and Related Parameters WCSAXES = 2 / number of World Coordinate System axes CRPIX1 = 535.384 / x-coordinate of reference pixel CRPIX2 = 536.67 / y-coordinate of reference pixel CRVAL1 = 8.561000000000E+03 / first axis value at reference pixel CRVAL2 = 0.000000000000E+00 / second axis value at reference pixel CTYPE1 = 'LAMBDA ' / the coordinate type for the first axis CTYPE2 = 'ANGLE ' / the coordinate type for the second axis CD1_1 = 0.554 / partial of first axis coordinate w.r.t. x CD1_2 = 0.0 / partial of first axis coordinate w.r.t. y CD2_1 = 0.0 / partial of second axis coordinate w.r.t. x CD2_2 = 1.38889E-05 / partial of second axis coordinate w.r.t. y LTV1 = 19.0 / offset in X to subsection start LTV2 = 20.0 / offset in Y to subsection start LTM1_1 = 1.0 / reciprocal of sampling rate in X LTM2_2 = 1.0 / reciprocal of sampling rate in Y RA_APER = 1.761216666667E+02 / RA of aperture reference position DEC_APER= 4.851611111111E+01 / Declination of aperture reference position PA_APER = 1.143617019653E+02 / Position Angle of reference aperture center (deDISPAXIS= 1 / dispersion axis; 1 = axis 1, 2 = axis 2, none CUNIT1 = 'angstrom' / units of first coordinate value CUNIT2 = 'deg ' / units of second coordinate value / OFFSETS FROM ASSOCIATED WAVECAL SHIFTA1 = 0.000000 / Spectrum shift in AXIS1 calculated from WAVECALSHIFTA2 = 0.000000 / Spectrum shift in AXIS2 calculated from WAVECAL / EXPOSURE INFORMATION ORIENTAT= 114.362 / position angle of image y axis (deg. 
e of n) SUNANGLE= 113.360931 / angle between sun and V1 axis MOONANGL= 131.851257 / angle between moon and V1 axis SUN_ALT = -52.544285 / altitude of the sun above Earth's limb FGSLOCK = 'FINE ' / commanded FGS lock (FINE,COARSE,GYROS,UNKNOWN) DATE-OBS= '1998-04-20' / UT date of start of observation (yyyy-mm-dd) TIME-OBS= '18:38:15' / UT time of start of observation (hh:mm:ss) EXPSTART= 5.092377657113E+04 / exposure start time (Modified Julian Date) EXPEND = 5.092377691835E+04 / exposure end time (Modified Julian Date) EXPTIME = 30.000000 / exposure duration (seconds)--calculated EXPFLAG = 'NORMAL ' / Exposure interruption indicator / PATTERN KEYWORDS PATTSTEP= 0 / position number of this point in the pattern / REPEATED EXPOSURES INFO NCOMBINE= 1 / number of image sets combined during CR rejecti / DATA PACKET INFORMATION FILLCNT = 0 / number of segments containing fill ERRCNT = 0 / number of segments containing errors PODPSFF = F / podps fill present (T/F) STDCFFF = F / ST DDF fill present (T/F) STDCFFP = '0x5569' / ST DDF fill pattern (hex) / ENGINEERING PARAMETERS OSWABSP = 1234272 / Slit Wheel Absolute position OMSCYL1P= 4008 / Mode select cylinder 1 position OMSCYL3P= 1177 / Mode select cylinder 3 position OMSCYL4P= 5297 / Mode select cylinder 4 position OCBABAV = 26.7024 / (V) CEB A&B Amp Bias OCBCDAV = 26.6827 / (V) CEB C&D amp bias OCBLGCDV= -3.38871 / (V) CEB last gate C&D OCBSWALV= -6.00587 / (V) CB summing well A Lo OCBRCDLV= 0.0487692 / (V) CB reset gate CD Lo OCCDHTAV= -1.000000 / average CCD housing temperature (degC) / IMAGE STATISTICS AND DATA QUALITY FLAGS NGOODPIX= 1108728 / number of good pixels SDQFLAGS= 31743 / serious data quality flags GOODMIN = 1486.0 / minimum value of good pixels GOODMAX = 14113.0 / maximum value of good pixels GOODMEAN= 1526.857056 / mean value of good pixels SNRMIN = 0.000000 / minimum signal to noise of good pixels SNRMAX = 0.000000 / maximum signal to noise of good pixels SNRMEAN = 0.000000 / mean value of signal to noise of good pixels SOFTERRS= 0 / number of soft error pixels (DQF=1) MEANDARK= 0.0 / average of the dark values subtracted MEANBLEV= 0.0 / average of all bias levels subtracted / PHOTOMETRY KEYWORDS SPORDER = 1 / Spectral order DIFF2PT = 1.0 / Diffuse to point source conversion factor CONT2EML= 0.000000 / Intensity conversion: continuum -> emission SCALE_A1= 0.000000 / Size of one pixel (arcsec) along dispersion axiOMEGAPIX= 0.000000 / Solid angle (arcsec**2) subtended by one pixel BZERO = 32768 END 
[binary 16-bit pixel data for the SCI,1 image extension omitted from this listing]
XTENSION= 'IMAGE   ' / Image extension
BITPIX  = 16 / Bits per pixel
NAXIS   = 0 / Number of axes
PCOUNT  = 0 / No 'random' parameters
GCOUNT  = 1 / Only one group
ORIGIN  = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator
EXTNAME = 'ERR     ' / Extension name
EXTVER  = 1 / Extension version
INHERIT = F / Inherits global header
DATE    = '2007-02-23T19:57:58' / Date FITS file was generated
IRAF-TLM= '14:57:58 (23/02/2007)' / Time of last modification
ROOTNAME= 'o4sp040b0 ' / rootname of the observation set
EXPNAME = 'o4sp04ezq ' / exposure identifier
BUNIT   = 'COUNTS  ' / brightness units
NPIX1   = 62 / length of constant array axis 1
NPIX2   = 44 / length of constant array axis 2
PIXVALUE= 0.0 / values of pixels in constant array
        / World Coordinate System and Related Parameters
WCSAXES = 2 / number of World Coordinate System axes
CRPIX1  = 535.384 / x-coordinate of reference pixel
CRPIX2  = 536.67 / y-coordinate of reference pixel
CRVAL1  = 8.561000000000E+03 / first axis value at reference pixel
CRVAL2  = 0.000000000000E+00 / second axis value at reference pixel
CTYPE1  = 'LAMBDA  ' / the coordinate type for the first axis
CTYPE2  = 'ANGLE   ' / the coordinate type for the second axis
CD1_1   = 0.554 / partial of first axis coordinate w.r.t. x
CD1_2   = 0. / partial of first axis coordinate w.r.t. y
CD2_1   = 0. / partial of second axis coordinate w.r.t. x
CD2_2   = 1.38889000000000E-5 / partial of second axis coordinate w.r.t. y
LTV1    = 19. / offset in X to subsection start
LTV2    = 20. / offset in Y to subsection start
LTM1_1  = 1. / reciprocal of sampling rate in X
LTM2_2  = 1.
/ reciprocal of sampling rate in Y RA_APER = 1.761216666667E+02 / RA of aperture reference position DEC_APER= 4.851611111111E+01 / Declination of aperture reference position PA_APER = 1.143617019653E+02 / Position Angle of reference aperture center (deDISPAXIS= 1 / dispersion axis; 1 = axis 1, 2 = axis 2, none CUNIT1 = 'angstrom' / units of first coordinate value CUNIT2 = 'deg ' / units of second coordinate value / OFFSETS FROM ASSOCIATED WAVECAL SHIFTA1 = 0.000000 / Spectrum shift in AXIS1 calculated from WAVECALSHIFTA2 = 0.000000 / Spectrum shift in AXIS2 calculated from WAVECAL / NOISE MODEL KEYWORDS NOISEMOD= ' ' / noise model equation NOISCOF1= 0.000000000000E+00 / noise coefficient 1 NOISCOF2= 0.000000000000E+00 / noise coefficient 2 NOISCOF3= 0.000000000000E+00 / noise coefficient 3 NOISCOF4= 0.000000000000E+00 / noise coefficient 4 NOISCOF5= 0.000000000000E+00 / noise coefficient 5 / IMAGE STATISTICS AND DATA QUALITY FLAGS NGOODPIX= 1108728 / number of good pixels SDQFLAGS= 31743 / serious data quality flags GOODMIN = 1486.0 / minimum value of good pixels GOODMAX = 14113.0 / maximum value of good pixels GOODMEAN= 1526.857056 / mean value of good pixels LTM2_1 = 0. LTM1_2 = 0. END XTENSION= 'IMAGE ' / Image extension BITPIX = 16 / Bits per pixel NAXIS = 0 / Number of axes PCOUNT = 0 / No 'random' parameters GCOUNT = 1 / Only one group ORIGIN = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator EXTNAME = 'DQ ' / Extension name EXTVER = 1 / Extension version INHERIT = F / Inherits global header DATE = '2007-02-23T19:57:58' / Date FITS file was generated IRAF-TLM= '14:57:58 (23/02/2007)' / Time of last modification ROOTNAME= 'o4sp040b0 ' / rootname of the observation setEXPNAME = 'o4sp04ezq ' / exposure identifier BUNIT = 'UNITLESS ' / brightness units NPIX1 = 62 / length of constant array axis 1 NPIX2 = 44 / length of constant array axis 2 PIXVALUE= 0 / values of pixels in constant array / World Coordinate System and Related Parameters WCSAXES = 2 / number of World Coordinate System axes CRPIX1 = 535.384 / x-coordinate of reference pixel CRPIX2 = 536.67 / y-coordinate of reference pixel CRVAL1 = 8.561000000000E+03 / first axis value at reference pixel CRVAL2 = 0.000000000000E+00 / second axis value at reference pixel CTYPE1 = 'LAMBDA ' / the coordinate type for the first axis CTYPE2 = 'ANGLE ' / the coordinate type for the second axis CD1_1 = 0.554 / partial of first axis coordinate w.r.t. x CD1_2 = 0. / partial of first axis coordinate w.r.t. y CD2_1 = 0. / partial of second axis coordinate w.r.t. x CD2_2 = 1.38889000000000E-5 / partial of second axis coordinate w.r.t. y LTV1 = 19. / offset in X to subsection start LTV2 = 20. / offset in Y to subsection start LTM1_1 = 1. / reciprocal of sampling rate in X LTM2_2 = 1. / reciprocal of sampling rate in Y RA_APER = 1.761216666667E+02 / RA of aperture reference position DEC_APER= 4.851611111111E+01 / Declination of aperture reference position PA_APER = 1.143617019653E+02 / Position Angle of reference aperture center (deDISPAXIS= 1 / dispersion axis; 1 = axis 1, 2 = axis 2, none CUNIT1 = 'angstrom' / units of first coordinate value CUNIT2 = 'deg ' / units of second coordinate value / OFFSETS FROM ASSOCIATED WAVECAL SHIFTA1 = 0.000000 / Spectrum shift in AXIS1 calculated from WAVECALSHIFTA2 = 0.000000 / Spectrum shift in AXIS2 calculated from WAVECALLTM2_1 = 0. LTM1_2 = 0. 
END XTENSION= 'IMAGE ' / Image extension BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 62 / Axis length NAXIS2 = 44 / Axis length PCOUNT = 0 / No 'random' parameters GCOUNT = 1 / Only one group ORIGIN = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator EXTNAME = 'SCI ' / Extension name EXTVER = 2 / Extension version INHERIT = F / Inherits global header DATE = '2007-02-23T19:57:59' / Date FITS file was generated IRAF-TLM= '14:57:58 (23/02/2007)' / Time of last modification ROOTNAME= 'o4sp040b0 ' / rootname of the observation setEXPNAME = 'o4sp04f0q ' / exposure identifier BUNIT = 'COUNTS ' / brightness units ASN_MTYP= 'CRSPLIT ' / Role of the Member in the Association / World Coordinate System and Related Parameters WCSAXES = 2 / number of World Coordinate System axes CRPIX1 = 535.384 / x-coordinate of reference pixel CRPIX2 = 536.67 / y-coordinate of reference pixel CRVAL1 = 8.561000000000E+03 / first axis value at reference pixel CRVAL2 = 0.000000000000E+00 / second axis value at reference pixel CTYPE1 = 'LAMBDA ' / the coordinate type for the first axis CTYPE2 = 'ANGLE ' / the coordinate type for the second axis CD1_1 = 0.554 / partial of first axis coordinate w.r.t. x CD1_2 = 0.0 / partial of first axis coordinate w.r.t. y CD2_1 = 0.0 / partial of second axis coordinate w.r.t. x CD2_2 = 1.38889E-05 / partial of second axis coordinate w.r.t. y LTV1 = 19.0 / offset in X to subsection start LTV2 = 20.0 / offset in Y to subsection start LTM1_1 = 1.0 / reciprocal of sampling rate in X LTM2_2 = 1.0 / reciprocal of sampling rate in Y RA_APER = 1.761216666667E+02 / RA of aperture reference position DEC_APER= 4.851611111111E+01 / Declination of aperture reference position PA_APER = 1.143617019653E+02 / Position Angle of reference aperture center (deDISPAXIS= 1 / dispersion axis; 1 = axis 1, 2 = axis 2, none CUNIT1 = 'angstrom' / units of first coordinate value CUNIT2 = 'deg ' / units of second coordinate value / OFFSETS FROM ASSOCIATED WAVECAL SHIFTA1 = 0.000000 / Spectrum shift in AXIS1 calculated from WAVECALSHIFTA2 = 0.000000 / Spectrum shift in AXIS2 calculated from WAVECAL / EXPOSURE INFORMATION ORIENTAT= 114.362 / position angle of image y axis (deg. 
e of n) SUNANGLE= 113.360229 / angle between sun and V1 axis MOONANGL= 131.857269 / angle between moon and V1 axis SUN_ALT = -58.053928 / altitude of the sun above Earth's limb FGSLOCK = 'FINE ' / commanded FGS lock (FINE,COARSE,GYROS,UNKNOWN) DATE-OBS= '1998-04-20' / UT date of start of observation (yyyy-mm-dd) TIME-OBS= '18:39:29' / UT time of start of observation (hh:mm:ss) EXPSTART= 5.092377742742E+04 / exposure start time (Modified Julian Date) EXPEND = 5.092377777464E+04 / exposure end time (Modified Julian Date) EXPTIME = 30.000000 / exposure duration (seconds)--calculated EXPFLAG = 'NORMAL ' / Exposure interruption indicator / PATTERN KEYWORDS PATTSTEP= 0 / position number of this point in the pattern / REPEATED EXPOSURES INFO NCOMBINE= 1 / number of image sets combined during CR rejecti / DATA PACKET INFORMATION FILLCNT = 0 / number of segments containing fill ERRCNT = 0 / number of segments containing errors PODPSFF = F / podps fill present (T/F) STDCFFF = F / ST DDF fill present (T/F) STDCFFP = '0x5569' / ST DDF fill pattern (hex) / ENGINEERING PARAMETERS OSWABSP = 1234272 / Slit Wheel Absolute position OMSCYL1P= 4008 / Mode select cylinder 1 position OMSCYL3P= 1177 / Mode select cylinder 3 position OMSCYL4P= 5297 / Mode select cylinder 4 position OCBABAV = 26.7024 / (V) CEB A&B Amp Bias OCBCDAV = 26.7024 / (V) CEB C&D amp bias OCBLGCDV= -3.37895 / (V) CEB last gate C&D OCBSWALV= -5.99614 / (V) CB summing well A Lo OCBRCDLV= 0.0390036 / (V) CB reset gate CD Lo OCCDHTAV= -1.000000 / average CCD housing temperature (degC) / IMAGE STATISTICS AND DATA QUALITY FLAGS NGOODPIX= 1108728 / number of good pixels SDQFLAGS= 31743 / serious data quality flags GOODMIN = 1484.0 / minimum value of good pixels GOODMAX = 14089.0 / maximum value of good pixels GOODMEAN= 1526.973145 / mean value of good pixels SNRMIN = 0.000000 / minimum signal to noise of good pixels SNRMAX = 0.000000 / maximum signal to noise of good pixels SNRMEAN = 0.000000 / mean value of signal to noise of good pixels SOFTERRS= 0 / number of soft error pixels (DQF=1) MEANDARK= 0.0 / average of the dark values subtracted MEANBLEV= 0.0 / average of all bias levels subtracted / PHOTOMETRY KEYWORDS SPORDER = 1 / Spectral order DIFF2PT = 1.0 / Diffuse to point source conversion factor CONT2EML= 0.000000 / Intensity conversion: continuum -> emission SCALE_A1= 0.000000 / Size of one pixel (arcsec) along dispersion axiOMEGAPIX= 0.000000 / Solid angle (arcsec**2) subtended by one pixel BZERO = 32768 END 
[binary 16-bit pixel data for the SCI,2 image extension omitted from this listing]
XTENSION= 'IMAGE   ' / Image extension
BITPIX  = 16 / Bits per pixel
NAXIS   = 0 / Number of axes
PCOUNT  = 0 / No 'random' parameters
GCOUNT  = 1 / Only one group
ORIGIN  = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator
EXTNAME = 'ERR     ' / Extension name
EXTVER  = 2 / Extension version
INHERIT = F / Inherits global header
DATE    = '2007-02-23T19:57:59' / Date FITS file was generated
IRAF-TLM= '14:57:59 (23/02/2007)' / Time of last modification
ROOTNAME= 'o4sp040b0 ' / rootname of the observation set
EXPNAME = 'o4sp04f0q ' / exposure identifier
BUNIT   = 'COUNTS  ' / brightness units
NPIX1   = 62 / length of constant array axis 1
NPIX2   = 44 / length of constant array axis 2
PIXVALUE= 0.0 / values of pixels in constant array
        / World Coordinate System and Related Parameters
WCSAXES = 2 / number of World Coordinate System axes
CRPIX1  = 535.384 / x-coordinate of reference pixel
CRPIX2  = 536.67 / y-coordinate of reference pixel
CRVAL1  = 8.561000000000E+03 / first axis value at reference pixel
CRVAL2  = 0.000000000000E+00 / second axis value at reference pixel
CTYPE1  = 'LAMBDA  ' / the coordinate type for the first axis
CTYPE2  = 'ANGLE   ' / the coordinate type for the second axis
CD1_1   = 0.554 / partial of first axis coordinate w.r.t. x
CD1_2   = 0. / partial of first axis coordinate w.r.t. y
CD2_1   = 0. / partial of second axis coordinate w.r.t. x
CD2_2   = 1.38889000000000E-5 / partial of second axis coordinate w.r.t. y
LTV1    = 19. / offset in X to subsection start
LTV2    = 20. / offset in Y to subsection start
LTM1_1  = 1. / reciprocal of sampling rate in X
LTM2_2  = 1.
/ reciprocal of sampling rate in Y RA_APER = 1.761216666667E+02 / RA of aperture reference position DEC_APER= 4.851611111111E+01 / Declination of aperture reference position PA_APER = 1.143617019653E+02 / Position Angle of reference aperture center (deDISPAXIS= 1 / dispersion axis; 1 = axis 1, 2 = axis 2, none CUNIT1 = 'angstrom' / units of first coordinate value CUNIT2 = 'deg ' / units of second coordinate value / OFFSETS FROM ASSOCIATED WAVECAL SHIFTA1 = 0.000000 / Spectrum shift in AXIS1 calculated from WAVECALSHIFTA2 = 0.000000 / Spectrum shift in AXIS2 calculated from WAVECAL / NOISE MODEL KEYWORDS NOISEMOD= ' ' / noise model equation NOISCOF1= 0.000000000000E+00 / noise coefficient 1 NOISCOF2= 0.000000000000E+00 / noise coefficient 2 NOISCOF3= 0.000000000000E+00 / noise coefficient 3 NOISCOF4= 0.000000000000E+00 / noise coefficient 4 NOISCOF5= 0.000000000000E+00 / noise coefficient 5 / IMAGE STATISTICS AND DATA QUALITY FLAGS NGOODPIX= 1108728 / number of good pixels SDQFLAGS= 31743 / serious data quality flags GOODMIN = 1484.0 / minimum value of good pixels GOODMAX = 14089.0 / maximum value of good pixels GOODMEAN= 1526.973145 / mean value of good pixels LTM2_1 = 0. LTM1_2 = 0. END XTENSION= 'IMAGE ' / Image extension BITPIX = 16 / Bits per pixel NAXIS = 0 / Number of axes PCOUNT = 0 / No 'random' parameters GCOUNT = 1 / Only one group ORIGIN = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator EXTNAME = 'DQ ' / Extension name EXTVER = 2 / Extension version INHERIT = F / Inherits global header DATE = '2007-02-23T19:57:59' / Date FITS file was generated IRAF-TLM= '14:57:59 (23/02/2007)' / Time of last modification ROOTNAME= 'o4sp040b0 ' / rootname of the observation setEXPNAME = 'o4sp04f0q ' / exposure identifier BUNIT = 'UNITLESS ' / brightness units NPIX1 = 62 / length of constant array axis 1 NPIX2 = 44 / length of constant array axis 2 PIXVALUE= 0 / values of pixels in constant array / World Coordinate System and Related Parameters WCSAXES = 2 / number of World Coordinate System axes CRPIX1 = 535.384 / x-coordinate of reference pixel CRPIX2 = 536.67 / y-coordinate of reference pixel CRVAL1 = 8.561000000000E+03 / first axis value at reference pixel CRVAL2 = 0.000000000000E+00 / second axis value at reference pixel CTYPE1 = 'LAMBDA ' / the coordinate type for the first axis CTYPE2 = 'ANGLE ' / the coordinate type for the second axis CD1_1 = 0.554 / partial of first axis coordinate w.r.t. x CD1_2 = 0. / partial of first axis coordinate w.r.t. y CD2_1 = 0. / partial of second axis coordinate w.r.t. x CD2_2 = 1.38889000000000E-5 / partial of second axis coordinate w.r.t. y LTV1 = 19. / offset in X to subsection start LTV2 = 20. / offset in Y to subsection start LTM1_1 = 1. / reciprocal of sampling rate in X LTM2_2 = 1. / reciprocal of sampling rate in Y RA_APER = 1.761216666667E+02 / RA of aperture reference position DEC_APER= 4.851611111111E+01 / Declination of aperture reference position PA_APER = 1.143617019653E+02 / Position Angle of reference aperture center (deDISPAXIS= 1 / dispersion axis; 1 = axis 1, 2 = axis 2, none CUNIT1 = 'angstrom' / units of first coordinate value CUNIT2 = 'deg ' / units of second coordinate value / OFFSETS FROM ASSOCIATED WAVECAL SHIFTA1 = 0.000000 / Spectrum shift in AXIS1 calculated from WAVECALSHIFTA2 = 0.000000 / Spectrum shift in AXIS2 calculated from WAVECALLTM2_1 = 0. LTM1_2 = 0. 
END
stsci.tools-3.4.12/lib/stsci/tools/tests/test_bitmask.py0000644001120100020070000001163213241163620025026 0ustar jhunkSTSCI\science00000000000000"""
A module containing unit tests for the `bitmask` module.

:Authors: Mihai Cara (contact: help@stsci.edu)

"""
from __future__ import (absolute_import, division, unicode_literals,
                        print_function)

import warnings
import numpy as np
import pytest

from stsci.tools import bitmask

MAX_INT_TYPE = np.maximum_sctype(np.int)
MAX_UINT_TYPE = np.maximum_sctype(np.uint)
MAX_UINT_FLAG = np.left_shift(
    MAX_UINT_TYPE(1),
    MAX_UINT_TYPE(np.iinfo(MAX_UINT_TYPE).bits - 1)
)
MAX_INT_FLAG = np.left_shift(
    MAX_INT_TYPE(1),
    MAX_INT_TYPE(np.iinfo(MAX_INT_TYPE).bits - 2)
)
SUPER_LARGE_FLAG = 1 << np.iinfo(MAX_UINT_TYPE).bits
EXTREME_TEST_DATA = np.array([
    0, 1, 1 + 1 << 2, MAX_INT_FLAG, ~0,
    MAX_INT_TYPE(MAX_UINT_FLAG), 1 + MAX_INT_TYPE(MAX_UINT_FLAG)
], dtype=MAX_INT_TYPE)


@pytest.mark.parametrize('flag', [0, -1])
def test_nonpositive_not_a_bit_flag(flag):
    assert not bitmask.is_bit_flag(n=flag)


@pytest.mark.parametrize('flag', [
    1, MAX_UINT_FLAG, int(MAX_UINT_FLAG), SUPER_LARGE_FLAG
])
def test_is_bit_flag(flag):
    assert bitmask.is_bit_flag(n=flag)


@pytest.mark.parametrize('number', [0, 1, MAX_UINT_FLAG, SUPER_LARGE_FLAG])
def test_is_int(number):
    assert bitmask._is_int(number)


@pytest.mark.parametrize('number', ['1', True, 1.0])
def test_nonint_is_not_an_int(number):
    assert not bitmask._is_int(number)


@pytest.mark.parametrize('flag,flip,expected', [
    (3, None, 3),
    (3, True, -4),
    (3, False, 3),
    ([1, 2], False, 3),
    ([1, 2], True, -4)
])
def test_interpret_valid_int_bit_flags(flag, flip, expected):
    assert(
        bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip) == expected
    )


@pytest.mark.parametrize('flag', [None, ' ', 'None', 'Indef'])
def test_interpret_none_bit_flags_as_None(flag):
    assert bitmask.interpret_bit_flags(bit_flags=flag) is None


@pytest.mark.parametrize('flag,expected', [
    ('1', 1),
    ('~-1', ~(-1)),
    ('~1', ~1),
    ('1,2', 3),
    ('1+2', 3),
    ('(1,2)', 3),
    ('(1+2)', 3),
    ('~1,2', ~3),
    ('~1+2', ~3),
    ('~(1,2)', ~3),
    ('~(1+2)', ~3)
])
def test_interpret_valid_str_bit_flags(flag, expected):
    assert(
        bitmask.interpret_bit_flags(bit_flags=flag) == expected
    )
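# Illustrative aside, not from the original test module: taken together, the
# parametrized cases above say that the string syntax composes integer masks:
#
#     bitmask.interpret_bit_flags('1,2')     # -> 3, i.e. 1 | 2
#     bitmask.interpret_bit_flags('~(1,2)')  # -> ~3, everything but those bits
#     bitmask.interpret_bit_flags('None')    # -> None, no flags selected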
@pytest.mark.parametrize('flag,flip', [
    (None, True), (' ', True), ('None', True), ('Indef', True),
    (None, False), (' ', False), ('None', False), ('Indef', False),
    ('1', True), ('1', False)
])
def test_interpret_None_or_str_and_flip_incompatibility(flag, flip):
    with pytest.raises(TypeError):
        bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip)


@pytest.mark.parametrize('flag', [True, 1.0, [1.0], object])
def test_interpret_wrong_flag_type(flag):
    with pytest.raises(TypeError):
        bitmask.interpret_bit_flags(bit_flags=flag)


@pytest.mark.parametrize('flag', ['SOMETHING', '1.0,2,3'])
def test_interpret_wrong_string_int_format(flag):
    with pytest.raises(ValueError):
        bitmask.interpret_bit_flags(bit_flags=flag)


def test_interpret_duplicate_flag_warning():
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        assert bitmask.interpret_bit_flags([2, 4, 4]) == 6
        assert len(w)
        assert issubclass(w[-1].category, UserWarning)
        assert "Duplicate" in str(w[-1].message)


@pytest.mark.parametrize('flag', [[1, 2, 3], '1, 2, 3'])
def test_interpret_non_flag(flag):
    with pytest.raises(ValueError):
        bitmask.interpret_bit_flags(bit_flags=flag)


def test_interpret_allow_single_value_str_nonflags():
    assert bitmask.interpret_bit_flags(bit_flags=str(3)) == 3


@pytest.mark.parametrize('flag', [
    '~', '( )', '(~1,2)', '~(1,2',
    '1,~2', '1,(2,4)', '1,2+4', '1+4,2'
])
def test_interpret_bad_str_syntax(flag):
    with pytest.raises(ValueError):
        bitmask.interpret_bit_flags(bit_flags=flag)


def test_bitfield_must_be_integer_check():
    with pytest.raises(TypeError):
        bitmask.bitfield_to_boolean_mask(1.0, 1)


@pytest.mark.parametrize('data,flags,flip,goodval,dtype,ref', [
    (EXTREME_TEST_DATA, None, None, True, np.bool_,
     EXTREME_TEST_DATA.size * [1]),
    (EXTREME_TEST_DATA, None, None, False, np.bool_,
     EXTREME_TEST_DATA.size * [0]),
    (EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, True, np.bool_,
     [1, 1, 0, 0, 0, 1, 1]),
    (EXTREME_TEST_DATA, None, None, True, np.bool_,
     EXTREME_TEST_DATA.size * [1]),
    (EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, False, np.bool_,
     [0, 0, 1, 1, 1, 0, 0]),
    (EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], True, True, np.int8,
     [1, 0, 1, 1, 0, 0, 0])
])
def test_bitfield_to_boolean_mask(data, flags, flip, goodval, dtype, ref):
    mask = bitmask.bitfield_to_boolean_mask(
        bitfield=data, ignore_flags=flags, flip_bits=flip,
        good_mask_value=goodval, dtype=dtype
    )

    assert(mask.dtype == dtype)
    assert np.all(mask == ref)
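# Illustrative aside, not from the original test module: a minimal sketch of
# the function under test on a small hand-made DQ array (values chosen here,
# not taken from the suite):
#
#     import numpy as np
#     from stsci.tools import bitmask
#
#     dq = np.array([0, 1, 2, 3, 4])
#     # ignore bit 1: a pixel is good (True) unless some *other* bit is set
#     mask = bitmask.bitfield_to_boolean_mask(dq, ignore_flags=1,
#                                             good_mask_value=True)
#     # mask -> [True, True, False, False, False]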
stsci.tools-3.4.12/lib/stsci/tools/tests/test_stpyfits.py0000644001120100020070000007123113241163620025262 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python
from __future__ import division # confidence high

import os
import tempfile

import astropy
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.tests import FitsTestCase
from distutils.version import LooseVersion

import stsci.tools.stpyfits as stpyfits

ASTROPY_VER_GE13 = LooseVersion(astropy.__version__) >= LooseVersion('1.3')
ASTROPY_VER_GE20 = LooseVersion(astropy.__version__) >= LooseVersion('2.0')


class TestStpyfitsFunctions(FitsTestCase):
    def setup(self):
        self.data_dir = os.path.dirname(__file__)
        self.temp_dir = tempfile.mkdtemp(prefix='stpyfits-test-')

        if ASTROPY_VER_GE13:
            self.writekwargs = {'overwrite': True}
        else:
            self.writekwargs = {'clobber': True}

    def test_InfoConvienceFunction(self):
        """Test the info convenience function in both the fits and stpyfits
        namespace."""
        if ASTROPY_VER_GE20:
            ans1 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 215, (), '', ''),
                    (1, 'SCI', 1, 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (2, 'ERR', 1, 'ImageHDU', 71, (62, 44), 'int16', ''),
                    (3, 'DQ', 1, 'ImageHDU', 71, (62, 44), 'int16', ''),
                    (4, 'SCI', 2, 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (5, 'ERR', 2, 'ImageHDU', 71, (62, 44), 'int16', ''),
                    (6, 'DQ', 2, 'ImageHDU', 71, (62, 44), 'int16', '')]
            ans2 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 215, (), '', ''),
                    (1, 'SCI', 1, 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (2, 'ERR', 1, 'ImageHDU', 71, (), '', ''),
                    (3, 'DQ', 1, 'ImageHDU', 71, (), '', ''),
                    (4, 'SCI', 2, 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (5, 'ERR', 2, 'ImageHDU', 71, (), '', ''),
                    (6, 'DQ', 2, 'ImageHDU', 71, (), '', '')]
            ans3 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (10, 10), 'int32', '')]
            ans4 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (), '', '')]
        else:
            ans1 = [(0, 'PRIMARY', 'PrimaryHDU', 215, (), '', ''),
                    (1, 'SCI', 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (2, 'ERR', 'ImageHDU', 71, (62, 44), 'int16', ''),
                    (3, 'DQ', 'ImageHDU', 71, (62, 44), 'int16', ''),
                    (4, 'SCI', 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (5, 'ERR', 'ImageHDU', 71, (62, 44), 'int16', ''),
                    (6, 'DQ', 'ImageHDU', 71, (62, 44), 'int16', '')]
            ans2 = [(0, 'PRIMARY', 'PrimaryHDU', 215, (), '', ''),
                    (1, 'SCI', 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (2, 'ERR', 'ImageHDU', 71, (), '', ''),
                    (3, 'DQ', 'ImageHDU', 71, (), '', ''),
                    (4, 'SCI', 'ImageHDU', 141, (62, 44),
                     'int16 (rescales to uint16)', ''),
                    (5, 'ERR', 'ImageHDU', 71, (), '', ''),
                    (6, 'DQ', 'ImageHDU', 71, (), '', '')]
            ans3 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', '')]
            ans4 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', '')]

        assert stpyfits.info(self.data('o4sp040b0_raw.fits'),
                             output=False) == ans1
        assert fits.info(self.data('o4sp040b0_raw.fits'),
                         output=False) == ans2
        assert stpyfits.info(self.data('cdva2.fits'), output=False) == ans3
        assert fits.info(self.data('cdva2.fits'), output=False) == ans4

    def test_OpenConvienceFunction(self):
        """Test the open convenience function in both the fits and stpyfits
        namespace."""
        hdul = stpyfits.open(self.data('cdva2.fits'))
        hdul1 = fits.open(self.data('cdva2.fits'))

        assert hdul[0].header['NAXIS'] == 2
        assert hdul1[0].header['NAXIS'] == 0
        assert hdul[0].header['NAXIS1'] == 10
        assert hdul[0].header['NAXIS2'] == 10

        for k in ('NAXIS1', 'NAXIS2'):
            with pytest.raises(KeyError):
                hdul1[0].header[k]

        for k in ('NPIX1', 'NPIX2'):
            with pytest.raises(KeyError):
                hdul[0].header[k]

        assert hdul1[0].header['NPIX1'] == 10
        assert hdul1[0].header['NPIX2'] == 10

        assert (hdul[0].data == np.ones((10, 10), dtype=np.int32)).all()
        assert hdul1[0].data is None

        hdul.close()
        hdul1.close()
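    # Illustrative aside, not from the original test class: the two tests
    # above capture the central stpyfits behavior.  For a header that carries
    # PIXVALUE/NPIX1/NPIX2 (see cdva2.fits earlier in this archive),
    #
    #     stpyfits.open('cdva2.fits')[0].data   # 10x10 array filled with 1
    #     fits.open('cdva2.fits')[0].data       # None; NPIX1/NPIX2 left as-is
    #
    # i.e. stpyfits synthesizes the constant-value array that plain
    # astropy.io.fits does not expand.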
    @pytest.mark.parametrize(['filename', 'ext', 'naxis1', 'naxis2'],
                             [('cdva2.fits', None, 10, 10),
                              ('o4sp040b0_raw.fits', 2, 62, 44)])
    def test_GetHeaderConvienceFunction(self, filename, ext, naxis1, naxis2):
        """Test the getheader convenience function in both the fits and
        stpyfits namespace."""
        if ext is None:
            hd = stpyfits.getheader(self.data(filename))
            hd1 = fits.getheader(self.data(filename))
        else:
            hd = stpyfits.getheader(self.data(filename), ext)
            hd1 = fits.getheader(self.data(filename), ext)

        assert hd['NAXIS'] == 2
        assert hd1['NAXIS'] == 0
        assert hd['NAXIS1'] == naxis1
        assert hd['NAXIS2'] == naxis2

        for k in ('NAXIS1', 'NAXIS2'):
            with pytest.raises(KeyError):
                hd1[k]

        for k in ('NPIX1', 'NPIX2'):
            with pytest.raises(KeyError):
                hd[k]

        assert hd1['NPIX1'] == naxis1
        assert hd1['NPIX2'] == naxis2

    def test_GetDataConvienceFunction(self):
        """Test the getdata convenience function in both the fits and
        stpyfits namespace."""
        d = stpyfits.getdata(self.data('cdva2.fits'))
        assert (d == np.ones((10, 10), dtype=np.int32)).all()
        with pytest.raises(IndexError):
            fits.getdata(self.data('cdva2.fits'))

    def test_GetValConvienceFunction(self):
        """Test the getval convenience function in both the fits and
        stpyfits namespace."""
        val = stpyfits.getval(self.data('cdva2.fits'), 'NAXIS', 0)
        val1 = fits.getval(self.data('cdva2.fits'), 'NAXIS', 0)
        assert val == 2
        assert val1 == 0

    def test_writetoConvienceFunction(self):
        """Test the writeto convenience function in both the fits and
        stpyfits namespace."""
        hdul = stpyfits.open(self.data('cdva2.fits'))
        hdul1 = fits.open(self.data('cdva2.fits'))

        header = hdul[0].header.copy()
        header['NAXIS'] = 0

        stpyfits.writeto(self.temp('new.fits'), hdul[0].data, header,
                         **self.writekwargs)
        fits.writeto(self.temp('new1.fits'), hdul1[0].data, hdul1[0].header,
                     **self.writekwargs)

        hdul.close()
        hdul1.close()

        info1 = fits.info(self.temp('new.fits'), output=False)
        info2 = stpyfits.info(self.temp('new.fits'), output=False)
        info3 = fits.info(self.temp('new1.fits'), output=False)
        info4 = stpyfits.info(self.temp('new1.fits'), output=False)

        if ASTROPY_VER_GE20:
            ans1 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (), '', '')]
            ans2 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (10, 10), 'int32', '')]
            ans3 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (), '', '')]
            ans4 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (10, 10), 'uint8', '')]
        else:
            ans1 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', '')]
            ans2 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (10, 10), 'int32', '')]
            ans3 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', '')]
            ans4 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (10, 10), 'uint8', '')]

        assert info1 == ans1
        assert info2 == ans2
        assert info3 == ans3
        assert info4 == ans4

    def test_appendConvienceFunction(self):
        """Test the append convenience function in both the fits and
        stpyfits namespace."""
        hdul = stpyfits.open(self.data('cdva2.fits'))
        hdul1 = fits.open(self.data('cdva2.fits'))

        stpyfits.writeto(self.temp('new.fits'), hdul[0].data, hdul[0].header,
                         **self.writekwargs)
        fits.writeto(self.temp('new1.fits'), hdul1[0].data, hdul1[0].header,
                     **self.writekwargs)

        hdu = stpyfits.ImageHDU()
        hdu1 = fits.ImageHDU()

        hdu.data = hdul[0].data
        hdu1.data = hdul1[0].data

        hdu.header.set('BITPIX', 32)
        hdu1.header.set('BITPIX', 32)

        hdu.header.set('NAXIS', 2)
        hdu.header.set('NAXIS1', 10, 'length of constant array axis 1',
                       after='NAXIS')
        hdu.header.set('NAXIS2', 10, 'length of constant array axis 2',
                       after='NAXIS1')
        hdu.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT')

        hdu1.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT')
        hdu1.header.set('NPIX1', 10, 'length of constant array axis 1',
                        after='GCOUNT')
        hdu1.header.set('NPIX2', 10, 'length of constant array axis 2',
                        after='NPIX1')

        stpyfits.append(self.temp('new.fits'), hdu.data, hdu.header)
        fits.append(self.temp('new1.fits'), hdu1.data, hdu1.header)

        if ASTROPY_VER_GE20:
            ans1 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (10, 10), 'int32', ''),
                    (1, '', 1, 'ImageHDU', 8, (10, 10), 'int32', '')]
            ans2 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (10, 10), 'uint8', ''),
                    (1, '', 1, 'ImageHDU', 8, (10, 10), 'uint8', '')]
            ans3 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (10, 10), 'int32', ''),
                    (1, '', 1, 'ImageHDU', 8, (10, 10), 'int32', '')]
            ans4 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (), '', ''),
                    (1, '', 1, 'ImageHDU', 8, (), '', '')]
        else:
            ans1 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''),
                    (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')]
            ans2 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'uint8', ''),
                    (1, '', 'ImageHDU', 8, (10, 10), 'uint8', '')]
            ans3 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''),
                    (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')]
            ans4 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', ''),
                    (1, '', 'ImageHDU', 8, (), '', '')]

        assert stpyfits.info(self.temp('new.fits'), output=False) == ans1
        assert stpyfits.info(self.temp('new1.fits'), output=False) == ans2
        assert fits.info(self.temp('new.fits'), output=False) == ans3
        assert fits.info(self.temp('new1.fits'), output=False) == ans4

        hdul5 = stpyfits.open(self.temp('new.fits'))
        hdul6 = fits.open(self.temp('new1.fits'))

        assert hdul5[1].header['NAXIS'] == 2
        assert hdul6[1].header['NAXIS'] == 0
        assert hdul5[1].header['NAXIS1'] == 10
        assert hdul5[1].header['NAXIS2'] == 10

        for k in ('NPIX1', 'NPIX2'):
            with pytest.raises(KeyError):
                hdul5[1].header[k]

        for k in ('NAXIS1', 'NAXIS2'):
            with pytest.raises(KeyError):
                hdul6[1].header[k]

        assert hdul6[1].header['NPIX1'] == 10
        assert hdul6[1].header['NPIX2'] == 10

        assert (hdul5[1].data == np.ones((10, 10), dtype=np.int32)).all()
        assert hdul6[1].data is None

        hdul5.close()
        hdul6.close()
        hdul.close()
        hdul1.close()
fits.open(self.data('cdva2.fits')) header = hdul[0].header.copy() header['NAXIS'] = 0 stpyfits.writeto(self.temp('new.fits'), hdul[0].data, header, **self.writekwargs) hdu = stpyfits.ImageHDU() hdu1 = fits.ImageHDU() hdu.data = hdul[0].data hdu1.data = hdul1[0].data hdu.header.set('BITPIX', 32) hdu1.header.set('BITPIX', 32) hdu.header.set('NAXIS', 0) hdu.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT') hdu1.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT') hdu.header.set('NPIX1', 10, 'length of constant array axis 1', after='GCOUNT') hdu.header.set('NPIX2', 10, 'length of constant array axis 2', after='NPIX1') stpyfits.append(self.temp('new.fits'), hdu.data, hdu.header) d = np.zeros_like(hdu.data) stpyfits.update(self.temp('new.fits'), d, hdu.header, 1) if ASTROPY_VER_GE20: ans1 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (), '', ''), (1, '', 1, 'ImageHDU', 8, (), '', '')] ans2 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (10, 10), 'int32', ''), (1, '', 1, 'ImageHDU', 8, (10, 10), 'int32', '')] else: ans1 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', ''), (1, '', 'ImageHDU', 8, (), '', '')] ans2 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''), (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')] assert fits.info(self.temp('new.fits'), output=False) == ans1 assert stpyfits.info(self.temp('new.fits'), output=False) == ans2 hdul7 = stpyfits.open(self.temp('new.fits')) assert hdul7[1].header['NAXIS'] == 2 assert hdul7[1].header['NAXIS1'] == 10 assert hdul7[1].header['NAXIS2'] == 10 assert hdul7[1].header['PIXVALUE'] == 0 for k in ('NPIX1', 'NPIX2'): with pytest.raises(KeyError): hdul7[1].header[k] assert (hdul7[1].data == np.zeros((10, 10), dtype=np.int32)).all() hdul8 = fits.open(self.temp('new.fits')) assert hdul8[1].header['NAXIS'] == 0 assert hdul8[1].header['NPIX1'] == 10 assert hdul8[1].header['NPIX2'] == 10 assert hdul8[1].header['PIXVALUE'] == 0 for k in ('NAXIS1', 'NAXIS2'): with pytest.raises(KeyError): hdul8[1].header[k] assert hdul8[1].data is None hdul7.close() hdul8.close() hdul.close() hdul1.close() def test_ImageHDUConstructor(self): """Test the ImageHDU constructor in both the fits and stpyfits namespace.""" hdu = stpyfits.ImageHDU() assert isinstance(hdu, stpyfits.ConstantValueImageHDU) assert isinstance(hdu, fits.ImageHDU) def test_PrimaryHDUConstructor(self): """Test the PrimaryHDU constructor in both the fits and stpyfits namespace. Although stpyfits does not reimplement the constructor, it does add _ConstantValueImageBaseHDU to the inheritance hierarchy of fits.PrimaryHDU when accessed through the stpyfits namespace. 
This method tests that that inheritance is working""" n = np.ones(10) hdu = stpyfits.PrimaryHDU(n) hdu.header.set('PIXVALUE', 1.0, 'Constant pixel value', after='EXTEND') hdu.header.set('NAXIS', 0) stpyfits.writeto(self.temp('new.fits'), hdu.data, hdu.header, **self.writekwargs) hdul = stpyfits.open(self.temp('new.fits')) hdul1 = fits.open(self.temp('new.fits')) assert hdul[0].header['NAXIS'] == 1 assert hdul[0].header['NAXIS1'] == 10 assert hdul[0].header['PIXVALUE'] == 1.0 with pytest.raises(KeyError): hdul[0].header['NPIX1'] assert (hdul[0].data == np.ones(10, dtype=np.float32)).all() assert hdul1[0].header['NAXIS'] == 0 assert hdul1[0].header['NPIX1'] == 10 assert hdul1[0].header['PIXVALUE'] == 1.0 with pytest.raises(KeyError): hdul1[0].header['NAXIS1'] assert hdul1[0].data is None hdul.close() hdul1.close() def test_HDUListWritetoMethod(self): """Test the writeto method of HDUList in both the fits and stpyfits namespace.""" hdu = stpyfits.PrimaryHDU() hdu1 = stpyfits.ImageHDU() hdu.data = np.zeros((10, 10), dtype=np.int32) hdu1.data = hdu.data + 2 hdu.header.set('BITPIX', 32) hdu1.header.set('BITPIX', 32) hdu.header.set('NAXIS', 2) hdu.header.set('NAXIS1', 10, 'length of constant array axis 1', after='NAXIS') hdu.header.set('NAXIS2', 10, 'length of constant array axis 2', after='NAXIS1') hdu.header.set('PIXVALUE', 0, 'Constant pixel value') hdu1.header.set('PIXVALUE', 2, 'Constant pixel value', after='GCOUNT') hdu1.header.set('NAXIS', 2) hdu1.header.set('NAXIS1', 10, 'length of constant array axis 1', after='NAXIS') hdu1.header.set('NAXIS2', 10, 'length of constant array axis 2', after='NAXIS1') hdul = stpyfits.HDUList([hdu, hdu1]) hdul.writeto(self.temp('new.fits'), **self.writekwargs) if ASTROPY_VER_GE20: ans1 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (10, 10), 'int32', ''), (1, '', 1, 'ImageHDU', 8, (10, 10), 'int32', '')] ans2 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (), '', ''), (1, '', 1, 'ImageHDU', 8, (), '', '')] else: ans1 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''), (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')] ans2 = [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', ''), (1, '', 'ImageHDU', 8, (), '', '')] assert stpyfits.info(self.temp('new.fits'), output=False) == ans1 assert fits.info(self.temp('new.fits'), output=False) == ans2 hdul1 = stpyfits.open(self.temp('new.fits')) hdul2 = fits.open(self.temp('new.fits')) assert hdul1[0].header['NAXIS'] == 2 assert hdul1[0].header['NAXIS1'] == 10 assert hdul1[0].header['NAXIS2'] == 10 assert hdul1[0].header['PIXVALUE'] == 0 assert (hdul1[0].data == np.zeros((10, 10), dtype=np.int32)).all() assert hdul1[1].header['NAXIS'] == 2 assert hdul1[1].header['NAXIS1'] == 10 assert hdul1[1].header['NAXIS2'] == 10 assert hdul1[1].header['PIXVALUE'] == 2 assert (hdul1[1].data == (np.zeros((10, 10), dtype=np.int32) + 2)).all() assert hdul2[0].header['NAXIS'] == 0 assert hdul2[0].header['NPIX1'] == 10 assert hdul2[0].header['NPIX2'] == 10 assert hdul2[0].header['PIXVALUE'] == 0 for i in range(2): for k in ('NPIX1', 'NPIX2'): with pytest.raises(KeyError): hdul1[i].header[k] for k in ('NAXIS1', 'NAXIS2'): with pytest.raises(KeyError): hdul2[i].header[k] assert hdul2[0].data is None assert hdul2[1].header['NAXIS'] == 0 assert hdul2[1].header['NPIX1'] == 10 assert hdul2[1].header['NPIX2'] == 10 assert hdul2[1].header['PIXVALUE'] == 2 hdul1.close() hdul2.close() def test_HDUList_getitem_Method(self): """Test the __getitem__ method of st_HDUList in the stpyfits namespace.""" n = np.ones(10) hdu = stpyfits.PrimaryHDU(n) hdu.header.set('PIXVALUE', 1., 
'constant pixel value', after='EXTEND') hdu.writeto(self.temp('new.fits'), **self.writekwargs) hdul = stpyfits.open(self.temp('new.fits')) hdul1 = fits.open(self.temp('new.fits')) hdu = hdul[0] hdu1 = hdul1[0] assert hdu.header['NAXIS'] == 1 assert hdu.header['NAXIS1'] == 10 assert hdu.header['PIXVALUE'] == 1.0 with pytest.raises(KeyError): hdu.header['NPIX1'] assert (hdu.data == np.ones(10, dtype=np.float32)).all() assert hdu1.header['NAXIS'] == 0 assert hdu1.header['NPIX1'] == 10 assert hdu1.header['PIXVALUE'] == 1.0 with pytest.raises(KeyError): hdu1.header['NAXIS1'] assert hdu1.data is None hdul.close() hdul1.close() def test_HDUListFlushMethod(self): """Test the flush method of HDUList in both the fits and stpyfits namespace.""" hdu = stpyfits.PrimaryHDU() hdu1 = stpyfits.ImageHDU() hdu.data = np.zeros((10, 10), dtype=np.int32) hdu1.data = hdu.data + 2 hdu.header.set('BITPIX', 32) hdu1.header.set('BITPIX', 32) hdu.header.set('NAXIS', 2) hdu.header.set('NAXIS1', 10, 'length of constant array axis 1', after='NAXIS') hdu.header.set('NAXIS2', 10, 'length of constant array axis 2', after='NAXIS1') hdu.header.set('PIXVALUE', 0, 'Constant pixel value') hdu1.header.set('PIXVALUE', 2, 'Constant pixel value', after='GCOUNT') hdu1.header.set('NAXIS', 2) hdu1.header.set('NAXIS1', 10, 'length of constant array axis 1', after='NAXIS') hdu1.header.set('NAXIS2', 10, 'length of constant array axis 2', after='NAXIS1') hdul = stpyfits.HDUList([hdu, hdu1]) hdul.writeto(self.temp('new.fits'), **self.writekwargs) hdul = stpyfits.open(self.temp('new.fits'), 'update') hdul[0].data = np.zeros(10, dtype=np.int32) + 3 hdul.flush() hdul.close() if ASTROPY_VER_GE20: ans1 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (10,), 'int32', ''), (1, '', 1, 'ImageHDU', 8, (10, 10), 'int32', '')] ans2 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (), '', ''), (1, '', 1, 'ImageHDU', 8, (), '', '')] ans3 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (15,), 'int32', ''), (1, '', 1, 'ImageHDU', 8, (10, 10), 'int32', '')] ans4 = [(0, 'PRIMARY', 1, 'PrimaryHDU', 6, (), '', ''), (1, '', 1, 'ImageHDU', 8, (), '', '')] else: ans1 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (10,), 'int32', ''), (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')] ans2 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', ''), (1, '', 'ImageHDU', 8, (), '', '')] ans3 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (15,), 'int32', ''), (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')] ans4 = [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', ''), (1, '', 'ImageHDU', 8, (), '', '')] assert stpyfits.info(self.temp('new.fits'), output=False) == ans1 assert fits.info(self.temp('new.fits'), output=False) == ans2 hdul1 = stpyfits.open(self.temp('new.fits')) hdul2 = fits.open(self.temp('new.fits')) assert hdul1[0].header['NAXIS'] == 1 assert hdul1[0].header['NAXIS1'] == 10 assert hdul1[0].header['PIXVALUE'] == 3 with pytest.raises(KeyError): hdul1[0].header['NPIX1'] assert (hdul1[0].data == (np.zeros(10, dtype=np.int32) + 3)).all() assert hdul2[0].header['NAXIS'] == 0 assert hdul2[0].header['NPIX1'] == 10 assert hdul2[0].header['PIXVALUE'] == 3 with pytest.raises(KeyError): hdul2[0].header['NAXIS1'] assert hdul2[0].data is None hdul1.close() hdul2.close() hdul3 = stpyfits.open(self.temp('new.fits'), 'update') hdul3[0].data = np.zeros(15, dtype=np.int32) + 4 hdul3.close() # Note that close calls flush assert stpyfits.info(self.temp('new.fits'), output=False) == ans3 assert fits.info(self.temp('new.fits'), output=False) == ans4 hdul1 = stpyfits.open(self.temp('new.fits')) hdul2 = fits.open(self.temp('new.fits')) assert 
hdul1[0].header['NAXIS'] == 1 assert hdul1[0].header['NAXIS1'] == 15 assert hdul1[0].header['PIXVALUE'] == 4 with pytest.raises(KeyError): hdul1[0].header['NPIX1'] assert (hdul1[0].data == (np.zeros(15, dtype=np.int32) + 4)).all() assert hdul2[0].header['NAXIS'] == 0 assert hdul2[0].header['NPIX1'] == 15 assert hdul2[0].header['PIXVALUE'] == 4 with pytest.raises(KeyError): hdul2[0].header['NAXIS1'] assert hdul2[0].data is None hdul1.close() hdul2.close() def test_ImageBaseHDU_getattr_Method(self): """Test the __getattr__ method of ImageBaseHDU in both the fits and stpyfits namespace.""" hdul = stpyfits.open(self.data('cdva2.fits')) hdul1 = fits.open(self.data('cdva2.fits')) hdu = hdul[0] hdu1 = hdul1[0] assert (hdu.data == np.ones((10, 10), dtype=np.int32)).all() assert hdu1.data is None hdul.close() hdul1.close() def test_ImageBaseHDUWriteToMethod(self): """Test the writeto method of _ConstantValueImageBaseHDU in the stpyfits namespace.""" n = np.ones(10) hdu = stpyfits.PrimaryHDU(n) hdu.header.set('PIXVALUE', 1., 'constant pixel value', after='EXTEND') hdu.writeto(self.temp('new.fits'), **self.writekwargs) hdul = stpyfits.open(self.temp('new.fits')) hdul1 = fits.open(self.temp('new.fits')) assert hdul[0].header['NAXIS'] == 1 assert hdul[0].header['NAXIS1'] == 10 assert hdul[0].header['PIXVALUE'] == 1.0 with pytest.raises(KeyError): hdul[0].header['NPIX1'] assert (hdul[0].data == np.ones(10, dtype=np.float32)).all() assert hdul1[0].header['NAXIS'] == 0 assert hdul1[0].header['NPIX1'] == 10 assert hdul1[0].header['PIXVALUE'] == 1.0 with pytest.raises(KeyError): hdul1[0].header['NAXIS1'] assert hdul1[0].data is None hdul.close() hdul1.close() def test_StrayPixvalue(self): """Regression test for #885 (https://svn.stsci.edu/trac/ssb/stsci_python/ticket/885) Tests that HDUs containing a non-zero NAXIS as well as a PIXVALUE keyword in their header are not treated as constant value HDUs. """ data = np.arange(100).reshape((10, 10)) phdu = fits.PrimaryHDU(data=data) hdu = fits.ImageHDU(data=data) phdu.header['PIXVALUE'] = 10 hdu.header['PIXVALUE'] = 10 hdul = fits.HDUList([phdu, hdu]) hdul.writeto(self.temp('test.fits')) with stpyfits.open(self.temp('test.fits')) as h: assert not isinstance(h[0], stpyfits.ConstantValuePrimaryHDU) assert not isinstance(h[1], stpyfits.ConstantValueImageHDU) assert (h[0].data == data).all() assert (h[1].data == data).all() def test_DimensionlessConstantValueArray(self): """Tests a case that was reported where an HDU can be a constant value HDU (it has a PIXVALUE and NAXIS=0) but NPIX1 = NPIX2 = 0 as well. """ hdu = stpyfits.PrimaryHDU() hdu.header['NAXIS'] = 0 hdu.header['BITPIX'] = 16 hdu.header['NPIX1'] = 0 hdu.header['NPIX2'] = 0 hdu.header['PIXVALUE'] = 0 hdu.writeto(self.temp('test.fits')) with stpyfits.open(self.temp('test.fits')) as h: assert h[0].data is None h.writeto(self.temp('test2.fits')) def test_DeconvertConstantArray(self): """When a constant value array's data is overridden with non- constant data, test that when saving the file it removes all constant value array keywords and is treated as a normal image HDU. 
""" data = np.ones((100, 100)) hdu = stpyfits.PrimaryHDU(data=data) hdu.header['PIXVALUE'] = 1 hdu.writeto(self.temp('test.fits')) with stpyfits.open(self.temp('test.fits'), mode='update') as h: assert h[0].header['PIXVALUE'] == 1 h[0].data[20:80, 20:80] = 2 with fits.open(self.temp('test.fits')) as h: assert 'PIXVALUE' not in h[0].header assert 'NPIX1' not in h[0].header assert 'NPIX2' not in h[0].header assert h[0].header.count('NAXIS') == 1 assert h[0].header['NAXIS'] == 2 assert h[0].header['NAXIS1'] == 100 assert h[0].header['NAXIS2'] == 100 assert h[0].data.max() == 2 assert h[0].data.min() == 1 def test_GetvalExtensionHDU(self): """Regression test for an issue that came up with the fact that ImageHDU has a different argument signature from PrimaryHDU. """ data = np.ones((100, 100)) hdu = stpyfits.ImageHDU(data=data) hdu.header['PIXVALUE'] = 1 hdu.header['FOO'] = 'test' hdul = stpyfits.HDUList([stpyfits.PrimaryHDU(), hdu]) hdul.writeto(self.temp('test.fits')) assert stpyfits.getval(self.temp('test.fits'), 'FOO', ext=1) == 'test' stsci.tools-3.4.12/lib/stsci/tools/tests/test_xyinterp.py0000644001120100020070000000333113006721301025246 0ustar jhunkSTSCI\science00000000000000from __future__ import division # confidence high from stsci.tools.xyinterp import xyinterp import numpy as N x=N.array((1,2,3,4,5)) y=x.copy() def test_xyinterp_1(): #test 1 ans = xyinterp(x,y,3) assert ans == 3, "Test 1 failed, ans = %f, should be 3"%ans def test_xyinterp_2(): #test 2 ans = xyinterp(x,y,3.5) assert ans == 3.5, "Test 2 failed, ans = %f, should be 3.5"%ans def test_xyinterp_3(): #test 3 try: ans = xyinterp(x,y,-3) raise AssertionError( "Test 3 failed; should have thrown an exception, answer = %s" % str(ans)) except ValueError: pass def test_xyinterp_4(): #test 4 try: ans = xyinterp(x,y,5.6) raise AssertionError( "Test 4 failed; should have thrown an exception, answer = %s" % str(ans)) except ValueError: pass def test_xyinterp_5(): #test 5 x=N.array((1,3,7,9,12)) y=N.array((5,10,15,20,25)) ans = xyinterp(x,y,8) assert ans == 17.5, "Test 5 failed, ans = %f, should be 17.5"%ans def test_xyinterp_6(): #test 6 x=N.array((5,3,6,2,7,0)) y=N.array((4,6,2,4,6,2)) try: ans = xyinterp(x,y,2) raise AssertionError( "Test 6 failed; should have thrown an exception, answer = %s" % str(ans)) except ValueError: pass def test_xyinterp_7(): #test 7 x=N.array((1,2,3,4,5)) y=N.arange(20) try: ans = xyinterp(x,y,2) raise AssertionError( "Test 7 failed; should have thrown an exception, answer = %s" % str(ans)) except ValueError: pass if __name__ == '__main__': test_xyinterp_1() test_xyinterp_2() test_xyinterp_3() test_xyinterp_4() test_xyinterp_5() test_xyinterp_6() test_xyinterp_7() stsci.tools-3.4.12/lib/stsci/tools/testutil.py0000644001120100020070000001750713006721301023052 0ustar jhunkSTSCI\science00000000000000from __future__ import division, print_function import math, os, sys, time import unittest import numpy as N """ This module extends the built-in unittest capabilities to facilitate performing floating point comparisons on scalars and numpy arrays. It also provides functions that automate building a test suite from all tests present in the module, and running the tests in standard or debug mode. To use this module, import it along with unittest [QUESTION: should this module import everything from unittest into its namespace to make life even easier?]. Subclass test cases from testutil.FPTestCase instead of unittest.TestCase. 
Call testall or debug from this module: import testutil class FileTestCase(testutil.FPTestCase): def setUp(self): assorted_test_setup def testone(self): self.assertEqual(1,2) def testtwo(self): self.assertApproxNumpy(arr1,arr2,accuracy=1e-6) def tearDown(self): assorted_test_teardown if __name__ == '__main__': if 'debug' in sys.argv: testutil.debug(__name__) else: testutil.testall(__name__,2) To run the tests in normal mode from the shell, do the following: python my_module.py It will run all tests, success or failure, and print a summary of the results. To run the tests in debug mode from the shell, do the following: python -i my_module.py debug >>> import pdb >>> pdb.pm() In debug mode, it will run until it encounters the first failed test, then stop. Thus if you run with the -i switch, you can then import pdb and proceed with the usual debugger commands. If you prefer to run your tests from within the Python interpreter, you may import this module and call its testall() and debug() functions explicitly. The modules you are testing must be visible in your sys.path. >>> import testutil as U >>> U.testall('ui_test') """ class LogTestCase(unittest.TestCase): """Override the .run() method to do some logging""" def run(self, result=None): if result is None: result = self.defaultTestResult() result.startTest(self) testMethod = getattr(self, self._testMethodName) try: try: self.setUp() except KeyboardInterrupt: raise except: result.addError(self, self._exc_info()) self.log('E') return ok = False try: testMethod() ok = True self.log("P") except self.failureException: result.addFailure(self, self._exc_info()) self.log("F") except KeyboardInterrupt: raise except: result.addError(self, self._exc_info()) self.log("E") try: self.tearDown() except KeyboardInterrupt: raise except: result.addError(self, self._exc_info()) ok = False if ok: result.addSuccess(self) finally: result.stopTest(self) def log(self,status,name=None): """Creates a log file containing the test name, status, and timestamp, as well as any attributes in the tda and tra dictionaries if present.
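For illustration (the test name, attribute, and timestamp below are made up), a passing test method named 'testone' with a single tda entry would produce a file testone.log along these lines:

    testone:: Name=testone
    testone:: Status=P
    testone:: Time=Wed Feb 14 21:42:02 2018
    testone:: tda_somekey=somevalue
    END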
Does not yet support fancy separating of multi-line items.""" if name is None: try: name=self.name except AttributeError: name=self.id() try: f=open(name+'.log','w') except IOError as e: print("Error opening log file: %s"%e.strerror) print("***No Logging Performed***") return f.write("%s:: Name=%s\n"%(name,name)) f.write("%s:: Status=%s\n"%(name,status)) f.write("%s:: Time=%s\n"%(name,time.asctime())) try: for k in self.tda: f.write("%s:: tda_%s=%s\n"%(name,str(k),str(self.tda[k]))) except AttributeError: pass try: for k in self.tra: f.write("%s:: tra_%s=%s\n"%(name,str(k),str(self.tra[k]))) except AttributeError: pass if status == 'E': f.write("%s:: tra_Trace=%s\n"%(name,str(self._exc_info()))) f.write("END\n") f.close() class FPTestCase(unittest.TestCase): ''' Base class to hold some functionality related to floating-point precision and array comparisons''' def assertApproxFP(self, testvalue, expected, accuracy=1.0e-5): ''' Floating point comparison ''' result = math.fabs((testvalue - expected) / expected) self.failUnless(result <= accuracy,"test: %g, ref: %g"%(testvalue,expected)) def assertApproxNumpy(self, testarray, expected, accuracy=1.0e-5): ''' Floating point array comparison ''' result=N.abs(testarray-expected)/expected self.failUnless(N.alltrue(result <= accuracy)) def assertEqualNumpy(self, testarray, expected): ''' Identical FP array comparison ''' self.failUnless(N.alltrue(testarray == expected)) class LogTextRunner(unittest.TextTestRunner): """ Redefines the .run() method to call a .log() method on the test when it is complete. """ def run(self, test): "Run the given test case or test suite." result = self._makeResult() startTime = time.time() test(result) stopTime = time.time() timeTaken = stopTime - startTime result.printErrors() self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() if not result.wasSuccessful(): self.stream.write("FAILED (") failed, errored = list(map(len, (result.failures, result.errors))) if failed: self.stream.write("failures=%d" % failed) test.log("F") if errored: if failed: self.stream.write(", ") self.stream.write("errors=%d" % errored) test.log("E") self.stream.writeln(")") else: self.stream.writeln("OK") test.log("P") return result def buildsuite(module): """Builds a test suite containing all tests found in the module. Returns the suite.""" M = __import__(module) suite = unittest.defaultTestLoader.loadTestsFromModule(M) return suite def debug(module): """ Build the test suite, then run in debug mode, which allows for postmortems""" buildsuite(module).debug() def testall(module,verb=0): """ Build and run the suite through the testrunner. Verbosity level defaults to quiet but can be set to 2 to produce a line as it runs each test. A summary of the number of tests run, errors, and failures is always printed at the end.""" result=unittest.TextTestRunner(verbosity=verb).run(buildsuite(module)) return result def testlog(module,verb=0): result=LogTextRunner(verbosity=verb).run(buildsuite(module)) return result def dump_file(fname, hdrwidth=80): """ Convenience function to dump a named file to the stdout, with an optional header listing the filename. This is easy to do without a convenience function like this, but having one reduces code in the XML test files. 
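For example (illustrative; assumes a file named 'pass1.log' exists in the current directory):

    dump_file('pass1.log', hdrwidth=40)

This prints a 40-column banner of '=' characters, the file name followed by a colon, a second banner, and then each line of the file with trailing whitespace stripped.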
""" assert os.path.exists(fname), "dump_file could not find: "+fname sys.stdout.flush() if hdrwidth>0: print("") print("="*hdrwidth) print(fname+':') print("="*hdrwidth) f = open(fname, 'r') for line in f: print(line.rstrip()) f.close() def dump_all_log_files(hdrwidth=80): """ Convenience function to dump all *.log files in cwd to the stdout, with an optional header listing each filename. See dump_file. """ import glob flist = glob.glob('*.log') for f in flist: dump_file(f, hdrwidth=hdrwidth) stsci.tools-3.4.12/lib/stsci/tools/textutil.py0000644001120100020070000000402713006721301023050 0ustar jhunkSTSCI\science00000000000000"""Text output-related utilities.""" import textwrap def wrap(text, width, *args, **kwargs): """ Like :func:`textwrap.wrap` but preserves existing newlines which :func:`textwrap.wrap` does not otherwise handle well. See Also -------- :func:`textwrap.wrap` """ return sum([textwrap.wrap(line, width, *args, **kwargs) if line else [''] for line in text.splitlines()], []) def textbox(text, width=78, boxchar='#', indent=0): """ Outputs line-wrapped text wrapped in a box drawn with a repeated (usually ASCII) character. For example: >>> textbox('Text to wrap', width=16) ################ # # # Text to wrap # # # ################ Parameters ---------- text : string The text to wrap width : int The width of the entire box, including the perimeter and the indentation space. Because the wrapped text is padded with an additional column of whitespace on each side, the minimum width is 5--any width less than that is is automatically increased to 5 (default: 78) boxchar : string (No pun intended.) The character to draw the box with. May also be a string of multiple characters (default: '#') indent : int Amount of space by which the box should be indented. (default: 0) """ min_width = len(boxchar) * 2 + 3 width = max(width-indent, min_width) indentspace = indent * ' ' wrap_width = width - min_width + 1 q, r = divmod(width, len(boxchar)) # The top/bottom border top_border = indentspace + boxchar * q + boxchar[:r] top_padding = indentspace + boxchar + ' ' * (width - len(boxchar) * 2) + boxchar lines = ['%s%s %s %s' % (indentspace, boxchar, line.ljust(wrap_width), boxchar) for line in wrap(text, wrap_width)] top = [top_border, top_padding] bottom = [top_padding, top_border] return '\n'.join(top + lines + bottom) stsci.tools-3.4.12/lib/stsci/tools/tkrotext.py0000644001120100020070000000641713017116245023066 0ustar jhunkSTSCI\science00000000000000""" Read-Only tkinter Text Widget. This is a variation of the tkinter Text widget in that the text itself is not editable (it is read-only), but it allows selection for cut/paste to other apps. Cut-paste may currently only work under X11. (9/2015 enabled under OSX by adding 'c' to ALLOWED_SYMS) A vastly simpler way of doing this is to use a tkinter.Text widget and set it to DISABLED, but then you cannot select text. 
$Id$ """ from __future__ import division # confidence high # System level modules import sys PY3K = sys.version_info[0] > 2 if PY3K: import tkinter as TKNTR else: import Tkinter as TKNTR ALLOWED_SYMS = ('c','Up','Down','Left','Right','Home','End','Prior','Next', \ 'Shift_L', 'Shift_R') class ROText(TKNTR.Text): def __init__(self, master, **kw): """ defer most of __init__ to the base class """ self._fbto = None if 'focusBackTo' in kw: self._fbto = kw['focusBackTo'] del kw['focusBackTo'] TKNTR.Text.__init__(self, master, **kw) # override some bindings to return a "break" string self.bind("<Key>", self.ignoreMostKeys) self.bind("<Button-2>", lambda e: "break") self.bind("<Button-3>", lambda e: "break") if self._fbto: self.bind("<Leave>", self.mouseLeft) self.config(insertwidth=0) # disallow common insert calls, but allow a backdoor when needed def insert(self, index, text, *tags, **kw): if 'force' in kw and kw['force']: TKNTR.Text.insert(self, index, text, *tags) # disallow common delete calls, but allow a backdoor when needed def delete(self, start, end=None, force=False): if force: TKNTR.Text.delete(self, start, end) # a way to disable text manip def ignoreMostKeys(self, event): if event.keysym not in ALLOWED_SYMS: return "break" # have to return this string to stop the event # To get copy/paste working on OSX we allow 'c' so that # they can type 'Command-c', but don't let a regular 'c' through. if event.keysym in ('c','C'): if sys.platform=='darwin' and hasattr(event,'state') and event.state != 0: pass # allow this through, it is Command-c else: return "break" def mouseLeft(self, event): if self._fbto: self._fbto.focus_set() return "break" # have to return this string to stop the event # Test the above class if __name__ == '__main__': import sys, time rot = None def quit(): sys.exit() def clicked(): rot.insert(TKNTR.END, "\nClicked at "+time.asctime(), force=True) rot.see(TKNTR.END) # make our test window top = TKNTR.Tk() f = TKNTR.Frame(top) sc = TKNTR.Scrollbar(f) sc.pack(side=TKNTR.RIGHT, fill=TKNTR.Y) rot = ROText(f, wrap=TKNTR.WORD, height=10, yscrollcommand=sc.set, focusBackTo=top) rot.pack(side=TKNTR.TOP, fill=TKNTR.X, expand=True) sc.config(command=rot.yview) f.pack(side=TKNTR.TOP, fill=TKNTR.X) b = TKNTR.Button(top, text='Click Me', command=clicked) b.pack(side=TKNTR.TOP, fill=TKNTR.X, expand=1) q = TKNTR.Button(top, text='Quit', command=quit) q.pack(side=TKNTR.TOP) # start top.mainloop() stsci.tools-3.4.12/lib/stsci/tools/validate.py0000644001120100020070000013176713112074217022770 0ustar jhunkSTSCI\science00000000000000# validate.py # A Validator object # Copyright (C) 2005-2010 Michael Foord, Mark Andrews, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # mark AT la-la DOT com # nico AT tekNico DOT net # This software is licensed under the terms of the BSD license. # http://www.voidspace.org.uk/python/license.shtml # Basically you're free to copy, modify, distribute and relicense it, # So long as you keep a copy of the license with it. # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. """ The Validator object is used to check that supplied values conform to a specification. The value can be supplied as a string - e.g. from a config file. In this case the check will also *convert* the value to the required type.
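For example (a minimal sketch; ``Validator`` and the standard checks such as ``integer`` are defined later in this module):

    >>> vtor = Validator()
    >>> vtor.check('integer(0, 9)', '5')
    5
    >>> vtor.check('integer(0, 9)', '42')
    Traceback (most recent call last):
    VdtValueTooBigError: the value "42" is too big.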
This allows you to add validation as a transparent layer to access data stored as strings. The validation checks that the data is correct *and* converts it to the expected type. Some standard checks are provided for basic data types. Additional checks are easy to write. They can be provided when the ``Validator`` is instantiated or added afterwards. The standard functions work with the following basic data types : * integers * floats * booleans * strings * ip_addr plus lists of these datatypes Adding additional checks is done through coding simple functions. The full set of standard checks are : * 'integer': matches integer values (including negative) Takes optional 'min' and 'max' arguments : :: integer() integer(3, 9) # any value from 3 to 9 integer(min=0) # any positive value integer(max=9) * 'float': matches float values Has the same parameters as the integer check. * 'boolean': matches boolean values - ``True`` or ``False`` Acceptable string values for True are : true, on, yes, 1 Acceptable string values for False are : false, off, no, 0 Any other value raises an error. * 'ip_addr': matches an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. * 'string': matches any string. Takes optional keyword args 'min' and 'max' to specify min and max lengths of the string. * 'list': matches any list. Takes optional keyword args 'min', and 'max' to specify min and max sizes of the list. (Always returns a list.) * 'tuple': matches any tuple. Takes optional keyword args 'min', and 'max' to specify min and max sizes of the tuple. (Always returns a tuple.) * 'int_list': Matches a list of integers. Takes the same arguments as list. * 'float_list': Matches a list of floats. Takes the same arguments as list. * 'bool_list': Matches a list of boolean values. Takes the same arguments as list. * 'ip_addr_list': Matches a list of IP addresses. Takes the same arguments as list. * 'string_list': Matches a list of strings. Takes the same arguments as list. * 'mixed_list': Matches a list with different types in specific positions. List size must match the number of arguments. Each position can be one of : 'integer', 'float', 'ip_addr', 'string', 'boolean' So to specify a list with two strings followed by two integers, you write the check as : :: mixed_list('string', 'string', 'integer', 'integer') * 'pass': This check matches everything ! It never fails and the value is unchanged. It is also the default if no check is specified. * 'option': This check matches any from a list of options. You specify this check with : :: option('option 1', 'option 2', 'option 3') You can supply a default value (returned if no value is supplied) using the default keyword argument. You specify a list argument for default using a list constructor syntax in the check : :: checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3')) A badly formatted set of arguments will raise a ``VdtParamError``. 
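Default values are converted in the same way. For example (``vtor`` is the ``Validator`` instance from the sketch above):

    >>> vtor.check('integer(default=6)', '3')
    3
    >>> vtor.check('integer(default=6)', None, missing=True)
    6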
""" from __future__ import division, print_function # confidence high __version__ = '1.0.1' __all__ = ( '__version__', 'dottedQuadToNum', 'numToDottedQuad', 'ValidateError', 'VdtUnknownCheckError', 'VdtParamError', 'VdtTypeError', 'VdtValueError', 'VdtValueTooSmallError', 'VdtValueTooBigError', 'VdtValueTooShortError', 'VdtValueTooLongError', 'VdtMissingValue', 'Validator', 'is_integer', 'is_float', 'is_boolean', 'is_list', 'is_tuple', 'is_ip_addr', 'is_string', 'is_int_list', 'is_bool_list', 'is_float_list', 'is_string_list', 'is_ip_addr_list', 'is_mixed_list', 'is_option', '__docformat__', ) import re import sys PY3K = sys.version_info[0] > 2 if PY3K: string_types = str number_types = (int, float) int_or_string_types = (int, str) number_or_string_types = (int, float, str) long = int else: string_types = basestring number_types = (int, long, float) int_or_string_types = (int, long, basestring) number_or_string_types = (int, long, float, basestring) _list_arg = re.compile(r''' (?: ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\( ( (?: \s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted ) \s*,\s* )* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted )? # last one ) \) ) ''', re.VERBOSE | re.DOTALL) # two groups _list_members = re.compile(r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?) # unquoted ) (?: (?:\s*,\s*)|(?:\s*$) # comma ) ''', re.VERBOSE | re.DOTALL) # one group _paramstring = r''' (?: ( (?: [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\( (?: \s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted ) \s*,\s* )* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted )? # last one \) )| (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?)| # unquoted (?: # keyword argument [a-zA-Z_][a-zA-Z0-9_]*\s*=\s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?) # unquoted ) ) ) ) (?: (?:\s*,\s*)|(?:\s*$) # comma ) ) ''' _matchstring = '^%s*' % _paramstring # Python pre 2.2.1 doesn't have bool try: bool except NameError: def bool(val): """Simple boolean equivalent function. 
""" if val: return 1 else: return 0 def dottedQuadToNum(ip): """ Convert decimal dotted quad string to long integer >>> int(dottedQuadToNum('1 ')) 1 >>> int(dottedQuadToNum(' 1.2')) 16777218 >>> int(dottedQuadToNum(' 1.2.3 ')) 16908291 >>> int(dottedQuadToNum('1.2.3.4')) 16909060 >>> dottedQuadToNum('255.255.255.255') 4294967295 >>> dottedQuadToNum('255.255.255.256') Traceback (most recent call last): ValueError: Not a good dotted-quad IP: 255.255.255.256 """ # import here to avoid it when ip_addr values are not used import socket, struct try: return struct.unpack('!L', socket.inet_aton(ip.strip()))[0] except socket.error: raise ValueError('Not a good dotted-quad IP: %s' % ip) return def numToDottedQuad(num): """ Convert long int to dotted quad string >>> numToDottedQuad(long(-1)) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(long(1)) '0.0.0.1' >>> numToDottedQuad(long(16777218)) '1.0.0.2' >>> numToDottedQuad(long(16908291)) '1.2.0.3' >>> numToDottedQuad(long(16909060)) '1.2.3.4' >>> numToDottedQuad(long(4294967295)) '255.255.255.255' >>> numToDottedQuad(long(4294967296)) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 """ # import here to avoid it when ip_addr values are not used import socket, struct # no need to intercept here, 4294967295 is fine if num > 4294967295 or num < 0: raise ValueError('Not a good numeric IP: %s' % num) try: return socket.inet_ntoa( struct.pack('!L', long(num))) except (socket.error, struct.error, OverflowError): raise ValueError('Not a good numeric IP: %s' % num) class ValidateError(Exception): """ This error indicates that the check failed. It can be the base class for more specific errors. Any check function that fails ought to raise this error. (or a subclass) >>> raise ValidateError Traceback (most recent call last): ValidateError """ class VdtMissingValue(ValidateError): """No value was supplied to a check that needed one.""" class VdtUnknownCheckError(ValidateError): """An unknown check function was requested""" def __init__(self, value): """ >>> raise VdtUnknownCheckError('yoda') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. """ ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,)) class VdtParamError(SyntaxError): """An incorrect parameter was passed""" def __init__(self, name, value): """ >>> raise VdtParamError('yoda', 'jedi') Traceback (most recent call last): VdtParamError: passed an incorrect value "jedi" for parameter "yoda". """ SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name)) class VdtTypeError(ValidateError): """The value supplied was of the wrong type""" def __init__(self, value): """ >>> raise VdtTypeError('jedi') Traceback (most recent call last): VdtTypeError: the value "jedi" is of the wrong type. """ ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,)) class VdtValueError(ValidateError): """The value supplied was of the correct type, but was not an allowed value.""" def __init__(self, value): """ >>> raise VdtValueError('jedi') Traceback (most recent call last): VdtValueError: the value "jedi" is unacceptable. """ ValidateError.__init__(self, 'the value "%s" is unacceptable.' 
% (value,)) class VdtValueTooSmallError(VdtValueError): """The value supplied was of the correct type, but was too small.""" def __init__(self, value): """ >>> raise VdtValueTooSmallError('0') Traceback (most recent call last): VdtValueTooSmallError: the value "0" is too small. """ ValidateError.__init__(self, 'the value "%s" is too small.' % (value,)) class VdtValueTooBigError(VdtValueError): """The value supplied was of the correct type, but was too big.""" def __init__(self, value): """ >>> raise VdtValueTooBigError('1') Traceback (most recent call last): VdtValueTooBigError: the value "1" is too big. """ ValidateError.__init__(self, 'the value "%s" is too big.' % (value,)) class VdtValueTooShortError(VdtValueError): """The value supplied was of the correct type, but was too short.""" def __init__(self, value): """ >>> raise VdtValueTooShortError('jed') Traceback (most recent call last): VdtValueTooShortError: the value "jed" is too short. """ ValidateError.__init__( self, 'the value "%s" is too short.' % (value,)) class VdtValueTooLongError(VdtValueError): """The value supplied was of the correct type, but was too long.""" def __init__(self, value): """ >>> raise VdtValueTooLongError('jedie') Traceback (most recent call last): VdtValueTooLongError: the value "jedie" is too long. """ ValidateError.__init__(self, 'the value "%s" is too long.' % (value,)) class Validator(object): """ Validator is an object that allows you to register a set of 'checks'. These checks take input and test that it conforms to the check. This can also involve converting the value from a string into the correct datatype. The ``check`` method takes an input string which configures which check is to be used and applies that check to a supplied value. An example input string would be: 'int_range(param1, param2)' You would then provide something like: >>> def int_range_check(value, min, max): ... # turn min and max from strings to integers ... min = int(min) ... max = int(max) ... # check that value is of the correct type. ... # possible valid inputs are integers or strings ... # that represent integers ... if not isinstance(value, (int, str)): ... raise VdtTypeError(value) ... elif isinstance(value, str): ... # if we are given a string ... # attempt to convert to an integer ... try: ... value = int(value) ... except ValueError: ... raise VdtValueError(value) ... # check the value is between our constraints ... if not min <= value: ... raise VdtValueTooSmallError(value) ... if not value <= max: ... raise VdtValueTooBigError(value) ... return value >>> fdict = {'int_range': int_range_check} >>> vtr1 = Validator(fdict) >>> vtr1.check('int_range(20, 40)', '30') 30 >>> vtr1.check('int_range(20, 40)', '60') Traceback (most recent call last): VdtValueTooBigError: the value "60" is too big. New functions can be added with : :: >>> vtr2 = Validator() >>> vtr2.functions['int_range'] = int_range_check Or by passing in a dictionary of functions when Validator is instantiated. Your functions *can* use keyword arguments, but the first argument should always be 'value'. If the function doesn't take additional arguments, the parentheses are optional in the check. It can be written with either of : :: keyword = function_name keyword = function_name() The first program to utilise Validator() was Michael Foord's ConfigObj, an alternative to ConfigParser which supports lists and can validate a config file using a config schema. 
For more details on using Validator with ConfigObj see: http://www.voidspace.org.uk/python/configobj.html """ # this regex does the initial parsing of the checks _func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL) # this regex takes apart keyword arguments _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL) # this regex finds keyword=list(....) type values _list_arg = _list_arg # this regex takes individual values out of lists - in one pass _list_members = _list_members # These regexes check a set of arguments for validity # and then pull the members out _paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL) _matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL) def __init__(self, functions=None): """ >>> vtri = Validator() """ self.functions = { '': self._pass, 'integer': is_integer, 'float': is_float, 'boolean': is_boolean, 'ip_addr': is_ip_addr, 'string': is_string, 'list': is_list, 'tuple': is_tuple, 'int_list': is_int_list, 'float_list': is_float_list, 'bool_list': is_bool_list, 'ip_addr_list': is_ip_addr_list, 'string_list': is_string_list, 'mixed_list': is_mixed_list, 'pass': self._pass, 'option': is_option, 'force_list': force_list, } if functions is not None: self.functions.update(functions) # tekNico: for use by ConfigObj self.baseErrorClass = ValidateError self._cache = {} def check(self, check, value, missing=False): """ Usage: check(check, value) Arguments: check: string representing check to apply (including arguments) value: object to be checked Returns value, converted to correct type if necessary If the check fails, raises a ``ValidateError`` subclass. >>> vtor.check('yoda', '') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('yoda()', '') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('string(default="")', '', missing=True) '' """ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) if missing: if default is None: # no information needed here - to be handled by caller raise VdtMissingValue() value = self._handle_none(default) if value is None: return None return self._check_value(value, fun_name, fun_args, fun_kwargs) def _handle_none(self, value): if value == 'None': value = None elif value in ("'None'", '"None"'): # Special case a quoted None value = self._unquote(value) return value def _parse_with_caching(self, check): if check in self._cache: fun_name, fun_args, fun_kwargs, default = self._cache[check] # We call list and dict below to work with *copies* of the data # rather than the original (which are mutable of course) fun_args = list(fun_args) fun_kwargs = dict(fun_kwargs) else: fun_name, fun_args, fun_kwargs, default = self._parse_check(check) fun_kwargs = dict([(str(key), value) for (key, value) in fun_kwargs.items()]) self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default return fun_name, fun_args, fun_kwargs, default def _check_value(self, value, fun_name, fun_args, fun_kwargs): try: fun = self.functions[fun_name] except KeyError: raise VdtUnknownCheckError(fun_name) else: return fun(value, *fun_args, **fun_kwargs) def _parse_check(self, check): fun_match = self._func_re.match(check) if fun_match: fun_name = fun_match.group(1) arg_string = fun_match.group(2) arg_match = self._matchfinder.match(arg_string) if arg_match is None: # Bad syntax raise VdtParamError('Bad syntax in check "%s".' 
% check) fun_args = [] fun_kwargs = {} # pull out args of group 2 for arg in self._paramfinder.findall(arg_string): # args may need whitespace removing (before removing quotes) arg = arg.strip() listmatch = self._list_arg.match(arg) if listmatch: key, val = self._list_handle(listmatch) fun_kwargs[key] = val continue keymatch = self._key_arg.match(arg) if keymatch: val = keymatch.group(2) if not val in ("'None'", '"None"'): # Special case a quoted None val = self._unquote(val) fun_kwargs[keymatch.group(1)] = val continue fun_args.append(self._unquote(arg)) else: # allows for function names without (args) return check, (), {}, None # Default must be deleted if the value is specified too, # otherwise the check function will get a spurious "default" keyword arg try: default = fun_kwargs.pop('default', None) except AttributeError: # Python 2.2 compatibility default = None try: default = fun_kwargs['default'] del fun_kwargs['default'] except KeyError: pass return fun_name, fun_args, fun_kwargs, default def _unquote(self, val): """Unquote a value if necessary.""" if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]): val = val[1:-1] return val def _list_handle(self, listmatch): """Take apart a ``keyword=list('val, 'val')`` type string.""" out = [] name = listmatch.group(1) args = listmatch.group(2) for arg in self._list_members.findall(args): out.append(self._unquote(arg)) return name, out def _pass(self, value): """ Dummy check that always passes >>> vtor.check('', 0) 0 >>> vtor.check('', '0') '0' """ return value def get_default_value(self, check): """ Given a check, return the default value for the check (converted to the right type). If the check doesn't specify a default value then a ``KeyError`` will be raised. """ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) if default is None: raise KeyError('Check "%s" has no default value.' % check) value = self._handle_none(default) if value is None: return value return self._check_value(value, fun_name, fun_args, fun_kwargs) def _is_num_param(names, values, to_float=False): """ Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a". """ fun = to_float and float or int out_params = [] for (name, val) in zip(names, values): if val is None: out_params.append(val) elif isinstance(val, number_or_string_types): try: out_params.append(fun(val)) except ValueError: raise VdtParamError(name, val) else: raise VdtParamError(name, val) return out_params # built in checks # you can override these by setting the appropriate name # in Validator.functions # note: if the params are specified wrongly in your input string, # you will also raise errors. def is_integer(value, min=None, max=None): """ A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. 
>>> vtor.check('integer', '2.2') Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0 """ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) if not isinstance(value, int_or_string_types): raise VdtTypeError(value) if isinstance(value, string_types): # if it's a string - does it represent an integer ? try: value = int(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value def is_float(value, min=None, max=None): """ A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big. """ (min_val, max_val) = _is_num_param( ('min', 'max'), (min, max), to_float=True) if not isinstance(value, number_or_string_types): raise VdtTypeError(value) if not isinstance(value, float): # if it's a string - does it represent a float ? try: value = float(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value bool_dict = { True: True, 'on': True, '1': True, 'true': True, 'yes': True, False: False, 'off': False, '0': False, 'false': False, 'no': False, } def is_boolean(value): """ Check if the value represents a boolean. 
>>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type. """ if isinstance(value, string_types): try: return bool_dict[value.lower()] except KeyError: raise VdtTypeError(value) # we do an equality test rather than an identity test # this ensures Python 2.2 compatibility # and allows 0 and 1 to represent True and False if value == False: return False elif value == True: return True else: raise VdtTypeError(value) def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, string_types): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value def is_list(value, min=None, max=None): """ Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) [1, 2, 3, 4] >>> vtor.check('list', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) if isinstance(value, string_types): raise VdtTypeError(value) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return list(value) def is_tuple(value, min=None, max=None): """ Check that the value is a tuple of values.
You can optionally specify the minimum and maximum number of members. It does no check on members. >>> vtor.check('tuple', ()) () >>> vtor.check('tuple', []) () >>> vtor.check('tuple', (1, 2)) (1, 2) >>> vtor.check('tuple', [1, 2]) (1, 2) >>> vtor.check('tuple(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4)) (1, 2, 3, 4) >>> vtor.check('tuple', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('tuple', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ return tuple(is_list(value, min, max)) def is_string(value, min=None, max=None): """ Check that the supplied value is a string. You can optionally specify the minimum and maximum number of members. >>> vtor.check('string', '0') '0' >>> vtor.check('string', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('string(2)', '12') '12' >>> vtor.check('string(2)', '1') Traceback (most recent call last): VdtValueTooShortError: the value "1" is too short. >>> vtor.check('string(min=2, max=3)', '123') '123' >>> vtor.check('string(min=2, max=3)', '1234') Traceback (most recent call last): VdtValueTooLongError: the value "1234" is too long. """ if not isinstance(value, string_types): raise VdtTypeError(value) (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return value def is_int_list(value, min=None, max=None): """ Check that the value is a list of integers. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an integer. >>> vtor.check('int_list', ()) [] >>> vtor.check('int_list', []) [] >>> vtor.check('int_list', (1, 2)) [1, 2] >>> vtor.check('int_list', [1, 2]) [1, 2] >>> vtor.check('int_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_integer(mem) for mem in is_list(value, min, max)] def is_bool_list(value, min=None, max=None): """ Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_boolean(mem) for mem in is_list(value, min, max)] def is_float_list(value, min=None, max=None): """ Check that the value is a list of floats. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a float. 
>>> vtor.check('float_list', ()) [] >>> vtor.check('float_list', []) [] >>> vtor.check('float_list', (1, 2.0)) [1.0, 2.0] >>> vtor.check('float_list', [1, 2.0]) [1.0, 2.0] >>> vtor.check('float_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_float(mem) for mem in is_list(value, min, max)] def is_string_list(value, min=None, max=None): """ Check that the value is a list of strings. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a string. >>> vtor.check('string_list', ()) [] >>> vtor.check('string_list', []) [] >>> vtor.check('string_list', ('a', 'b')) ['a', 'b'] >>> vtor.check('string_list', ['a', 1]) Traceback (most recent call last): VdtTypeError: the value "1" is of the wrong type. >>> vtor.check('string_list', 'hello') Traceback (most recent call last): VdtTypeError: the value "hello" is of the wrong type. """ if isinstance(value, string_types): raise VdtTypeError(value) return [is_string(mem) for mem in is_list(value, min, max)] def is_ip_addr_list(value, min=None, max=None): """ Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) Traceback (most recent call last): VdtValueError: the value "a" is unacceptable. """ return [is_ip_addr(mem) for mem in is_list(value, min, max)] def force_list(value, min=None, max=None): """ Check that a value is a list, coercing strings into a list with one member. Useful where users forget the trailing comma that turns a single value into a list. You can optionally specify the minimum and maximum number of members. A minimum of greater than one will fail if the user only supplies a string. >>> vtor.check('force_list', ()) [] >>> vtor.check('force_list', []) [] >>> vtor.check('force_list', 'hello') ['hello'] """ if not isinstance(value, (list, tuple)): value = [value] return is_list(value, min, max) fun_dict = { 'integer': is_integer, 'float': is_float, 'ip_addr': is_ip_addr, 'string': is_string, 'boolean': is_boolean, } def is_mixed_list(value, *args): """ Check that the value is a list. Allow specifying the type of each member. Work on lists of specific lengths. You specify each member as a positional argument specifying type. Each type should be one of the following strings : 'integer', 'float', 'ip_addr', 'string', 'boolean' So you can specify a list of two strings, followed by two integers as : mixed_list('string', 'string', 'integer', 'integer') The length of the list must match the number of positional arguments you supply. >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')" >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True)) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True')) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) Traceback (most recent call last): VdtTypeError: the value "b" is of the wrong type. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long. >>> vtor.check(mix_str, 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. This test requires an elaborate setup, because of a change in error string output from the interpreter between Python 2.2 and 2.3 . >>> res_seq = ( ... 'passed an incorrect value "', ... 'yoda', ... '" for parameter "mixed_list".', ... ) >>> res_str = "'".join(res_seq) >>> try: ... vtor.check('mixed_list("yoda")', ('a')) ... except VdtParamError as err: ... str(err) == res_str 1 """ try: length = len(value) except TypeError: raise VdtTypeError(value) if length < len(args): raise VdtValueTooShortError(value) elif length > len(args): raise VdtValueTooLongError(value) try: return [fun_dict[arg](val) for arg, val in zip(args, value)] except KeyError as e: raise(VdtParamError('mixed_list', e)) def is_option(value, *options): """ This check matches the value to any of a set of options. >>> vtor.check('option("yoda", "jedi")', 'yoda') 'yoda' >>> vtor.check('option("yoda", "jedi")', 'jed') Traceback (most recent call last): VdtValueError: the value "jed" is unacceptable. >>> vtor.check('option("yoda", "jedi")', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, string_types): raise VdtTypeError(value) if not value in options: raise VdtValueError(value) return value def _test(value, *args, **keywargs): """ A function that exists for test purposes. >>> checks = [ ... '3, 6, min=1, max=3, test=list(a, b, c)', ... '3', ... '3, 6', ... '3,', ... 'min=1, test="a b c"', ... 'min=5, test="a, b, c"', ... 'min=1, max=3, test="a, b, c"', ... 'min=-100, test=-99', ... 'min=1, max=3', ... '3, 6, test="36"', ... '3, 6, test="a, b, c"', ... '3, max=3, test=list("a", "b", "c")', ... '''3, max=3, test=list("'a'", 'b', "x=(c)")''', ... "test='x=fish(3)'", ... ] >>> v = Validator({'test': _test}) >>> for entry in checks: ... print(v.check(('test(%s)' % entry), 3)) (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'}) (3, ('3',), {}) (3, ('3', '6'), {}) (3, ('3',), {}) (3, (), {'test': 'a b c', 'min': '1'}) (3, (), {'test': 'a, b, c', 'min': '5'}) (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'}) (3, (), {'test': '-99', 'min': '-100'}) (3, (), {'max': '3', 'min': '1'}) (3, ('3', '6'), {'test': '36'}) (3, ('3', '6'), {'test': 'a, b, c'}) (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'}) (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'}) (3, (), {'test': 'x=fish(3)'}) >>> v = Validator() >>> v.check('integer(default=6)', '3') 3 >>> v.check('integer(default=6)', None, True) 6 >>> v.get_default_value('integer(default=6)') 6 >>> v.get_default_value('float(default=6)') 6.0 >>> v.get_default_value('pass(default=None)') >>> v.get_default_value("string(default='None')") 'None' >>> v.get_default_value('pass') Traceback (most recent call last): KeyError: 'Check "pass" has no default value.' 
>>> v.get_default_value('pass(default=list(1, 2, 3, 4))') ['1', '2', '3', '4'] >>> v = Validator() >>> v.check("pass(default=None)", None, True) >>> v.check("pass(default='None')", None, True) 'None' >>> v.check('pass(default="None")', None, True) 'None' >>> v.check('pass(default=list(1, 2, 3, 4))', None, True) ['1', '2', '3', '4'] >>> v = Validator() >>> default = v.get_default_value('string(default=None)') >>> default is None True """ return (value, args, keywargs) def _test2(): """ >>> >>> v = Validator() >>> v.get_default_value('string(default="#ff00dd")') '#ff00dd' >>> v.get_default_value('integer(default=3) # comment') 3 """ def _test3(): r""" >>> vtor.check('string(default="")', '', missing=True) '' >>> vtor.check('string(default="\n")', '', missing=True) '\n' >>> print(vtor.check('string(default="\n")', '', missing=True), end='') >>> vtor.check('string()', '\n') '\n' >>> vtor.check('string(default="\n\n\n")', '', missing=True) '\n\n\n' >>> vtor.check('string()', 'random \n text goes here\n\n') 'random \n text goes here\n\n' >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")', ... '', missing=True) ' \nrandom text\ngoes \n here\n\n ' >>> vtor.check("string(default='\n\n\n')", '', missing=True) '\n\n\n' >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True) '\n' >>> vtor.check("string_list()", ['foo', '\n', 'bar']) ['foo', '\n', 'bar'] >>> vtor.check("string_list(default=list('\n'))", '', missing=True) ['\n'] """ if __name__ == '__main__': # run the code tests in doctest format import sys import doctest m = sys.modules.get('__main__') globs = m.__dict__.copy() globs.update({ 'vtor': Validator(), }) doctest.testmod(m, globs=globs) stsci.tools-3.4.12/lib/stsci/tools/version.py0000644001120100020070000000142013241171572022657 0ustar jhunkSTSCI\science00000000000000 # AUTOMATICALLY GENERATED BY 'RELIC': # * DO NOT EDIT THIS MODULE MANUALLY. # * DO NOT COMMIT THIS MODULE TO YOUR GIT REPOSITORY __all__ = [ '__version__', '__version_short__', '__version_long__', '__version_post__', '__version_commit__', '__version_date__', '__version_dirty__', '__build_date__', '__build_time__', '__build_status__' ] __version__ = '3.4.12' __version_short__ = '3.4.12' __version_long__ = '3.4.12-0-gfd82437d' __version_post__ = '0' __version_commit__ = 'fd82437d' __version_date__ = '2018-02-07 16:36:45 -0500' __version_dirty__ = False __build_date__ = '2018-02-14' __build_time__ = '21:42:02.379382' __build_status__ = 'release' if not int(__version_post__) > 0 \ and not __version_dirty__ \ else 'development' stsci.tools-3.4.12/lib/stsci/tools/versioninfo.py0000644001120100020070000000460013006721301023524 0ustar jhunkSTSCI\science00000000000000# Program: versionInfo.py # Author: Christopher Hanley # # License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE # # Date: 19 January 2004 # Purpose: # To print a user's system information when providing user support. # # Version: # Version 0.1.0, 19-Jan-04: Program created. -- CJH # Version 0.1.1, 20-Jan-04: Modified program to # loop over a taskList object. -- CJH # # Version 0.2.0, 31-Mar-06: Added numpy to the task list. 
-- CJH from __future__ import division, print_function # confidence high __version__ = '0.2.0' def printVersionInfo(): # Print the current path information try: print("Path information:") print("-----------------") import sys print(sys.path) print(" ") except: print("Unable to get sys information.") print(" ") # Define the list of tasks to test taskList = [ 'numarray', 'numpy', 'Numeric', 'pyfits', 'pyraf', 'multidrizzle', 'pydrizzle', 'stsci.tools', 'calcos', 'convolve', 'image', 'imagemanip', 'imagestats', 'ndimage' ] # Test the list of software tasks for software in taskList: print(software+":") print("-----------") try: package = __import__(software) try: print("version -> ",package.__version__) except: print("__version__ attribute is not defined") try: print("SVN version -> ",package.__svn_version__) except: print("__svn_version__ attribute is not defined") try: pathName = package.__path__ except: pathName = package.__file__ print("location -> ",pathName) except: print(software+" not found in path...") print(" ") # Print instruction message. print("PLEASE PASTE THE OUTPUT FROM THIS TASK ") print("INTO AN E-MAIL MESSAGE AND SEND IT WITH") print("YOUR PROBLEM DESCRIPTION TO SSB!") print(" ") print("SUPPORT ADDRESS: help@stsci.edu ") if __name__ == '__main__': printVersionInfo() stsci.tools-3.4.12/lib/stsci/tools/vtor_checks.py0000755001120100020070000001277113006721301023510 0ustar jhunkSTSCI\science00000000000000#!/usr/local/bin/python """ This file holds our own over-rides for the standard Validator check functions. We over-ride them so that we may add our own special keywords to them in the config_spec. $Id$ """ from __future__ import absolute_import, division, print_function # confidence high from . import configobj, validate from . import irafutils STANDARD_KEYS = ['min', 'max', 'missing', 'default'] OVCDBG = False def sigStrToKwArgsDict(checkFuncSig): """ Take a check function signature (string), and parse it to get a dict of the keyword args and their values. """ p1 = checkFuncSig.find('(') p2 = checkFuncSig.rfind(')') assert p1 > 0 and p2 > 0 and p2 > p1, "Invalid signature: "+checkFuncSig argParts = irafutils.csvSplit(checkFuncSig[p1+1:p2], ',', True) argParts = [x.strip() for x in argParts] retval = {} for argPair in argParts: argSpl = argPair.split('=', 1) if len(argSpl) > 1: if argSpl[0] in retval: if isinstance(retval[argSpl[0]], (list,tuple)): retval[argSpl[0]]+=(irafutils.stripQuotes(argSpl[1]),) # 3rd else: # 2nd in, so convert to tuple retval[argSpl[0]] = (retval[argSpl[0]], irafutils.stripQuotes(argSpl[1]),) else: retval[argSpl[0]] = irafutils.stripQuotes(argSpl[1]) # 1st in else: retval[argSpl[0]] = None # eg. found "triggers=, max=6, ..." return retval def separateKeywords(kwArgsDict): """ Look through the keywords passed and separate the special ones we have added from the legal/standard ones. Return both sets as two dicts (in a tuple), as (standardKws, ourKws) """ standardKws = {} ourKws = {} for k in kwArgsDict: if k in STANDARD_KEYS: standardKws[k]=kwArgsDict[k] else: ourKws[k]=kwArgsDict[k] return (standardKws, ourKws) def addKwdArgsToSig(sigStr, kwArgsDict): """ Alter the passed function signature string to add the given keywords """ retval = sigStr if len(kwArgsDict) > 0: retval = retval.strip(' ,)') # open up the r.h.s.
for more args for k in kwArgsDict: if retval[-1] != '(': retval += ", " retval += str(k)+"="+str(kwArgsDict[k]) retval += ')' retval = retval return retval def boolean_check_kw(val, *args, **kw): if OVCDBG: print("boolean_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) vtor = validate.Validator() checkFuncStr = "boolean"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) def option_check_kw(val, *args, **kw): if OVCDBG: print("option_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) vtor = validate.Validator() checkFuncStr = "option"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) def integer_check_kw(val, *args, **kw): if OVCDBG: print("integer_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) vtor = validate.Validator() checkFuncStr = "integer"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) def integer_or_none_check_kw(val, *args, **kw): if OVCDBG: print("integer_or_none_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) if val in (None,'','None','NONE','INDEF'): return None # only difference vtor = validate.Validator() checkFuncStr = "integer"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) def float_check_kw(val, *args, **kw): if OVCDBG: print("float_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) vtor = validate.Validator() checkFuncStr = "float"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) def float_or_none_check_kw(val, *args, **kw): if OVCDBG: print("float_or_none_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) if val in (None,'','None','NONE','INDEF'): return None # only difference vtor = validate.Validator() checkFuncStr = "float"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) def string_check_kw(val, *args, **kw): if OVCDBG: print("string_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw)) vtor = validate.Validator() checkFuncStr = "string"+str(tuple(args)) checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0]) if OVCDBG: print("CFS: "+checkFuncStr+'\n') return vtor.check(checkFuncStr, val) FUNC_DICT = {'boolean_kw': boolean_check_kw, 'option_kw': option_check_kw, 'integer_kw': integer_check_kw, 'integer_or_none_kw': integer_or_none_check_kw, 'float_kw': float_check_kw, 'float_or_none_kw': float_or_none_check_kw, 'string_kw': string_check_kw, 'action_kw': string_check_kw } stsci.tools-3.4.12/lib/stsci/tools/wcsutil.py0000644001120100020070000012142213112074217022664 0ustar jhunkSTSCI\science00000000000000from __future__ import absolute_import, division, print_function # confidence high import copy, os from astropy.io import fits import numpy as N from math import * from . import fileutil # Convenience definitions... 
yes = True no = False DEGTORAD = fileutil.DEGTORAD RADTODEG = fileutil.RADTODEG DIVMOD = fileutil.DIVMOD DEFAULT_PREFIX = 'O' # # History # # 30-Mar-2002 WJH: Added separate XYtoSky interface function. # 19-Apr-2002 WJH: Corrected 'ddtohms' for error in converting neg. dec. # 20-Sept-2002 WJH: replaced all references to 'keypar' with calls to 'hselect' # This avoids any parameter writes in the pipeline. # 03-Dec-2002 WJH: Added 'new' parameter to WCSObject to make creating an # object from scratch unambiguous and free from filename # collisions with user files. # 23-Apr-2003 WJH: Enhanced to search entire file for header with WCS keywords # if no extension was specified with filename. # 6-Oct-2003 WJH: Modified to use the 'fileutil.getHeader' function or # accept a PyFITS/readgeis header object. Removed # any explicit check on whether the image was FITS or # not. # 5-Feb-2004 WJH: Added 'recenter' method to rigorously shift the WCS from # an off-center reference pixel position to the frame center # # 24-Jan-2005 WJH: Added methods and attributes for working with archived # versions of WCS keywords. Archived keywords will be # treated as 'read-only' if they already exist, unless # specifically overwritten. # # 30-Mar-2005 WJH: 'read_archive' needed to be modified to use existing prefix # found in header, if one exists, for computing archive pscale. # # 20-Jun-2005 WJH: Support for constant-value arrays using NPIX/PIXVALUE added to # class. The output reference WCS now creates a constant-value # array for the extension as well in order to be FITS compliant. # WCS keywords now get written out in a set order to be FITS compliant. # New method, get_orient, added to always allow access to computed # orientation regardless of orientat keyword value. # # 29-June-2005 WJH: Multiple WCS extensions are not created when running # 'createReferenceWCS'. # __version__ = '1.2.3 (11-Feb-2011)' def help(): print('wcsutil Version '+str(__version__)+':\n') print(WCSObject.__doc__) ################# # # # Coordinate Transformation Functions # # ################# def ddtohms(xsky,ysky,verbose=no): """ Convert sky position(s) from decimal degrees to HMS format.""" xskyh = xsky /15. xskym = (xskyh - N.floor(xskyh)) * 60. xskys = (xskym - N.floor(xskym)) * 60. yskym = (N.abs(ysky) - N.floor(N.abs(ysky))) * 60. yskys = (yskym - N.floor(yskym)) * 60. if isinstance(xskyh,N.ndarray): rah,dech = [],[] for i in range(len(xskyh)): rastr = repr(int(xskyh[i]))+':'+repr(int(xskym[i]))+':'+repr(xskys[i]) decstr = repr(int(ysky[i]))+':'+repr(int(yskym[i]))+':'+repr(yskys[i]) rah.append(rastr) dech.append(decstr) if verbose: print('RA = ',rastr,', Dec = ',decstr) else: rastr = repr(int(xskyh))+':'+repr(int(xskym))+':'+repr(xskys) decstr = repr(int(ysky))+':'+repr(int(yskym))+':'+repr(yskys) rah = rastr dech = decstr if verbose: print('RA = ',rastr,', Dec = ',decstr) return rah,dech def troll(roll, dec, v2, v3): """ Computes the roll angle at the target position based on:: the roll angle at the V1 axis(roll), the dec of the target(dec), and the V2/V3 position of the aperture (v2,v3) in arcseconds. Based on the algorithm provided by Colin Cox that is used in Generic Conversion at STScI. """ # Convert all angles to radians _roll = DEGTORAD(roll) _dec = DEGTORAD(dec) _v2 = DEGTORAD(v2 / 3600.) _v3 = DEGTORAD(v3 / 3600.) 
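# Geometric interpretation (annotation; inferred from the Cox algorithm referenced above): rho is the angular separation of the aperture from the V1 axis, while beta and gamma are the position angles used to transfer the roll angle from the V1 axis to the target position.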
# compute components sin_rho = sqrt((pow(sin(_v2),2)+pow(sin(_v3),2)) - (pow(sin(_v2),2)*pow(sin(_v3),2))) rho = asin(sin_rho) beta = asin(sin(_v3)/sin_rho) if _v2 < 0: beta = pi - beta gamma = asin(sin(_v2)/sin_rho) if _v3 < 0: gamma = pi - gamma A = pi/2. + _roll - beta B = atan2( sin(A)*cos(_dec), (sin(_dec)*sin_rho - cos(_dec)*cos(rho)*cos(A))) # compute final value troll = RADTODEG(pi - (gamma+B)) return troll ################# # # # Coordinate System Class # # ################# class WCSObject: """ This class should contain the WCS information from the input exposure's header and provide conversion functionality from pixels to RA/Dec and back. :Syntax: The basic syntax for using this object is:: >>> wcs = wcsutil.WCSObject(rootname,header=None,shape=None, >>> pa_key='PA_V3',new=no,prefix=None) This will create a WCSObject which provides basic WCS functions. Parameters ========== rootname: string filename in a format supported by IRAF, specifically:: filename.hhh[group] -or- filename.fits[ext] -or- filename.fits[extname,extver] header: object PyFITS header object from which WCS keywords can be read shape: tuple tuple giving (nx,ny,pscale) pa_key: string name of keyword to read in telescope orientation new: boolean specify a new object rather than creating one by reading in keywords from an existing image prefix: string string to use as prefix for creating archived versions of WCS keywords, if such keywords do not already exist Notes ====== Setting 'new=yes' will create a WCSObject from scratch regardless of any input rootname. This avoids unexpected filename collisions. Methods ======= print_archive(format=True) print out archive keyword values get_archivekw(keyword) return archived value for WCS keyword set_pscale() set pscale attribute for object compute_pscale(cd11,cd21) compute pscale value get_orient() return orient computed from CD matrix updateWCS(pixel_scale=None,orient=None,refpos=None,refval=None,size=None) reset entire WCS based on given values xy2rd(pos) compute RA/Dec position for given (x,y) tuple rd2xy(skypos,hour=no) compute X,Y position for given (RA,Dec) rotateCD(orient) rotate CD matrix to new orientation given by 'orient' recenter() Reset reference position to X,Y center of frame write(fitsname=None,archive=True,overwrite=False,quiet=True) write out values of WCS to specified file restore() reset WCS keyword values to those from archived values read_archive(header,prepend=None) read any archive WCS keywords from PyFITS header archive(prepend=None,overwrite=no,quiet=yes) create archived copies of WCS keywords. write_archive(fitsname=None,overwrite=no,quiet=yes) write out the archived WCS values to the file restoreWCS(prepend=None) resets WCS values in file to original values createReferenceWCS(refname,overwrite=yes) write out values of WCS keywords to NEW FITS file without any image data copy(deep=True) create a copy of the WCSObject.
help() prints out this help message """ def __init__(self, rootname,header=None,shape=None,pa_key='PA_V3',new=no,prefix=None): # Initialize wcs dictionaries: # wcsdef - default values for new images # wcstrans - translation table from header keyword to attribute # wcskeys - keywords in the order they should appear in the header self.wcsdef = {'crpix1':0.0,'crpix2':0.0,'crval1':0.0,'crval2':0.0,'cd11':1.0, 'cd12':1.0,'cd21':1.0,'cd22':1.0,'orient':1.0,'naxis1':0,'naxis2':0,'pscale':1.0, 'postarg1':0.0,'postarg2':0.0,'pa_obs':0.0, 'ctype1':'RA---TAN','ctype2':'DEC--TAN'} self.wcstrans = {'CRPIX1':'crpix1','CRPIX2':'crpix2','CRVAL1':'crval1','CRVAL2':'crval2', 'CD1_1':'cd11','CD1_2':'cd12','CD2_1':'cd21','CD2_2':'cd22', 'ORIENTAT':'orient', 'NAXIS1':'naxis1','NAXIS2':'naxis2', 'pixel scale':'pscale','CTYPE1':'ctype1','CTYPE2':'ctype2'} self.wcskeys = ['NAXIS1','NAXIS2','CRPIX1','CRPIX2', 'CRVAL1','CRVAL2','CTYPE1','CTYPE2', 'CD1_1','CD1_2','CD2_1','CD2_2', 'ORIENTAT'] # Now, read in the CRPIX1/2, CRVAL1/2, CD1/2_1/2 keywords. # Simplistic, but easy to understand what you are asking for. _exists = yes if rootname is not None: self.rootname = rootname else: self.rootname = 'New' new = yes _exists = no # Initialize attribute for GEIS image name, just in case... self.geisname = None # Look for extension specification in rootname _indx = _section = self.rootname.find('[') # If none are found, use entire rootname if _indx < 0: _indx = len(self.rootname) # Determine whether we are working with a new image or not. _dir,_rootname = os.path.split(fileutil.osfn(self.rootname[:_indx])) if _dir: _filename = _dir+os.sep+_rootname else: _filename = _rootname self.filename = _filename if not new: _exists = fileutil.checkFileExists(_rootname,directory=_dir) else: _exists = no # If no header has been provided, get the PRIMARY and the # specified extension header... This call uses the fully # expanded version of the filename, plus any sections listed by # by the user in the original rootname. if not header and _exists: _hdr_file = _filename+self.rootname[_indx:] _header = fileutil.getHeader(_hdr_file) else: # Otherwise, simply use the header already read into memory # for this exposure/chip. _header = header if _exists or header: # Initialize WCS object with keyword values... try: _dkey = 'orientat' if 'orientat' in _header: self.orient = _header['orientat'] else: self.orient = None if _header['naxis'] == 0 and 'pixvalue' in _header: # Check for existence of NPIX/PIXVALUE keywords # which represent a constant array extension _dkey = 'npix1' self.naxis1 = _header['npix1'] _dkey = 'npix2' self.naxis2 = _header['npix2'] _dkey = 'pixvalue' self.pixvalue = _header['pixvalue'] else: _dkey = 'naxis1' self.naxis1 = _header['naxis1'] _dkey = 'naxis2' self.naxis2 = _header['naxis2'] self.pixvalue = None self.npix1 = self.naxis1 self.npix2 = self.naxis2 for key in self.wcstrans.keys(): _dkey = self.wcstrans[key] if _dkey not in ['pscale','orient','naxis1','naxis2']: self.__dict__[_dkey] = _header[key] self.new = no except: print('Could not find WCS keyword: ',_dkey) raise IOError('Image %s does not contain all required WCS keywords!' % self.rootname) # Now, try to read in POSTARG keyword values, if they exist... try: self.postarg1 = _header['postarg1'] self.postarg2 = _header['postarg2'] except: # If these keywords, don't exist set defaults... 
self.postarg1 = 0.0 self.postarg2 = 0.0 try: self.pa_obs = _header[pa_key] except: # If no such keyword exists, use orientat value later self.pa_obs = None else: # or set default values... self.new = yes for key in self.wcsdef.keys(): self.__dict__[key] = self.wcsdef[key] if shape is not None: # ... and update with user values. self.naxis1 = int(shape[0]) self.naxis2 = int(shape[1]) self.pscale = float(shape[2]) # Make sure reported 'orient' is consistent with CD matrix # while preserving the original 'ORIENTAT' keyword value self.orientat = self.orient self.orient = RADTODEG(N.arctan2(self.cd12,self.cd22)) # If no keyword provided pa_obs value (PA_V3), then default to # image orientation from CD matrix. if self.pa_obs is None: self.pa_obs = self.orient if shape is None: self.set_pscale() #self.pscale = N.sqrt(N.power(self.cd11,2)+N.power(self.cd21,2)) * 3600. # Use Jacobian determination of pixel scale instead of X or Y separately... #self.pscale = N.sqrt(abs(self.cd11*self.cd22 - self.cd12*self.cd21))*3600. # Establish an attribute for the linearized orient # defined as the orientation of the CD after applying the default # distortion correction. self._orient_lin = 0. # attribute to define format for printing WCS self.__format__=yes # Keep track of the keyword names used as the backup keywords # for the original WCS values # backup - dict relating active keywords with backup keywords # prepend - string prepended to active keywords to create backup keywords # orig_wcs - dict containing orig keywords and values self.backup = {} self.revert = {} self.prepend = None self.orig_wcs = {} # Read in any archived WCS keyword values, if they exist self.read_archive(_header,prepend=prefix) # You never know when you want to print out the WCS keywords... def __str__(self): block = 'WCS Keywords for ' + self.rootname + ': \n' if not self.__format__: for key in self.wcstrans.keys(): _dkey = self.wcstrans[key] strn = key.upper() + " = " + repr(self.__dict__[_dkey]) + '\n' block += strn block += 'PA_V3: '+repr(self.pa_obs)+'\n' else: block += 'CD_11 CD_12: '+repr(self.cd11)+' '+repr(self.cd12) +'\n' block += 'CD_21 CD_22: '+repr(self.cd21)+' '+repr(self.cd22) +'\n' block += 'CRVAL : '+repr(self.crval1)+' '+repr(self.crval2) + '\n' block += 'CRPIX : '+repr(self.crpix1)+' '+repr(self.crpix2) + '\n' block += 'NAXIS : '+repr(int(self.naxis1))+' '+repr(int(self.naxis2)) + '\n' block += 'Plate Scale : '+repr(self.pscale)+'\n' block += 'ORIENTAT : '+repr(self.orient)+'\n' block += 'CTYPE : '+repr(self.ctype1)+' '+repr(self.ctype2)+'\n' block += 'PA Telescope: '+repr(self.pa_obs)+'\n' return block def __repr__(self): return repr(self.__dict__) def print_archive(self,format=True): """ Prints out archived WCS keywords.""" if len(list(self.orig_wcs.keys())) > 0: block = 'Original WCS keywords for ' + self.rootname+ '\n' block += ' backed up on '+repr(self.orig_wcs['WCSCDATE'])+'\n' if not format: for key in self.wcstrans.keys(): block += key.upper() + " = " + repr(self.get_archivekw(key)) + '\n' block += 'PA_V3: '+repr(self.pa_obs)+'\n' else: block += 'CD_11 CD_12: '+repr(self.get_archivekw('CD1_1'))+' '+repr(self.get_archivekw('CD1_2')) +'\n' block += 'CD_21 CD_22: '+repr(self.get_archivekw('CD2_1'))+' '+repr(self.get_archivekw('CD2_2')) +'\n' block += 'CRVAL : '+repr(self.get_archivekw('CRVAL1'))+' '+repr(self.get_archivekw('CRVAL2')) + '\n' block += 'CRPIX : '+repr(self.get_archivekw('CRPIX1'))+' '+repr(self.get_archivekw('CRPIX2')) + '\n' block += 'NAXIS : '+repr(int(self.get_archivekw('NAXIS1')))+' 
'+repr(int(self.get_archivekw('NAXIS2'))) + '\n' block += 'Plate Scale : '+repr(self.get_archivekw('pixel scale'))+'\n' block += 'ORIENTAT : '+repr(self.get_archivekw('ORIENTAT'))+'\n' print(block) def get_archivekw(self,keyword): """ Return an archived/backup value for the keyword. """ return self.orig_wcs[self.backup[keyword]] def set_pscale(self): """ Compute the pixel scale based on active WCS values. """ if self.new: self.pscale = 1.0 else: self.pscale = self.compute_pscale(self.cd11,self.cd21) def compute_pscale(self,cd11,cd21): """ Compute the pixel scale based on active WCS values. """ return N.sqrt(N.power(cd11,2)+N.power(cd21,2)) * 3600. def get_orient(self): """ Return the computed orientation based on CD matrix. """ return RADTODEG(N.arctan2(self.cd12,self.cd22)) def set_orient(self): """ Return the computed orientation based on CD matrix. """ self.orient = RADTODEG(N.arctan2(self.cd12,self.cd22)) def update(self): """ Update computed values of WCS based on current CD matrix.""" self.set_pscale() self.set_orient() def updateWCS(self, pixel_scale=None, orient=None,refpos=None,refval=None,size=None): """ Create a new CD Matrix from the absolute pixel scale and reference image orientation. """ # Set up parameters necessary for updating WCS # Check to see if new value is provided, # If not, fall back on old value as the default _updateCD = no if orient is not None and orient != self.orient: pa = DEGTORAD(orient) self.orient = orient self._orient_lin = orient _updateCD = yes else: # In case only pixel_scale was specified pa = DEGTORAD(self.orient) if pixel_scale is not None and pixel_scale != self.pscale: _ratio = pixel_scale / self.pscale self.pscale = pixel_scale _updateCD = yes else: # In case, only orient was specified pixel_scale = self.pscale _ratio = None # If a new plate scale was given, # the default size should be revised accordingly # along with the default reference pixel position. # Added 31 Mar 03, WJH. if _ratio is not None: self.naxis1 /= _ratio self.naxis2 /= _ratio self.crpix1 = self.naxis1/2. self.crpix2 = self.naxis2/2. # However, if the user provides a given size, # set it to use that no matter what. if size is not None: self.naxis1 = size[0] self.naxis2 = size[1] # Insure that naxis1,2 always return as integer values. self.naxis1 = int(self.naxis1) self.naxis2 = int(self.naxis2) if refpos is not None: self.crpix1 = refpos[0] self.crpix2 = refpos[1] if self.crpix1 is None: self.crpix1 = self.naxis1/2. self.crpix2 = self.naxis2/2. if refval is not None: self.crval1 = refval[0] self.crval2 = refval[1] # Reset WCS info now... if _updateCD: # Only update this should the pscale or orientation change... pscale = pixel_scale / 3600. self.cd11 = -pscale * N.cos(pa) self.cd12 = pscale * N.sin(pa) self.cd21 = self.cd12 self.cd22 = -self.cd11 # Now make sure that all derived values are really up-to-date based # on these changes self.update() def scale_WCS(self,pixel_scale,retain=True): ''' Scale the WCS to a new pixel_scale. The 'retain' parameter [default value: True] controls whether or not to retain the original distortion solution in the CD matrix. ''' _ratio = pixel_scale / self.pscale # Correct the size of the image and CRPIX values for scaled WCS self.naxis1 /= _ratio self.naxis2 /= _ratio self.crpix1 = self.naxis1/2. self.crpix2 = self.naxis2/2. if retain: # Correct the WCS while retaining original distortion information self.cd11 *= _ratio self.cd12 *= _ratio self.cd21 *= _ratio self.cd22 *= _ratio else: pscale = pixel_scale / 3600. 
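# NOTE: 'pa' is defined only in updateWCS, not in this method, so the assignments below would otherwise raise NameError; assuming the current CD-matrix orientation is the intended position angle, derive it here first. pa = DEGTORAD(self.get_orient())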
self.cd11 = -pscale * N.cos(pa) self.cd12 = pscale * N.sin(pa) self.cd21 = self.cd12 self.cd22 = -self.cd11 # Now make sure that all derived values are really up-to-date based # on these changes self.update() def xy2rd(self,pos): """ This method would apply the WCS keywords to a position to generate a new sky position. The algorithm comes directly from 'imgtools.xy2rd' translate (x,y) to (ra, dec) """ if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0: print('XY2RD only supported for TAN projections.') raise TypeError if isinstance(pos,N.ndarray): # If we are working with an array of positions, # point to just X and Y values posx = pos[:,0] posy = pos[:,1] else: # Otherwise, we are working with a single X,Y tuple posx = pos[0] posy = pos[1] xi = self.cd11 * (posx - self.crpix1) + self.cd12 * (posy - self.crpix2) eta = self.cd21 * (posx - self.crpix1) + self.cd22 * (posy - self.crpix2) xi = DEGTORAD(xi) eta = DEGTORAD(eta) ra0 = DEGTORAD(self.crval1) dec0 = DEGTORAD(self.crval2) ra = N.arctan((xi / (N.cos(dec0)-eta*N.sin(dec0)))) + ra0 dec = N.arctan( ((eta*N.cos(dec0)+N.sin(dec0)) / (N.sqrt((N.cos(dec0)-eta*N.sin(dec0))**2 + xi**2))) ) ra = RADTODEG(ra) dec = RADTODEG(dec) ra = DIVMOD(ra, 360.) # Otherwise, just return the RA,Dec tuple. return ra,dec def rd2xy(self,skypos,hour=no): """ This method would use the WCS keywords to compute the XY position from a given RA/Dec tuple (in deg). NOTE: Investigate how to let this function accept arrays as well as single positions. WJH 27Mar03 """ if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0: print('RD2XY only supported for TAN projections.') raise TypeError det = self.cd11*self.cd22 - self.cd12*self.cd21 if det == 0.0: raise ArithmeticError("singular CD matrix!") cdinv11 = self.cd22 / det cdinv12 = -self.cd12 / det cdinv21 = -self.cd21 / det cdinv22 = self.cd11 / det # translate (ra, dec) to (x, y) ra0 = DEGTORAD(self.crval1) dec0 = DEGTORAD(self.crval2) if hour: skypos[0] = skypos[0] * 15. ra = DEGTORAD(skypos[0]) dec = DEGTORAD(skypos[1]) bottom = float(N.sin(dec)*N.sin(dec0) + N.cos(dec)*N.cos(dec0)*N.cos(ra-ra0)) if bottom == 0.0: raise ArithmeticError("Unreasonable RA/Dec range!") xi = RADTODEG((N.cos(dec) * N.sin(ra-ra0) / bottom)) eta = RADTODEG((N.sin(dec)*N.cos(dec0) - N.cos(dec)*N.sin(dec0)*N.cos(ra-ra0)) / bottom) x = cdinv11 * xi + cdinv12 * eta + self.crpix1 y = cdinv21 * xi + cdinv22 * eta + self.crpix2 return x,y def rotateCD(self,orient): """ Rotates WCS CD matrix to new orientation given by 'orient' """ # Determine where member CRVAL position falls in ref frame # Find out whether this needs to be rotated to align with # reference frame. _delta = self.get_orient() - orient if _delta == 0.: return # Start by building the rotation matrix... _rot = fileutil.buildRotMatrix(_delta) # ...then, rotate the CD matrix and update the values... _cd = N.array([[self.cd11,self.cd12],[self.cd21,self.cd22]],dtype=N.float64) _cdrot = N.dot(_cd,_rot) self.cd11 = _cdrot[0][0] self.cd12 = _cdrot[0][1] self.cd21 = _cdrot[1][0] self.cd22 = _cdrot[1][1] self.orient = orient def recenter(self): """ Reset the reference position values to correspond to the center of the reference frame. Algorithm used here developed by Colin Cox - 27-Jan-2004. """ if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0: print('WCS.recenter() only supported for TAN projections.') raise TypeError # Check to see if WCS is already centered... if self.crpix1 == self.naxis1/2. and self.crpix2 == self.naxis2/2.: # No recentering necessary... 
return without changing WCS. return # This offset aligns the WCS to the center of the pixel, in accordance # with the 'align=center' option used by 'drizzle'. #_drz_off = -0.5 _drz_off = 0. _cen = (self.naxis1/2.+ _drz_off,self.naxis2/2. + _drz_off) # Compute the RA and Dec for center pixel _cenrd = self.xy2rd(_cen) _cd = N.array([[self.cd11,self.cd12],[self.cd21,self.cd22]],dtype=N.float64) _ra0 = DEGTORAD(self.crval1) _dec0 = DEGTORAD(self.crval2) _ra = DEGTORAD(_cenrd[0]) _dec = DEGTORAD(_cenrd[1]) # Set up some terms for use in the final result _dx = self.naxis1/2. - self.crpix1 _dy = self.naxis2/2. - self.crpix2 _dE,_dN = DEGTORAD(N.dot(_cd,(_dx,_dy))) _dE_dN = 1 + N.power(_dE,2) + N.power(_dN,2) _cosdec = N.cos(_dec) _sindec = N.sin(_dec) _cosdec0 = N.cos(_dec0) _sindec0 = N.sin(_dec0) _n1 = N.power(_cosdec,2) + _dE*_dE + _dN*_dN*N.power(_sindec,2) _dra_dE = (_cosdec0 - _dN*_sindec0)/_n1 _dra_dN = _dE*_sindec0 /_n1 _ddec_dE = -_dE*N.tan(_dec) / _dE_dN _ddec_dN = (1/_cosdec) * ((_cosdec0 / N.sqrt(_dE_dN)) - (_dN*N.sin(_dec) / _dE_dN)) # Compute new CD matrix values now... _cd11n = _cosdec * (self.cd11*_dra_dE + self.cd21 * _dra_dN) _cd12n = _cosdec * (self.cd12*_dra_dE + self.cd22 * _dra_dN) _cd21n = self.cd11 * _ddec_dE + self.cd21 * _ddec_dN _cd22n = self.cd12 * _ddec_dE + self.cd22 * _ddec_dN _new_orient = RADTODEG(N.arctan2(_cd12n,_cd22n)) #_new_pscale = N.sqrt(N.power(_cd11n,2)+N.power(_cd21n,2)) * 3600. # Update the values now... self.crpix1 = _cen[0] self.crpix2 = _cen[1] self.crval1 = RADTODEG(_ra) self.crval2 = RADTODEG(_dec) # Keep the same plate scale, only change the orientation self.rotateCD(_new_orient) # These would update the CD matrix with the new rotation # ALONG with the new plate scale which we do not want. self.cd11 = _cd11n self.cd12 = _cd12n self.cd21 = _cd21n self.cd22 = _cd22n #self.pscale = _new_pscale self.update() def write(self,fitsname=None,wcs=None,archive=True,overwrite=False,quiet=True): """ Write out the values of the WCS keywords to the specified image. If it is a GEIS image and 'fitsname' has been provided, it will automatically make a multi-extension FITS copy of the GEIS and update that file. Otherwise, it throws an Exception if the user attempts to directly update a GEIS image header. If archive=True, also write out archived WCS keyword values to file. If overwrite=True, replace archived WCS values in file with new values. If a WCSObject is passed through the 'wcs' keyword, then the WCS keywords of this object are copied to the header of the image to be updated. A use case for this is updating the WCS of a WFPC2 data quality (_c1h.fits) file in order to be in sync with the science (_c0h.fits) file. """ ## Start by making sure all derived values are in sync with CD matrix self.update() image = self.rootname _fitsname = fitsname if image.find('.fits') < 0 and _fitsname is not None: # A non-FITS image was provided, and openImage made a copy # Update attributes to point to new copy instead self.geisname = image image = self.rootname = _fitsname # Open image as writable FITS object fimg = fileutil.openImage(image, mode='update', fitsname=_fitsname) _root,_iextn = fileutil.parseFilename(image) _extn = fileutil.getExtn(fimg,_iextn) # Write out values to header...
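# If a donor WCSObject was supplied via the 'wcs' argument, its keyword values are written out in place of this object's (see the WFPC2 use case in the docstring).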
if wcs: _wcsobj = wcs else: _wcsobj = self for key in _wcsobj.wcstrans.keys(): _dkey = _wcsobj.wcstrans[key] if _dkey != 'pscale': _extn.header[key] = _wcsobj.__dict__[_dkey] # Close the file fimg.close() del fimg if archive: self.write_archive(fitsname=fitsname,overwrite=overwrite,quiet=quiet) def restore(self): """ Reset the active WCS keywords to values stored in the backup keywords. """ # If there are no backup keys, do nothing... if len(list(self.backup.keys())) == 0: return for key in self.backup.keys(): if key != 'WCSCDATE': self.__dict__[self.wcstrans[key]] = self.orig_wcs[self.backup[key]] self.update() def archive(self,prepend=None,overwrite=no,quiet=yes): """ Create backup copies of the WCS keywords with the given prepended string. If backup keywords are already present, only update them if 'overwrite' is set to 'yes', otherwise, do warn the user and do nothing. Set the WCSDATE at this time as well. """ # Verify that existing backup values are not overwritten accidentally. if len(list(self.backup.keys())) > 0 and overwrite == no: if not quiet: print('WARNING: Backup WCS keywords already exist! No backup made.') print(' The values can only be overridden if overwrite=yes.') return # Establish what prepend string to use... if prepend is None: if self.prepend is not None: _prefix = self.prepend else: _prefix = DEFAULT_PREFIX else: _prefix = prepend # Update backup and orig_wcs dictionaries # We have archive keywords and a defined prefix # Go through and append them to self.backup self.prepend = _prefix for key in self.wcstrans.keys(): if key != 'pixel scale': _archive_key = self._buildNewKeyname(key,_prefix) else: _archive_key = self.prepend.lower()+'pscale' # if key != 'pixel scale': self.orig_wcs[_archive_key] = self.__dict__[self.wcstrans[key]] self.backup[key] = _archive_key self.revert[_archive_key] = key # Setup keyword to record when these keywords were backed up. self.orig_wcs['WCSCDATE']= fileutil.getLTime() self.backup['WCSCDATE'] = 'WCSCDATE' self.revert['WCSCDATE'] = 'WCSCDATE' def read_archive(self,header,prepend=None): """ Extract a copy of WCS keywords from an open file header, if they have already been created and remember the prefix used for those keywords. Otherwise, setup the current WCS keywords as the archive values. """ # Start by looking for the any backup WCS keywords to # determine whether archived values are present and to set # the prefix used. _prefix = None _archive = False if header is not None: for kw in header.items(): if kw[0][1:] in self.wcstrans.keys(): _prefix = kw[0][0] _archive = True break if not _archive: self.archive(prepend=prepend) return # We have archive keywords and a defined prefix # Go through and append them to self.backup if _prefix is not None: self.prepend = _prefix else: self.prepend = DEFAULT_PREFIX for key in self.wcstrans.keys(): _archive_key = self._buildNewKeyname(key,_prefix) if key!= 'pixel scale': if _archive_key in header: self.orig_wcs[_archive_key] = header[_archive_key] else: self.orig_wcs[_archive_key] = header[key] self.backup[key] = _archive_key self.revert[_archive_key] = key # Establish plate scale value _cd11str = self.prepend+'CD1_1' _cd21str = self.prepend+'CD2_1' pscale = self.compute_pscale(self.orig_wcs[_cd11str],self.orig_wcs[_cd21str]) _archive_key = self.prepend.lower()+'pscale' self.orig_wcs[_archive_key] = pscale self.backup['pixel scale'] = _archive_key self.revert[_archive_key] = 'pixel scale' # Setup keyword to record when these keywords were backed up. 
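# Prefer a WCSCDATE value already present in the header; only stamp the current local time when no archived date exists.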
if 'WCSCDATE' in header: self.orig_wcs['WCSCDATE'] = header['WCSCDATE'] else: self.orig_wcs['WCSCDATE'] = fileutil.getLTime() self.backup['WCSCDATE'] = 'WCSCDATE' self.revert['WCSCDATE'] = 'WCSCDATE' def write_archive(self,fitsname=None,overwrite=no,quiet=yes): """ Saves a copy of the WCS keywords from the image header as new keywords with the user-supplied 'prepend' character(s) prepended to the old keyword names. If the file is a GEIS image and 'fitsname' is not None, create a FITS copy and update that version; otherwise, raise an Exception and do not update anything. """ _fitsname = fitsname # Open image in update mode # Copying of GEIS images handled by 'openImage'. fimg = fileutil.openImage(self.rootname,mode='update',fitsname=_fitsname) if self.rootname.find('.fits') < 0 and _fitsname is not None: # A non-FITS image was provided, and openImage made a copy # Update attributes to point to new copy instead self.geisname = self.rootname self.rootname = _fitsname # extract the extension ID being updated _root,_iextn = fileutil.parseFilename(self.rootname) _extn = fileutil.getExtn(fimg,_iextn) if not quiet: print('Updating archive WCS keywords for ',_fitsname) # Write out values to header... for key in self.orig_wcs.keys(): _comment = None _dkey = self.revert[key] # Verify that archive keywords will not be overwritten, # unless overwrite=yes. _old_key = key in _extn.header if _old_key == True and overwrite == no: if not quiet: print('WCS keyword',key,' already exists! Not overwriting.') continue # No archive keywords exist yet in file, or overwrite=yes... # Extract the value for the original keyword if _dkey in _extn.header: # Extract any comment string for the keyword as well _indx_key = _extn.header.index(_dkey) _full_key = _extn.header.cards[_indx_key] if not quiet: print('updating ',key,' with value of: ',self.orig_wcs[key]) _extn.header[key] = (self.orig_wcs[key], _full_key.comment) key = 'WCSCDATE' if key not in _extn.header: # Print out history keywords to record when these keywords # were backed up. _extn.header[key] = (self.orig_wcs[key], "Time WCS keywords were copied.") # Close the now updated image fimg.close() del fimg def restoreWCS(self,prepend=None): """ Resets the WCS values to the original values stored in the backup keywords recorded in self.backup. """ # Open header for image image = self.rootname if prepend: _prepend = prepend elif self.prepend: _prepend = self.prepend else: _prepend = None # Open image as writable FITS object fimg = fileutil.openImage(image, mode='update') # extract the extension ID being updated _root,_iextn = fileutil.parseFilename(self.rootname) _extn = fileutil.getExtn(fimg,_iextn) if len(self.backup) > 0: # If it knows about the backup keywords already, # use this to restore the original values to the original keywords for newkey in self.revert.keys(): if newkey != 'opscale': _orig_key = self.revert[newkey] _extn.header[_orig_key] = _extn.header[newkey] elif _prepend: for key in self.wcstrans.keys(): # Get new keyword name based on old keyname # and prepend string if key != 'pixel scale': _okey = self._buildNewKeyname(key,_prepend) if _okey in _extn.header: _extn.header[key] = _extn.header[_okey] else: print('No original WCS values found. Exiting...') break else: print('No original WCS values found. Exiting...') fimg.close() del fimg def createReferenceWCS(self,refname,overwrite=yes): """ Write out the values of the WCS keywords to the NEW specified image 'fitsname'. 
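For example (illustrative only; assumes 'wcs' is an initialized WCSObject and 'reference_wcs.fits' is a hypothetical output name): >>> wcs.createReferenceWCS('reference_wcs.fits') # doctest: +SKIP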
""" hdu = self.createWcsHDU() # If refname already exists, delete it to make way for new file if os.path.exists(refname): if overwrite==yes: # Remove previous version and re-create with new header os.remove(refname) hdu.writeto(refname) else: # Append header to existing file wcs_append = True oldhdu = fits.open(refname, mode='append') for e in oldhdu: if 'extname' in e.header and e.header['extname'] == 'WCS': wcs_append = False if wcs_append == True: oldhdu.append(hdu) oldhdu.close() del oldhdu else: # No previous file, so generate new one from scratch hdu.writeto(refname) # Clean up del hdu def createWcsHDU(self): """ Generate a WCS header object that can be used to populate a reference WCS HDU. """ hdu = fits.ImageHDU() hdu.header['EXTNAME'] = 'WCS' hdu.header['EXTVER'] = 1 # Now, update original image size information hdu.header['WCSAXES'] = (2, "number of World Coordinate System axes") hdu.header['NPIX1'] = (self.naxis1, "Length of array axis 1") hdu.header['NPIX2'] = (self.naxis2, "Length of array axis 2") hdu.header['PIXVALUE'] = (0.0, "values of pixels in array") # Write out values to header... excluded_keys = ['naxis1','naxis2'] for key in self.wcskeys: _dkey = self.wcstrans[key] if _dkey not in excluded_keys: hdu.header[key] = self.__dict__[_dkey] return hdu def _buildNewKeyname(self,key,prepend): """ Builds a new keyword based on original keyword name and a prepend string. """ if len(prepend+key) <= 8: _new_key = prepend+key else: _new_key = str(prepend+key)[:8] return _new_key def copy(self,deep=yes): """ Makes a (deep)copy of this object for use by other objects. """ if deep: return copy.deepcopy(self) else: return copy.copy(self) def help(self): """ Prints out help message.""" print('wcsutil Version '+str(__version__)+':\n') print(self.__doc__) stsci.tools-3.4.12/lib/stsci/tools/xyinterp.py0000644001120100020070000000527413006721301023055 0ustar jhunkSTSCI\science00000000000000""" :Module: xyinterp.py Interpolates y based on the given xval. `x` and `y` are a pair of independent/dependent variable arrays that must be the same length. The x array must also be sorted. `xval` is a user-specified value. This routine looks up `xval` in the x array and uses that information to properly interpolate the value in the y array. :author: Vicki Laidler :version: '0.1 (2006-07-06)' """ from __future__ import division # confidence high import numpy as N #This section for standalone imports only------------------------------------- __version__ = '0.1' #Release version number only __vdate__ = '2006-07-06' #Date of this version, in this (FITS-style) format #----------------------------------------------------------------------------- def xyinterp(x,y,xval): """ :Purpose: Interpolates y based on the given xval. x and y are a pair of independent/dependent variable arrays that must be the same length. The x array must also be sorted. xval is a user-specified value. This routine looks up xval in the x array and uses that information to properly interpolate the value in the y array. Notes ===== Use the searchsorted method on the X array to determine the bin in which xval falls; then use that information to compute the corresponding y value. 
See Also ======== numpy Parameters ========== x: 1D numpy array independent variable array: MUST BE SORTED y: 1D numpy array dependent variable array xval: float the x value at which you want to know the value of y Returns ======= y: float the value of y corresponding to xval Raises ====== ValueError: If arrays are unequal length; or x array is unsorted; or if xval falls outside the bounds of x (extrapolation is unsupported) :version: 0.1 last modified 2006-07-06 """ #Enforce conditions on x, y, and xval: #x and y must correspond if len(x) != len(y): raise ValueError("Input arrays must be equal lengths") #Extrapolation not supported if xval < x[0]: raise ValueError("Value %f < min(x) %f: Extrapolation unsupported"%(xval,x[0])) if xval > x[-1]: raise ValueError("Value > max(x): Extrapolation unsupported") #This algorithm only works on sorted data if N.any(x[1:] < x[:-1]): raise ValueError("Input array x must be sorted") # Now do the real work. hi = x.searchsorted(xval) lo = hi - 1 try: seg = (float(xval)-x[lo]) / (x[hi] - x[lo]) except ZeroDivisionError: seg = 0.0 yval = y[lo] + seg*(y[hi] - y[lo]) return yval stsci.tools-3.4.12/lib/stsci.tools.egg-info/0000755001120100020070000000000013241171572022314 5ustar jhunkSTSCI\science00000000000000stsci.tools-3.4.12/lib/stsci.tools.egg-info/dependency_links.txt0000644001120100020070000000000113241171572026362 0ustar jhunkSTSCI\science00000000000000 stsci.tools-3.4.12/lib/stsci.tools.egg-info/entry_points.txt0000644001120100020070000000016713241171572025616 0ustar jhunkSTSCI\science00000000000000[console_scripts] convertlog = stsci.tools.convertlog:main convertwaiveredfits = stsci.tools.convertwaiveredfits:main stsci.tools-3.4.12/lib/stsci.tools.egg-info/PKG-INFO0000644001120100020070000000114613241171572023413 0ustar jhunkSTSCI\science00000000000000Metadata-Version: 1.1 Name: stsci.tools Version: 3.4.12 Summary: Collection of STScI utility functions Home-page: https://github.com/spacetelescope/stsci.tools Author: STScI Author-email: help@stsci.edu License: UNKNOWN Description-Content-Type: UNKNOWN Description: UNKNOWN Platform: UNKNOWN Classifier: Intended Audience :: Science/Research Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Scientific/Engineering :: Astronomy Classifier: Topic :: Software Development :: Libraries :: Python Modules stsci.tools-3.4.12/lib/stsci.tools.egg-info/requires.txt0000644001120100020070000000001613241171572024711 0ustar jhunkSTSCI\science00000000000000astropy numpy stsci.tools-3.4.12/lib/stsci.tools.egg-info/SOURCES.txt0000644001120100020070000000376013241171572024206 0ustar jhunkSTSCI\science00000000000000MANIFEST.in README.md RELIC-INFO setup.cfg setup.py lib/stsci/__init__.py lib/stsci.tools.egg-info/PKG-INFO lib/stsci.tools.egg-info/SOURCES.txt lib/stsci.tools.egg-info/dependency_links.txt lib/stsci.tools.egg-info/entry_points.txt lib/stsci.tools.egg-info/requires.txt lib/stsci.tools.egg-info/top_level.txt lib/stsci/tools/__init__.py lib/stsci/tools/alert.py lib/stsci/tools/asnutil.py lib/stsci/tools/basicpar.py lib/stsci/tools/bitmask.py lib/stsci/tools/capable.py lib/stsci/tools/cfgpars.py lib/stsci/tools/check_files.py lib/stsci/tools/clipboard_helper.py lib/stsci/tools/compmixin.py lib/stsci/tools/configobj.py lib/stsci/tools/convertgeis.py lib/stsci/tools/convertlog.py lib/stsci/tools/convertwaiveredfits.py lib/stsci/tools/dialog.py lib/stsci/tools/editpar.py
lib/stsci/tools/eparoption.py lib/stsci/tools/filedlg.py lib/stsci/tools/fileutil.py lib/stsci/tools/fitsdiff.py lib/stsci/tools/for2to3.py lib/stsci/tools/gfit.py lib/stsci/tools/imageiter.py lib/stsci/tools/irafglob.py lib/stsci/tools/irafglobals.py lib/stsci/tools/irafutils.py lib/stsci/tools/iterfile.py lib/stsci/tools/linefit.py lib/stsci/tools/listdlg.py lib/stsci/tools/logutil.py lib/stsci/tools/minmatch.py lib/stsci/tools/mputil.py lib/stsci/tools/nimageiter.py lib/stsci/tools/nmpfit.py lib/stsci/tools/numerixenv.py lib/stsci/tools/parseinput.py lib/stsci/tools/readgeis.py lib/stsci/tools/stash.py lib/stsci/tools/stpyfits.py lib/stsci/tools/swapgeis.py lib/stsci/tools/taskpars.py lib/stsci/tools/teal.py lib/stsci/tools/teal_bttn.py lib/stsci/tools/tester.py lib/stsci/tools/testutil.py lib/stsci/tools/textutil.py lib/stsci/tools/tkrotext.py lib/stsci/tools/validate.py lib/stsci/tools/version.py lib/stsci/tools/versioninfo.py lib/stsci/tools/vtor_checks.py lib/stsci/tools/wcsutil.py lib/stsci/tools/xyinterp.py lib/stsci/tools/tests/__init__.py lib/stsci/tools/tests/cdva2.fits lib/stsci/tools/tests/o4sp040b0_raw.fits lib/stsci/tools/tests/test_bitmask.py lib/stsci/tools/tests/test_stpyfits.py lib/stsci/tools/tests/test_xyinterp.pystsci.tools-3.4.12/lib/stsci.tools.egg-info/top_level.txt0000644001120100020070000000000613241171572025042 0ustar jhunkSTSCI\science00000000000000stsci stsci.tools-3.4.12/MANIFEST.in0000644001120100020070000000007513241163620017323 0ustar jhunkSTSCI\science00000000000000include RELIC-INFO recursive-include lib/stsci/tools/tests * stsci.tools-3.4.12/PKG-INFO0000644001120100020070000000114613241171572016667 0ustar jhunkSTSCI\science00000000000000Metadata-Version: 1.1 Name: stsci.tools Version: 3.4.12 Summary: Collection of STScI utility functions Home-page: https://github.com/spacetelescope/stsci.tools Author: STScI Author-email: help@stsci.edu License: UNKNOWN Description-Content-Type: UNKNOWN Description: UNKNOWN Platform: UNKNOWN Classifier: Intended Audience :: Science/Research Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Scientific/Engineering :: Astronomy Classifier: Topic :: Software Development :: Libraries :: Python Modules stsci.tools-3.4.12/README.md0000644001120100020070000000051013241163620017036 0ustar jhunkSTSCI\science00000000000000# stsci.tools [![Build Status](https://travis-ci.org/spacetelescope/stsci.tools.svg?branch=master)](https://travis-ci.org/spacetelescope/stsci.tools) [![Documentation Status](https://readthedocs.org/projects/stscitools/badge/?version=latest)](http://stscitools.readthedocs.io/en/latest/?badge=latest) STScI utility functions. 
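## Example

A minimal, illustrative sketch (assumes the package is installed; the FITS file name and pixel position are hypothetical):

```python
from stsci.tools import wcsutil

# Read the WCS from extension 1 of a FITS image and convert a
# pixel position to an (RA, Dec) sky position:
wcs = wcsutil.WCSObject('image.fits[1]')
ra, dec = wcs.xy2rd((512, 512))
print(ra, dec)
```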
stsci.tools-3.4.12/RELIC-INFO0000644001120100020070000000023613241167565017112 0ustar jhunkSTSCI\science00000000000000{"pep386": "3.4.12", "short": "3.4.12", "long": "3.4.12-0-gfd82437d", "date": "2018-02-07 16:36:45 -0500", "dirty": false, "commit": "fd82437d", "post": "0"} stsci.tools-3.4.12/setup.cfg0000644001120100020070000000031013241171572017403 0ustar jhunkSTSCI\science00000000000000[aliases] test = pytest [tool:pytest] python_files = lib/stsci/tools/tests/*.py [flake8] ignore = E501 exclude = setup.py,conf.py [bdist_wheel] universal = 1 [egg_info] tag_build = tag_date = 0 stsci.tools-3.4.12/setup.py0000755001120100020070000000334713241163620017307 0ustar jhunkSTSCI\science00000000000000#!/usr/bin/env python import os import subprocess import sys from setuptools import setup, find_packages if os.path.exists('relic'): sys.path.insert(1, 'relic') import relic.release else: try: import relic.release except ImportError: try: subprocess.check_call( ['git', 'clone', 'https://github.com/jhunkeler/relic.git']) sys.path.insert(1, 'relic') import relic.release except subprocess.CalledProcessError as e: print(e) exit(1) version = relic.release.get_info() relic.release.write_template(version, 'lib/stsci/tools') setup( name = 'stsci.tools', version = version.pep386, author = 'STScI', author_email = 'help@stsci.edu', description = 'Collection of STScI utility functions', url = 'https://github.com/spacetelescope/stsci.tools', classifiers = [ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Astronomy', 'Topic :: Software Development :: Libraries :: Python Modules', ], install_requires = [ 'astropy', 'numpy', ], setup_requires = [ 'pytest-runner' ], tests_require = [ 'pytest' ], package_dir = { '': 'lib', }, packages = find_packages('lib'), package_data = { '': ['LICENSE.txt'], 'stsci/tools/tests': ['*.fits'] }, entry_points = { 'console_scripts': [ 'convertwaiveredfits=stsci.tools.convertwaiveredfits:main', 'convertlog=stsci.tools.convertlog:main' ], }, )
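# Illustrative usage notes (assumptions: a standard setuptools workflow;
# the 'test' alias defined in setup.cfg maps to pytest):
#
#     python setup.py install
#     python setup.py test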