firmware-tools-2.1.14/ 0000777 0017654 0017654 00000000000 11452664762 021132 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/bin/ 0000777 0017654 0017654 00000000000 11452664762 021702 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/bin/firmwaretool 0000775 0017654 0017654 00000001400 10756403330 024317 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 #!/usr/bin/python
import sys
import os
# these are replaced by autotools when installed.
__VERSION__="unreleased_version"
# locate the python libs / cli data relative to this script so we can
# also run straight out of the build tree
PYTHONDIR=os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),"..")
PKGDATADIR=os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),"..","ft-cli")
# end build system subs
sys.path.insert(0, PKGDATADIR)
sys.path.insert(0,PYTHONDIR)
try:
    # dispatch on the name we were invoked under (symlink aliases):
    # inventory_firmware / update_firmware / bootstrap_firmware each imply
    # the corresponding mode flag
    if "inventory_firmware" in sys.argv[0]:
        sys.argv.append("--inventory")
    if "update_firmware" in sys.argv[0]:
        sys.argv.append("--update")
    if "bootstrap_firmware" in sys.argv[0]:
        sys.argv.append("--bootstrap")
    import ftmain
    ftmain.main(sys.argv[1:])
except KeyboardInterrupt, e:
    # Ctrl-C: exit quietly with a nonzero status
    print >> sys.stderr, "\n\nExiting on user cancel."
    sys.exit(1)
firmware-tools-2.1.14/bin/inventory_firmware_gui 0000775 0017654 0017654 00000110471 10767377333 026432 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 #!/usr/bin/python
# vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
"""
this is the documentation...
"""
# import arranged alphabetically
import commands
import cStringIO
import getopt
from gettext import gettext as _
import locale
import os
import pygtk
import sys
import threading
import traceback
pygtk.require('2.0')
import gtk, gtk.glade, pango
import gobject
import gnome.ui
# these are replaced by autotools when installed.
__VERSION__="unreleased_version"
PYTHONDIR=os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),".."))
PKGGLADEDIR=os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),"..","glade"))
PKGDATADIR=os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),"..","ft-cli"))
# end build system subs
# import all local modules after this. This allows us to run from build tree
sys.path.insert(0,PYTHONDIR)
sys.path.insert(0,PKGDATADIR)
import firmwaretools.trace_decorator as trace_decorator
import guihelpers
import firmwaretools.repository as repository
import firmwaretools.package
PROGRAM_NAME="Firmware Inventory and Update GUI"
class CommandAlreadyExecuted(Exception):
    """Raised when execute() is called on a Command that was already executed."""
    pass
class CommandAlreadyUndone(Exception):
    """Raised when undo() is called on a Command that was already undone."""
    pass
class Command(object):
    """Undoable command for the GUI's undo/redo stacks.

    Captures a memento of *object* immediately before invoking
    ``method(*args, **kargs)`` so the action can later be reversed by
    restoring that snapshot.
    """
    def __init__(self, object, method, args, kargs):
        # NOTE: parameter name 'object' shadows the builtin; kept for API compat.
        self.object = object
        self.method = method
        self.args = args
        self.kargs = kargs
        self.memento = None
        self.executed = False
        self.undone = False

    def execute(self):
        """Run the command once; running twice without an undo raises."""
        if self.executed:
            raise CommandAlreadyExecuted()
        self.executed, self.undone = True, False
        # snapshot state first, so undo() can restore the pre-call state
        self.memento = self.object.getMemento()
        self.method(*self.args, **self.kargs)

    def undo(self):
        """Restore the pre-execute snapshot; undoing twice raises."""
        if self.undone:
            raise CommandAlreadyUndone()
        self.undone, self.executed = True, False
        self.object.setMemento(self.memento)
# use only for pin/unpin; it is more efficient as it doesn't save a full memento
class UpdateSetPinCommand(Command):
    """Command variant for pin/unpin operations.

    Overrides execute() to snapshot only the affected device (via
    deviceHint) instead of the full update-set memento.
    """
    def execute(self):
        if self.executed:
            raise CommandAlreadyExecuted()
        self.executed=True
        self.undone = False
        # args[0] is the device being (un)pinned; limit the memento to it
        self.memento = self.object.getMemento(deviceHint = self.args[0])
        self.method(*self.args, **self.kargs)
class InventoryFirmware:
    """Main window controller for the firmware inventory/update GUI."""
    # glade UI definition, loaded relative to PKGGLADEDIR
    GLADE_FILE = '/inventory_firmware_gui.glade'
    def __init__(self, base):
        """Build the UI, run the (slow) system inventory, populate the trees.

        base -- FtBase-style object providing calculateUpgradeList(),
                yieldInventory() and getSystemId().
        """
        self.wTree = gtk.glade.XML(PKGGLADEDIR + self.GLADE_FILE)
        # hook up the on_* / toggle_* handlers defined on this class
        self.wTree.signal_autoconnect(self)
        self.main_window = self.wTree.get_widget("MainWindow")
        self.wTree.get_widget("about_dialog").destroy()
        # set up toggles
        self.showUnknown=0
        self.toolbarAllowDowngrade = self.wTree.get_widget("toolbar_allow_downgrade")
        self.toolbarAllowReflash = self.wTree.get_widget("toolbar_allow_reflash")
        self.toolbarShowUnknown = self.wTree.get_widget("toolbar_show_unknown")
        self.menuAllowDowngrade = self.wTree.get_widget("menu_allow_downgrade")
        self.menuAllowReflash = self.wTree.get_widget("menu_allow_reflash")
        self.menuShowUnknown = self.wTree.get_widget("menu_show_unknown")
        # guard flag: suppresses toggle callbacks triggered by our own refreshes
        self.recursiveCallback=0
        # internal accounting
        self.numDowngradeSelected = 0
        self.numReflashSelected = 0
        # set up command stack, used for undo/redo
        self.undoStack = []
        self.redoStack = []
        # setup tree views
        self._setupInventoryTreeView()
        self._setupBootstrapTreeView()
        self._setupUpdateStatusTreeView()
        # get handle to status bar
        self.statusBar = self.wTree.get_widget("main_window_status_bar")
        ctx = self.statusBar.get_context_id("main")
        self.statusBar.push(ctx, "Ready")
        # disable input in main window until we are finished initializing...
        self.main_window.set_sensitive(0)
        # show main window
        self.main_window.show()
        # set status == collecting inventory
        ctx = self.statusBar.get_context_id("inventory")
        self.statusBar.push(ctx, _("Performing system inventory..."))
        guihelpers.gtkYield() # make sure current GUI is fully displayed
        # special function to make sure GUI updates smoothly while we
        # generate the update set
        def myYield(*args, **kargs):
            # eats all its args...
            guihelpers.gtkYield()
        # the following two lines are equivalent, but runLongProcess() does the
        # action in a background thread so the GUI will update while it works.
        #self.updateSet = base.calculateUpgradeList(cb=(myYield, None))
        self.updateSet = guihelpers.runLongProcessGtk(
            base.calculateUpgradeList,
            args=(),
            kargs={'cb':(myYield, None)})
        self._populateInventoryTree()
        self._populateBootstrapTree(base)
        self.inventoryTreeView.expand_all()
        self._refresh()
        # set status == ready
        self.statusBar.pop(ctx)
        self.main_window.set_sensitive(1)
    def _setupBootstrapTreeView(self):
        """Create the model and columns for the bootstrap treeview."""
        # create model for bootstrap treeview
        self.bootstrapTreeView = self.wTree.get_widget("bootstrap_treeview")
        self.bootstrapTreeModel= gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
        self.bootstrapTreeView.set_model(self.bootstrapTreeModel)
        # model column indexes
        self.BOOTSTRAP_COLUMN_BOOTSTRAP_NAME = 0
        self.BOOTSTRAP_COLUMN_DEVICE_NAME = 1
        self.BOOTSTRAP_COLUMN_FW_VER = 2
        # add column headers to the bootstrap treeview
        self.bootstrapTreeView.set_headers_visible(True)
        # add column: bootstrap name
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Bootstrap Name"),renderer, text=self.BOOTSTRAP_COLUMN_BOOTSTRAP_NAME)
        column.set_resizable(True)
        self.bootstrapTreeView.append_column(column)
        # add column: device display name
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Device Name"),renderer, text=self.BOOTSTRAP_COLUMN_DEVICE_NAME)
        column.set_resizable(True)
        self.bootstrapTreeView.append_column(column)
        # add column: firmware version
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Firmware Version"),renderer, text=self.BOOTSTRAP_COLUMN_FW_VER)
        column.set_resizable(True)
        self.bootstrapTreeView.append_column(column)
        # let us select multiple releases
        self.bootstrapTreeView.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
    def _setupUpdateStatusTreeView(self):
        """Create the model and columns for the update-status treeview."""
        # create model for update status treeview
        self.updateStatusTreeView = self.wTree.get_widget("status_treeview")
        # columns: device object, package object, serial (bumped to force redraw)
        self.updateStatusTreeModel= gtk.TreeStore( object, object, int )
        self.updateStatusTreeView.set_model(self.updateStatusTreeModel)
        self.STATUS_COLUMN_DEVICE = 0
        self.STATUS_COLUMN_PACKAGE = 1
        self.STATUS_COLUMN_SERIAL = 2
        # add column headers to the treeview
        self.updateStatusTreeView.set_headers_visible(True)
        # status, component, status description, log?
        # add column: update status (progress / spinner)
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Status"),renderer)
        column.set_resizable(True)
        column.set_cell_data_func(renderer, self.cell_data_func_us_status)
        self.updateStatusTreeView.append_column(column)
        # add column: component (device) name
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Component"),renderer)
        column.set_resizable(True)
        column.set_cell_data_func(renderer, self.cell_data_func_us_component)
        self.updateStatusTreeView.append_column(column)
        # add column: currently-installed version
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Current Version"),renderer)
        column.set_resizable(True)
        column.set_cell_data_func(renderer, self.cell_data_func_us_cur_version)
        self.updateStatusTreeView.append_column(column)
        # add column: version we are updating to
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Update Version"),renderer)
        column.set_resizable(True)
        column.set_cell_data_func(renderer, self.cell_data_func_us_update_version)
        self.updateStatusTreeView.append_column(column)
        # add column: human-readable status description
        renderer=gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Status Description"),renderer)
        column.set_resizable(True)
        column.set_cell_data_func(renderer, self.cell_data_func_us_status_description)
        self.updateStatusTreeView.append_column(column)
        # let us select multiple releases
        self.updateStatusTreeView.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
    def _setupInventoryTreeView(self):
        """Create the model and columns for the main inventory treeview."""
        # create model for inventory treeview
        self.inventoryTreeView = self.wTree.get_widget("inventory_treeview")
        self.inventoryTreeModel= gtk.TreeStore(
            object,               # device or package
            gobject.TYPE_BOOLEAN, # for device == enable update for device (checkbox),
                                  # for package == update to this package
            gobject.TYPE_INT,     # flags
            gobject.TYPE_INT,     # update serial
            )
        self.inventoryTreeView.set_model(self.inventoryTreeModel)
        self.INVENTORY_COLUMN_DEVICE = 0
        self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE = 1
        self.INVENTORY_COLUMN_FLAGS = 2
        self.INVENTORY_COLUMN_SERIAL = 3
        # bitmask values for the FLAGS column
        self.FLAG_REFLASH = 1
        self.FLAG_DOWNGRADE = 2
        # add column headers to the inventory treeview
        self.inventoryTreeView.set_headers_visible(True)
        # select, status, criticality, package name, component, type (bios/firmware/driver), current ver, repo ver
        # add column: Flash yes/no checkbox column
        renderer=gtk.CellRendererToggle()
        renderer.set_property("radio", False)
        renderer.set_property('activatable', True)
        renderer.connect('toggled', self.toggle_device_cb, self.inventoryTreeModel)
        column=gtk.TreeViewColumn(_("Flash"),renderer)
        column.add_attribute(renderer, "active", self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE)
        column.set_cell_data_func(renderer, self.cell_data_func_iv_toggle)
        self.inventoryTreeView.append_column(column)
        # add column: display name for devices, version select (radio) for updates
        renderer=gtk.CellRendererToggle()
        renderer.set_property("radio", True)
        renderer.set_property('activatable', True)
        renderer.connect('toggled', self.toggle_update_cb, self.inventoryTreeModel)
        column=gtk.TreeViewColumn(_("Device Name"),renderer)
        column.add_attribute(renderer, "active", self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE)
        column.set_resizable(True)
        # second renderer in the same column holds the text
        renderer=gtk.CellRendererText()
        column.pack_start(renderer)
        column.set_cell_data_func(renderer, self.cell_data_func_iv_display_name)
        self.inventoryTreeView.append_column(column)
        self.inventoryTreeView.set_expander_column(column)
        # add column: Firmware version
        renderer = gtk.CellRendererText()
        column=gtk.TreeViewColumn(_("Current Version"),renderer)
        column.set_cell_data_func(renderer, self.cell_data_func_iv_version)
        column.set_resizable(True)
        self.inventoryTreeView.append_column(column)
        # let us select multiple releases
        self.inventoryTreeView.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
    # this is a helper function to initially populate the tree model.
    # should only ever be called once.
    def _populateInventoryTree(self):
        """Fill the inventory model: one top-level row per device, with one
        child row per available update package."""
        self.inventoryTreeModel.clear()
        for device in self.updateSet.iterDevices():
            guihelpers.gtkYield()  # keep the GUI responsive during the scan
            # devices with unknown firmware version are hidden by default
            if device.version == "unknown" and not self.showUnknown:
                continue
            flags = 0
            toggle=False
            if self.updateSet.getUpdatePackageForDevice(device) is not None:
                toggle=True
            iter = self.inventoryTreeModel.append(None, [device, toggle, flags, 0])
            for availPkg in self.updateSet.iterAvailableUpdates(device):
                guihelpers.gtkYield()
                flags = 0
                # same version == reflash; older version == downgrade
                if device.compareVersion(availPkg) == 0:
                    flags = flags | self.FLAG_REFLASH
                if device.compareVersion(availPkg) > 0:
                    flags = flags | self.FLAG_DOWNGRADE
                toggle=False
                if self.updateSet.getUpdatePackageForDevice(device) == availPkg:
                    toggle=True
                self.inventoryTreeModel.append(iter, [availPkg, toggle, flags, 0])
    # this is a helper function to initially populate the tree model.
    # should only ever be called once.
    def _populateBootstrapTree(self, base):
        """Fill the bootstrap model with one row per inventoried device, plus
        a system-qualified row when the system vendor/device ids are known."""
        self.bootstrapTreeModel.clear()
        venId, sysId = base.getSystemId()
        for dev in base.yieldInventory():
            guihelpers.gtkYield()
            self.bootstrapTreeModel.append(None, [dev.name, dev.displayname, dev.version])
            # NOTE(review): indentation was lost in this copy; the extra
            # per-device system-qualified row is assumed to be inside the
            # loop -- confirm against upstream firmware-tools.
            if venId and sysId:
                self.bootstrapTreeModel.append(None, ["%s/system(ven_0x%04x_dev_0x%04x)" % (dev.name, venId, sysId), dev.displayname, dev.version])
    def _populateUpdateStatusTree(self):
        """Fill the update-status model in installation order."""
        self.updateStatusTreeModel.clear()
        for device, package in self.updateSet.generateInstallationOrder(returnDeviceToo=1):
            self.updateStatusTreeModel.append(None, [device, package, 0])
    # refresh the display when something happens behind the scenes. Should be rarely used.
    def _refresh(self):
        """Resync toggles, policy buttons and undo/redo state with the model."""
        self._refreshUpdateEnableToggles()
        self._refreshAllowToggles()
        self._refreshEnableUndoRedo()
    # only refreshes the toggles and radio buttons to reflect current package set.
    def _refreshUpdateEnableToggles(self):
        for i in range(self.inventoryTreeModel.iter_n_children(None)):
            device_path = self.inventoryTreeModel.get_path(self.inventoryTreeModel.iter_nth_child(None, i))
            device = self.inventoryTreeModel[device_path][self.INVENTORY_COLUMN_DEVICE]
            # device checkbox mirrors "an update package is selected"
            if self.updateSet.getUpdatePackageForDevice(device) is not None:
                self.inventoryTreeModel[device_path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE]=True
            else:
                self.inventoryTreeModel[device_path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE]=False
            self._fixupChildren(self.inventoryTreeModel, self.inventoryTreeModel.get_iter(device_path))
    # refreshes allow reflash/downgrade toggles
    def _refreshAllowToggles(self):
        # set the guard so the set_active()/set_property() calls below do not
        # re-enter the on_*_toggled handlers
        self.recursiveCallback=1
        self.menuAllowDowngrade.set_property("active", self.updateSet.allowDowngrade)
        self.menuAllowReflash.set_property("active", self.updateSet.allowReflash)
        self.menuShowUnknown.set_property("active", self.showUnknown)
        self.toolbarAllowDowngrade.set_active(self.updateSet.allowDowngrade)
        self.toolbarAllowReflash.set_active(self.updateSet.allowReflash)
        self.toolbarShowUnknown.set_active(self.showUnknown)
        # while any downgrade/reflash package is selected, the corresponding
        # policy toggle is forced on, so grey it out
        if self.numDowngradeSelected:
            self.menuAllowDowngrade.set_sensitive(0)
            self.toolbarAllowDowngrade.set_sensitive(0)
        else:
            self.menuAllowDowngrade.set_sensitive(1)
            self.toolbarAllowDowngrade.set_sensitive(1)
        if self.numReflashSelected:
            self.menuAllowReflash.set_sensitive(0)
            self.toolbarAllowReflash.set_sensitive(0)
        else:
            self.menuAllowReflash.set_sensitive(1)
            self.toolbarAllowReflash.set_sensitive(1)
        self.recursiveCallback=0
    # enables/disables the undo/redo/reset buttons so only valid actions are enabled
    def _refreshEnableUndoRedo(self):
        # undo/reset are valid only when something has been done...
        if self.undoStack:
            self.wTree.get_widget("reset_button").set_sensitive(1)
            self.wTree.get_widget("reset_menu").set_sensitive(1)
            self.wTree.get_widget("undo_button").set_sensitive(1)
            self.wTree.get_widget("undo_menu").set_sensitive(1)
        else:
            self.wTree.get_widget("reset_button").set_sensitive(0)
            self.wTree.get_widget("reset_menu").set_sensitive(0)
            self.wTree.get_widget("undo_button").set_sensitive(0)
            self.wTree.get_widget("undo_menu").set_sensitive(0)
        # ...and redo only when something has been undone
        if self.redoStack:
            self.wTree.get_widget("redo_button").set_sensitive(1)
            self.wTree.get_widget("redo_menu").set_sensitive(1)
        else:
            self.wTree.get_widget("redo_button").set_sensitive(0)
            self.wTree.get_widget("redo_menu").set_sensitive(0)
    def cell_data_func_iv_display_name(self, column, cell, model, iter):
        """Cell-data callback for the 'Device Name' column.

        Device rows show the device string; package rows show the available
        version plus a policy annotation, and the radio renderer is made
        non-activatable when the update is disallowed.
        renderers[0] == radio toggle, renderers[1] == text (pack order in
        _setupInventoryTreeView).
        """
        pyobj = model.get_value(iter,self.INVENTORY_COLUMN_DEVICE)
        renderers = column.get_cell_renderers()
        text = str(pyobj)
        if isinstance(pyobj, firmwaretools.package.Device):
            # device row: text only, no radio button
            renderers[0].set_property("visible", False)
            renderers[1].set_property("text", text)
        else:
            # package (child) row
            flags = model.get_value(iter,self.INVENTORY_COLUMN_FLAGS)
            renderers[0].set_property("visible", True)
            parentIter = model.iter_parent(iter)
            device = model.get_value(parentIter,self.INVENTORY_COLUMN_DEVICE)
            text = str(pyobj.version)
            renderers[1].set_property("text", _("Available Version: %s") % text)
            renderers[0].set_property('activatable', True)
            # radio is dead while the parent device's update is disabled
            if not model.get_value(parentIter, self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE):
                renderers[0].set_property('activatable', False)
            if self.updateSet.getSuggestedUpdatePackageForDevice(device) == pyobj:
                # TODO: picture?
                renderers[1].set_property("text", _("Available Version: %s (suggested)") % text)
            # later checks intentionally overwrite the text set above, so the
            # most restrictive annotation wins
            if flags & self.FLAG_REFLASH:
                # TODO: picture?
                renderers[1].set_property("text", _("Available Version: %s (reflash)") % text)
                if not self.updateSet.allowReflash:
                    renderers[0].set_property('activatable', False)
                    renderers[1].set_property("text", _("Available Version: %s (reflash disabled per policy)") % text)
                if not pyobj.getCapability("can_reflash"):
                    renderers[0].set_property('activatable', False)
                    renderers[1].set_property("text", _("Available Version: %s (reflash disabled due to package limitations)") % text)
            if flags & self.FLAG_DOWNGRADE:
                # TODO: picture?
                renderers[1].set_property("text", _("Available Version: %s (downgrade)") % text)
                if not self.updateSet.allowDowngrade:
                    renderers[0].set_property('activatable', False)
                    renderers[1].set_property("text", _("Available Version: %s (downgrade disabled per policy)") % text)
                if not pyobj.getCapability("can_downgrade"):
                    renderers[0].set_property('activatable', False)
                    renderers[1].set_property("text", _("Available Version: %s (downgrade disabled due to package limitations)") % text)
    def cell_data_func_iv_version(self, column, cell, model, iter):
        """Show the current firmware version, only on device rows."""
        pyobj = model.get_value(iter,self.INVENTORY_COLUMN_DEVICE)
        if isinstance(pyobj, firmwaretools.package.Device):
            cell.set_property("visible", True)
            cell.set_property("text", pyobj.version)
        else:
            cell.set_property("visible", False)
    def cell_data_func_iv_toggle(self, column, cell, model, iter):
        """Show the 'Flash' checkbox only on device rows, not package rows."""
        pyobj = model.get_value(iter,self.INVENTORY_COLUMN_DEVICE)
        if isinstance(pyobj, firmwaretools.package.Device):
            cell.set_property("visible", True)
        else:
            cell.set_property("visible", False)
    def toggle_device_cb(self, renderer, path, model, *args, **kargs):
        """'Flash' checkbox toggled on a device row.

        Enabling unpins the device (the calculated package takes over);
        disabling pins the device to None. Both go through the undo stack.
        """
        model[path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE] = not model[path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE]
        device = model[path][self.INVENTORY_COLUMN_DEVICE]
        if model[path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE]:
            # unpin it and calculated pkg takes over
            self._executeCommand(UpdateSetPinCommand(self.updateSet, self.updateSet.unPinDevice, (device,), {}))
        else:
            # pin it to None to disable update for this device
            self._executeCommand(UpdateSetPinCommand(self.updateSet, self.updateSet.pinUpdatePackage, (device, None), {}))
        self._fixupChildren(model, model.get_iter(path))
        self._refreshAllowToggles()
    def toggle_update_cb(self, renderer, path, model, *args, **kargs):
        """Version radio toggled on a package row: pin that package."""
        # dont re-activate if it is already the active update
        if not model[path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE]:
            iter = model.get_iter(path)
            parentIter = model.iter_parent(iter)
            device = model.get_value(parentIter, self.INVENTORY_COLUMN_DEVICE)
            update = model[path][self.INVENTORY_COLUMN_DEVICE]
            self._executeCommand(UpdateSetPinCommand(self.updateSet, self.updateSet.pinUpdatePackage, (device, update), {}))
            self._fixupChildren(model, parentIter)
            self._refreshAllowToggles()
    # this method sets the enable toggle on packages appropriately
    # it also interlocks the allow reflash/downgrade toggles
    def _fixupChildren(self, model, iter):
        """Sync child (package) radio states under device *iter* with the
        update set, maintaining the downgrade/reflash selection counters."""
        for i in range(model.iter_n_children(iter)):
            child_path = model.get_path(model.iter_nth_child(iter, i))
            curValue = model[child_path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE]
            if model[child_path][self.INVENTORY_COLUMN_DEVICE] == self.updateSet.getUpdatePackageForDevice(model.get_value(iter,self.INVENTORY_COLUMN_DEVICE)):
                # becoming selected: count newly-selected downgrades/reflashes
                if curValue == False and model[child_path][self.INVENTORY_COLUMN_FLAGS] & self.FLAG_DOWNGRADE:
                    self.numDowngradeSelected = self.numDowngradeSelected + 1
                if curValue == False and model[child_path][self.INVENTORY_COLUMN_FLAGS] & self.FLAG_REFLASH:
                    self.numReflashSelected = self.numReflashSelected + 1
                model[child_path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE] = True
            else:
                # becoming deselected: uncount them
                if curValue == True and model[child_path][self.INVENTORY_COLUMN_FLAGS] & self.FLAG_DOWNGRADE:
                    self.numDowngradeSelected = self.numDowngradeSelected - 1
                if curValue == True and model[child_path][self.INVENTORY_COLUMN_FLAGS] & self.FLAG_REFLASH:
                    self.numReflashSelected = self.numReflashSelected - 1
                model[child_path][self.INVENTORY_COLUMN_DEVICE_ENABLE_UPDATE] = False
            # force update serial so rows get redisplayed
            self.inventoryTreeModel[child_path][self.INVENTORY_COLUMN_SERIAL]= (
                self.inventoryTreeModel[child_path][self.INVENTORY_COLUMN_SERIAL] + 1)
    def on_allow_downgrade_toggled(self, widget, *args, **kargs):
        """Menu/toolbar 'allow downgrade' toggled: push policy change as a command."""
        # guard against executeCommand being called while we are in a refresh
        if not self.recursiveCallback:
            # a selected downgrade forces the policy to stay on
            if self.numDowngradeSelected > 0:
                active = 1
            else:
                active = widget.get_active()
            self._executeCommand(Command(self.updateSet, self.updateSet.setAllowDowngrade, (active,), {}))
            self._refreshAllowToggles()
            # force update serial so rows get redisplayed
            for i in range(self.inventoryTreeModel.iter_n_children(None)):
                device_path = self.inventoryTreeModel.get_path(self.inventoryTreeModel.iter_nth_child(None, i))
                self._fixupChildren(self.inventoryTreeModel, self.inventoryTreeModel.get_iter(device_path))
    def on_show_unknown_toggled(self, widget, *args, **kargs):
        """Show/hide devices with unknown firmware version and repopulate."""
        if not self.recursiveCallback:
            self.showUnknown = widget.get_active()
            self._refreshAllowToggles()
            self._populateInventoryTree()
    def on_allow_reflash_toggled(self, widget, *args, **kargs):
        """Menu/toolbar 'allow reflash' toggled: push policy change as a command."""
        # guard against executeCommand being called while we are in a refresh
        if not self.recursiveCallback:
            # a selected reflash forces the policy to stay on
            if self.numReflashSelected > 0:
                active = 1
            else:
                active = widget.get_active()
            self._executeCommand(Command(self.updateSet, self.updateSet.setAllowReflash, (active,), {}))
            self._refreshAllowToggles()
            # force update serial so rows get redisplayed
            for i in range(self.inventoryTreeModel.iter_n_children(None)):
                device_path = self.inventoryTreeModel.get_path(self.inventoryTreeModel.iter_nth_child(None, i))
                self._fixupChildren(self.inventoryTreeModel, self.inventoryTreeModel.get_iter(device_path))
    def on_help_about(self, *args):
        """Show the modal About dialog (re-loaded fresh from the glade file)."""
        wTree = gtk.glade.XML(PKGGLADEDIR + self.GLADE_FILE, "about_dialog")
        wTree.get_widget("about_dialog").set_property('name',PROGRAM_NAME)
        wTree.get_widget("about_dialog").set_property('version',__VERSION__)
        wTree.get_widget("about_dialog").run() # modal until 'close'
        wTree.get_widget("about_dialog").destroy()
    def _executeCommand(self, command):
        """Execute *command*, push it on the undo stack, clear redo history."""
        command.execute()
        self.undoStack.append(command)
        self.redoStack = []
        self._refreshEnableUndoRedo()
        # cap undo history at 20 entries
        if len(self.undoStack) > 20:
            self.undoStack = self.undoStack[-20:]
    def on_undo_activate(self, *args, **kargs):
        """Undo the most recent command and move it to the redo stack."""
        if self.undoStack:
            command = self.undoStack.pop()
            command.undo()
            self.redoStack.append(command)
            self._refresh()
    def on_redo_activate(self, *args, **kargs):
        """Re-execute the most recently undone command."""
        if self.redoStack:
            command = self.redoStack.pop()
            command.execute()
            self.undoStack.append(command)
            self._refresh()
    def on_reset_activate(self, *args, **kargs):
        """Discard all user choices: reset the update set and both stacks."""
        self.updateSet.reset()
        self.undoStack = []
        self.redoStack = []
        # restore default (restrictive) policies
        self.updateSet.setAllowReflash(0)
        self.updateSet.setAllowDowngrade(0)
        self._refresh()
    def on_system_inventory_menu_activate(self, *args, **kargs):
        """Switch the notebook to the system inventory page."""
        notebook = self.wTree.get_widget("notebook")
        widget = self.wTree.get_widget("inventory_vbox")
        page = notebook.page_num(widget)
        notebook.set_current_page(page)
    def on_bootstrap_inventory_menu_activate(self, *args, **kargs):
        """Switch the notebook to the bootstrap inventory page."""
        notebook = self.wTree.get_widget("notebook")
        widget = self.wTree.get_widget("bootstrap_scrolledwindow")
        page = notebook.page_num(widget)
        notebook.set_current_page(page)
    def on_file_quit(self, *args):
        """File->Quit menu handler."""
        self.on_quit_app( allowCancel = 1 )
    def on_quit_app(self, *args, **kargs):
        """Quit the GTK main loop.

        NOTE(review): the allowCancel karg is accepted but currently ignored
        (see the stale '# check kargs' comment) -- confirm intended.
        """
        # check kargs['allowCancel']
        gtk.main_quit()
    def on_update_now_activate(self, *args, **kargs):
        """Confirm with the user (with an expandable details pane), then move
        to the update page. The transition is one-way."""
        # build the details text: every device with a selected update
        detailsStr = cStringIO.StringIO()
        detailsStr.write( _("Going to update the following devices:\n\n") )
        for device in self.updateSet.iterDevices():
            pkg = self.updateSet.getUpdatePackageForDevice(device)
            if pkg is not None:
                detailsStr.write("\t%s\n" % str(device))
                detailsStr.write(_("\t\tFrom Version: %s\n") % device.version)
                detailsStr.write(_("\t\tTo Version  : %s\n") % pkg.version)
                detailsStr.write("\n")
        dialog = gtk.MessageDialog(parent=None,
                                   flags=0,
                                   type=gtk.MESSAGE_WARNING,
                                   buttons=gtk.BUTTONS_NONE)
        dialog.set_title(_("Update Firmware"))
        dialog.set_markup(_( "Your system will now be updated.\n\nYou will not be able to come back to this page after you continue.\nPress the 'Show Details' button to see which devices are set to be updated."))
        # custom response ids: 1 == toggle details, 2 == continue, 3 == cancel
        showButton = dialog.add_button(_("Show Details..."), 1)
        dialog.add_button(_("Continue to update page..."), 2)
        dialog.add_button(_("Cancel Update"), 3)
        # Details
        textview = gtk.TextView()
        textview.set_editable(False)
        textview.modify_font(pango.FontDescription("Monospace"))
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        sw.add(textview)
        frame = gtk.Frame()
        frame.set_shadow_type(gtk.SHADOW_IN)
        frame.add(sw)
        frame.set_border_width(6)
        dialog.vbox.add(frame)
        textbuffer = textview.get_buffer()
        textbuffer.set_text(detailsStr.getvalue())
        textview.set_size_request(gtk.gdk.screen_width()/2, gtk.gdk.screen_height()/3)
        dialog.details = frame
        dialog.set_position(gtk.WIN_POS_CENTER)
        dialog.set_gravity(gtk.gdk.GRAVITY_CENTER)
        show = False
        while 1:
            resp = dialog.run()
            if resp == 1:
                # toggle the details pane; dialog stays open
                show = not show
                if show:
                    dialog.details.show_all()
                    showButton.set_label(_("Hide Details"))
                else:
                    dialog.details.hide_all()
                    showButton.set_label(_("Show Details..."))
            elif resp == 2:
                dialog.destroy()
                self._gotoUpdatePage()
                break
            else:
                # cancel (or dialog closed)
                dialog.destroy()
                break
    def _gotoUpdatePage(self):
        """Switch to the update-status page and lock down editing UI."""
        self._populateUpdateStatusTree()
        notebook = self.wTree.get_widget("notebook")
        widget = self.wTree.get_widget("update_status_scrolledwindow")
        page = notebook.page_num(widget)
        notebook.set_current_page(page)
        # disable view menu
        view_menu = self.wTree.get_widget("view_menu")
        view_menu.set_sensitive(0)
        self.menuAllowDowngrade.set_sensitive(0)
        self.menuAllowReflash.set_sensitive(0)
        self.wTree.get_widget("update_now_menu").set_sensitive(0)
        # undo/redo are no longer meaningful once on the update page
        self.redoStack = []
        self.undoStack = []
        self._refreshEnableUndoRedo()
        ctx = self.statusBar.get_context_id("update page")
        self.statusBar.push(ctx, "Click 'Update Now' to begin update...")
    def cell_data_func_us_status(self, column, cell, model, iter):
        """Render progress: percentage if accurate, otherwise a spinner while
        installing, blank otherwise."""
        pkg = model.get_value(iter,self.STATUS_COLUMN_PACKAGE)
        if pkg.getCapability('accurate_update_percentage'):
            cell.set_property("text", "%s%%" % (pkg.getProgress()*100))
        else:
            if pkg.status == "installing":
                # NOTE(review): relies on firmwaretools.pycompat having been
                # imported elsewhere (only firmwaretools.package/repository
                # are imported at the top of this file) -- confirm.
                cell.set_property("text", firmwaretools.pycompat.spinner())
            else:
                cell.set_property("text", "")
    def cell_data_func_us_status_description(self, column, cell, model, iter):
        """Render the package's human-readable status string."""
        pkg = model.get_value(iter,self.STATUS_COLUMN_PACKAGE)
        cell.set_property("text", pkg.getStatusStr())
    def cell_data_func_us_component(self, column, cell, model, iter):
        """Render the device (component) name."""
        device = model.get_value(iter,self.STATUS_COLUMN_DEVICE)
        cell.set_property("text", str(device))
    def cell_data_func_us_cur_version(self, column, cell, model, iter):
        """Render the currently-installed firmware version."""
        device = model.get_value(iter,self.STATUS_COLUMN_DEVICE)
        cell.set_property("text", device.version)
    def cell_data_func_us_update_version(self, column, cell, model, iter):
        """Render the version the package would install."""
        pkg = model.get_value(iter,self.STATUS_COLUMN_PACKAGE)
        cell.set_property("text", pkg.version)
    def on_really_update_now_button_clicked(self, *args, **kargs):
        """Run all selected installs in order, updating the status page.

        Packages without an installer are skipped; other failures mark the
        run unsuccessful and optionally stop the loop ('stop on errors').
        """
        # disable update button...
        ctx = self.statusBar.get_context_id("update page")
        self.statusBar.pop(ctx)
        self.statusBar.push(ctx, "Performing updates now...")
        self.wTree.get_widget("really_update_now_button").set_sensitive(0)
        success = 1
        for pkg in self.updateSet.generateInstallationOrder():
            # re-read the checkbox each iteration so the user can change it mid-run
            stop = self.wTree.get_widget("stop_on_errors").get_active()
            try:
                # install in a background thread; keep refreshing the GUI meanwhile
                ret = guihelpers.runLongProcessGtk(pkg.install, waitLoopFunction=self._refreshUpdateStatus)
            except (firmwaretools.package.NoInstaller,), e:
                print "package %s - %s does not have an installer available." % (pkg.name, pkg.version)
                print "skipping this package for now."
            except (Exception,), e:
                success = 0
                print "Installation failed for package: %s - %s" % (pkg.name, pkg.version)
                print "The error message from the low-level command was:"
                print e
                if stop:
                    print
                    print "stop on errors selected. error detected, so I'm stopping."
                    break
        self._refreshUpdateStatus()
        self.statusBar.pop(ctx)
        if success:
            self.statusBar.push(ctx, "All updates successfully completed.")
        else:
            self.statusBar.push(ctx, "Some updates failed.")
    def _refreshUpdateStatus(self):
        # update serial # to force GUI to refresh
        model = self.updateStatusTreeModel
        for i in range(model.iter_n_children(None)):
            path = model.get_path(model.iter_nth_child(None, i))
            model[path][self.STATUS_COLUMN_SERIAL] = model[path][self.STATUS_COLUMN_SERIAL] + 1
def main():
    """Entry point: set locale, configure FtBase from CLI options, run the GUI.

    Exits 0 on clean shutdown, 2 on any unhandled exception.
    """
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error, e:
        # default to C locale if we get a failure.
        print >> sys.stderr, 'Failed to set locale, defaulting to C'
        locale.setlocale(locale.LC_ALL, 'C')
    try:
        import cli
        import firmwaretools.plugins as plugins
        base = firmwaretools.FtBase()
        parser = cli.FtOptionParser(usage=__doc__, version=__VERSION__)
        # first pass only extracts config-file locations
        opts = parser.firstParse(sys.argv[1:])
        configFiles = opts.configFiles
        base.loggingConfig = configFiles[0]
        opts,args = parser.parse_args(sys.argv[1:])
        # fake mode swaps real inventory plugins for mock ones
        pluginTypes = [plugins.TYPE_CORE, ]
        if not opts.fake_mode:
            pluginTypes.extend([plugins.TYPE_INVENTORY,])
        else:
            pluginTypes.extend([plugins.TYPE_MOCK_CORE, plugins.TYPE_MOCK_INVENTORY])
        base.opts = opts
        base.verbosity = opts.verbosity
        base.trace = opts.trace
        base._getConfig(configFiles, pluginTypes, parser, opts.disabledPlugins)
        #gnome.init(PROGRAM_NAME, version)
        test = InventoryFirmware(base)
        gtk.main()
    except:
        # top-level boundary: report and exit nonzero
        traceback.print_exc()
        sys.exit(2)
    sys.exit(0)
def _info(type, value, tb):
    """sys.excepthook replacement: show the traceback in a GTK dialog.

    type/value/tb -- the standard excepthook arguments (note: 'type'
    shadows the builtin; kept for the excepthook signature).
    """
    # exception dialog code from: Gustavo J A M Carneiro
    # http://www.daa.com.au/pipermail/pygtk/attachments/20030828/2d304204/gtkexcepthook.py
    # license: "The license is whatever you want."
    # http://www.daa.com.au/pipermail/pygtk/2003-August/005777.html
    # Bugfixes by Michael Brown
    dialog = gtk.MessageDialog(parent=None,
                               flags=0,
                               type=gtk.MESSAGE_WARNING,
                               buttons=gtk.BUTTONS_NONE,
                               message_format=_(
        "A programming error has been detected during the execution of this program."
        "\n\nIt probably isn't fatal, but should be reported to the developers nonetheless."))
    dialog.set_title(_("Bug Detected"))
    dialog.set_property("has-separator", False)
    dialog.vbox.get_children()[0].get_children()[1].set_property("use-markup", True)
    # response id 1 == show details
    dialog.add_button(_("Show Details"), 1)
    dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
    # Details
    textview = gtk.TextView()
    textview.set_editable(False)
    textview.modify_font(pango.FontDescription("Monospace"))
    sw = gtk.ScrolledWindow()
    sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    sw.add(textview)
    frame = gtk.Frame()
    frame.set_shadow_type(gtk.SHADOW_IN)
    frame.add(sw)
    frame.set_border_width(6)
    dialog.vbox.add(frame)
    textbuffer = textview.get_buffer()
    trace = cStringIO.StringIO()
    traceback.print_exception(type, value, tb, None, trace)
    textbuffer.set_text(trace.getvalue())
    textview.set_size_request(gtk.gdk.screen_width()/2, gtk.gdk.screen_height()/3)
    dialog.details = frame
    dialog.set_position(gtk.WIN_POS_CENTER)
    dialog.set_gravity(gtk.gdk.GRAVITY_CENTER)
    while 1:
        resp = dialog.run()
        if resp == 1:
            # reveal details; 'Show Details' can only be pressed once
            dialog.details.show_all()
            dialog.action_area.get_children()[1].set_sensitive(0)
            continue
        else:
            dialog.destroy()
            break
if __name__ == "__main__":
    # install the GUI exception dialog as the global excepthook before starting
    sys.excepthook = _info
    main()
firmware-tools-2.1.14/firmwaretools/ 0000777 0017654 0017654 00000000000 11452664762 024027 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/firmwaretools/generated/ 0000777 0017654 0017654 00000000000 11452664762 025765 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/firmwaretools/generated/__init__.py 0000664 0017654 0017654 00000022633 11452664732 030077 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Firmware-tools: update infrastructure for firmware
"""
import ConfigParser
import fcntl
import glob
import logging
import logging.config
import os
import sys
from trace_decorator import decorate, traceLog, getLog
import errors
import repository
#import config
import plugins
def mkselfrelpath(*args):
    """Join *args* onto this module's real directory and normalize the result."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.normpath(os.path.join(here, *args))
# these are replaced by autotools when installed.
__VERSION__="2.1.14"
SYSCONFDIR="/usr/local/etc"
PYTHONDIR="/usr/local/lib/python2.4/site-packages"
PKGPYTHONDIR="/usr/local/lib/python2.4/site-packages/firmwaretools"
PKGDATADIR="/usr/local/share/firmware-tools"
DATADIR="/usr/local/share"
PKGCONFDIR="/usr/local/etc/firmware/"
# computed relative to this module so in-tree (uninstalled) runs work too
LOCALSTATEDIR=mkselfrelpath("..", "var")
# end build system subs

# lock file used by FtBase.lock()/unlock() when running as root
PID_FILE = '/var/run/ft.pid'
class confObj(object):
    """Attribute bag with case-insensitive attribute names.

    Every attribute is stored under its lower-cased name, so conf.Foo,
    conf.FOO and conf.foo all read and write the same slot.
    """
    def __getattribute__(self, name):
        # fold on read to match the lower-cased storage key
        key = name.lower()
        return object.__getattribute__(self, key)

    def __setattr__(self, name, value):
        # fold on write so every spelling shares one slot
        object.__setattr__(self, name.lower(), value)
decorate(traceLog())
def callCB(cb, *args, **kargs):
    """Invoke callback *cb* with the given arguments, if one was supplied.

    A TypeError is deliberately swallowed so callbacks with a narrower
    signature than the caller expects are simply skipped.
    """
    if cb is None:
        return None
    try:
        return cb(*args, **kargs)
    except TypeError:
        # NOTE(review): this also hides TypeErrors raised *inside* cb itself.
        return None
class Callback(object):
    """Dispatching callback: routes call(..., what=NAME) to self.NAME(...).

    Subclasses add methods named after the "what" values they care about;
    unknown events are silently ignored.
    """
    def __init__(self):
        pass

    def __call__(self, *args, **kargs):
        handler = getattr(self, kargs.get("what", "UNKNOWN"), None)
        if handler is None:
            return None
        return handler(*args, **kargs)
class FtBase(object):
"""This is a primary structure and base class. It houses the objects and
methods needed to perform most things . It is almost an abstract
class in that you will need to add your own class above it for most
real use."""
def __init__(self):
self.logger = getLog()
self.verbose_logger = getLog(prefix="verbose.")
self.cmdargs = []
self.cb = None
self._conf = None
self._repo = None
self._systemInventory = None
self._vendorId = None
self._systemId = None
self.verbosity = 0
self.trace = 0
self.loggingConfig = os.path.join(PKGCONFDIR, "firmware.conf")
# Start with plugins disabled
self.disablePlugins()
def _getConfig(self, cfgFiles=None, pluginTypes=(plugins.TYPE_CORE, plugins.TYPE_INVENTORY,), optparser=None, disabledPlugins=None):
if self._conf is not None:
return self._conf
if cfgFiles is None:
cfgFiles = [os.path.join(PKGCONFDIR, "firmware.conf"),]
if disabledPlugins is None:
disabledPlugins = []
self.conf = confObj()
self.setupLogging(self.loggingConfig, self.verbosity, self.trace)
self.setConfFromIni(cfgFiles)
self.conf.uid = os.geteuid()
self.doPluginSetup(optparser, pluginTypes, disabledPlugins)
return self._conf
def setupLogging(self, configFile, verbosity=1, trace=0):
# set up logging
logging.config.fileConfig(configFile)
root_log = logging.getLogger()
ft_log = logging.getLogger("firmwaretools")
ft_verbose_log = logging.getLogger("verbose")
ft_trace_log = logging.getLogger("trace")
ft_log.propagate = 0
ft_trace_log.propagate = 0
ft_verbose_log.propagate = 0
if verbosity >= 1:
ft_log.propagate = 1
if verbosity >= 2:
ft_verbose_log.propagate = 1
if verbosity >= 3:
for hdlr in root_log.handlers:
hdlr.setLevel(logging.DEBUG)
if trace:
ft_trace_log.propagate = 1
decorate(traceLog())
def setConfFromIni(self, cfgFiles):
defaults = {
"sysconfdir": SYSCONFDIR,
"pythondir": PYTHONDIR,
"datadir": DATADIR,
"pkgpythondir": PKGPYTHONDIR,
"pkgdatadir": PKGDATADIR,
"pkgconfdir": PKGCONFDIR,
"localstatedir": LOCALSTATEDIR,
}
self._ini = ConfigParser.SafeConfigParser(defaults)
for i in cfgFiles:
self._ini.read(i)
mapping = {
# conf.WHAT : (iniSection, iniOption, default)
"storageTopdir": ('main', 'storage_topdir', "%s/firmware" % DATADIR),
"pluginSearchPath": ('main', 'plugin_search_path', os.path.join(PKGDATADIR, "plugins")),
"pluginConfDir": ('main', 'plugin_config_dir', os.path.join(PKGCONFDIR, "firmware.d")),
"rpmMode": ('main', 'rpm_mode', "manual"),
}
for key, val in mapping.items():
if self._ini.has_option( val[0], val[1] ):
setattr(self.conf, key, self._ini.get(val[0], val[1]))
else:
setattr(self.conf, key, val[2])
# read plugin configs
for i in glob.glob( "%s/*.conf" % self.conf.pluginConfDir ):
self._ini.read(i)
decorate(traceLog())
def listPluginsFromIni(self):
return [x[len("plugin:"):] for x in self._ini.sections() if x.startswith("plugin:")]
decorate(traceLog())
def getPluginConfFromIni(self, plugin):
section = "plugin:%s" % plugin
conf = confObj()
conf.module = None
conf.enabled = False
conf.search = None
for i in self._ini.options(section):
setattr(conf, i, self._ini.get(section, i))
#required ("enabled", "module"):
if getattr(conf, "module", None) is None:
conf.enabled = False
return conf
# called early so no tracing.
def disablePlugins(self):
'''Disable plugins
'''
self.plugins = plugins.DummyPlugins()
decorate(traceLog())
def doPluginSetup(self, optparser=None, pluginTypes=None, disabledPlugins=None):
if isinstance(self.plugins, plugins.Plugins):
raise RuntimeError("plugins already initialised")
self.plugins = plugins.Plugins(self, optparser, pluginTypes, disabledPlugins)
decorate(traceLog())
def _getRepo(self):
if self._repo is not None:
return self._repo
self._repo = repository.Repository( self.conf.storageTopdir )
return self._repo
decorate(traceLog())
def _getInventory(self):
if self._systemInventory is not None:
return self._systemInventory
self._systemInventory = repository.SystemInventory()
self.plugins.run("preinventory", inventory=self._systemInventory)
self.plugins.run("inventory", inventory=self._systemInventory)
self.plugins.run("postinventory", inventory=self._systemInventory)
return self._systemInventory
decorate(traceLog())
def calculateUpgradeList(self, cb=None):
saveCb = self.cb
self.cb = cb
try:
for candidate in self.repo.iterPackages(cb=cb):
self.systemInventory.addAvailablePackage(candidate)
self.systemInventory.calculateUpgradeList(cb)
finally:
self.cb = saveCb
return self.systemInventory
# properties so they auto-create themselves with defaults
repo = property(fget=lambda self: self._getRepo(),
fset=lambda self, value: setattr(self, "_repo", value))
conf = property(fget=lambda self: self._getConfig(),
fset=lambda self, value: setattr(self, "_conf", value),
fdel=lambda self: setattr(self, "_conf", None))
systemInventory = property(
fget=lambda self: self._getInventory(),
fset=lambda self, value: setattr(self, "_systemInventory", value),
fdel=lambda self: setattr(self, "_systemInventory", None))
decorate(traceLog())
def lock(self):
if self.conf.uid == 0:
self.runLock = open(PID_FILE, "a+")
try:
fcntl.lockf(self.runLock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
raise errors.LockError, "unable to obtain exclusive lock."
decorate(traceLog())
def unlock(self):
if self.conf.uid == 0:
fcntl.lockf(self.runLock.fileno(), fcntl.LOCK_UN)
os.unlink(PID_FILE)
decorate(traceLog())
def setSystemId(self, vendorId, systemId):
if not (vendorId and systemId):
raise RuntimeError("Need non-null, non-zero, id for vendor and system id.")
self._vendorId = vendorId
self._systemId = systemId
decorate(traceLog())
def getSystemId(self):
return (self._vendorId, self._systemId)
decorate(traceLog())
def yieldInventory(self, cb=None):
saveCb = self.cb
try:
self.cb = cb
for dev in self.systemInventory.iterDevices():
yield dev
except:
self.cb = saveCb
raise
firmware-tools-2.1.14/firmwaretools/bootstrap_pci.py 0000664 0017654 0017654 00000007035 11310074510 027227 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""module
some docs here eventually.
"""
from __future__ import generators
# import arranged alphabetically
import os
try:
import subprocess
except ImportError:
import compat_subprocess as subprocess
# my stuff
import firmwaretools.package as package
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
# loaded as a regular inventory plugin by firmwaretools.plugins
plugin_type = (plugins.TYPE_INVENTORY,)
requires_api_version = "2.0"

# ======
# public API
# ======
# sysfs directory enumerated by inventory_hook; one subdir per PCI function
sysfs_pcidevdir="/sys/bus/pci/devices"
decorate(traceLog())
def inventory_hook(conduit, inventory=None, *args, **kargs):
    """Inventory plugin hook: add one PciDevice per entry under sysfs.

    Devices already present in *inventory* (same uniqueInstance) are skipped.
    """
    # cleanup: removed unused locals (base/cb were fetched but never used)
    devdir = sysfs_pcidevdir
    for entry in os.listdir(devdir):
        dev = makePciDevice(os.path.join(devdir, entry))
        if inventory.getDevice(dev.uniqueInstance) is None:
            inventory.addDevice(dev)
decorate(traceLog())
def getFile(f):
    """Read file *f* and return its contents minus at most one trailing newline."""
    fd = open(f, "r")
    try:
        ret = fd.read()
    finally:
        # BUGFIX: close even when read() raises; the handle leaked before.
        # ('with' is avoided: this codebase targets python 2.4)
        fd.close()
    if ret[-1:] == '\n': ret = ret[:-1]
    return ret
decorate(traceLog())
def chomp(s):
    """Return *s* without its single trailing newline, if it has one."""
    if not s.endswith("\n"):
        return s
    return s[:-1]
# Locate the lspci binary at import time.  Stays None if neither standard
# path exists, in which case makePciDevice's Popen call would fail.
LSPCI = None
for i in ("/sbin/lspci", "/usr/bin/lspci"):
    if os.path.exists(i):
        LSPCI=i
        break
decorate(traceLog())
def makePciDevice(devDir):
    """Build a package.PciDevice from a sysfs PCI device directory.

    devDir -- e.g. /sys/bus/pci/devices/0000:00:1f.0
    """
    kargs = {}
    # sysfs id files hold hex strings like "0x8086"
    kargs["pciVendor"] = int(getFile(os.path.join(devDir, "vendor")),16)
    kargs["pciDevice"] = int(getFile(os.path.join(devDir, "device")),16)
    kargs["pciSubVendor"] = int(getFile(os.path.join(devDir, "subsystem_vendor")),16)
    kargs["pciSubDevice"] = int(getFile(os.path.join(devDir, "subsystem_device")),16)
    kargs["pciClass"] = int(getFile(os.path.join(devDir, "class")),16)

    # firmware package name; the subsystem-qualified form is used when the
    # device has subsystem ids, with the unqualified form kept as shortname
    shortname = None
    name = "pci_firmware(ven_0x%04x_dev_0x%04x" % (kargs["pciVendor"], kargs["pciDevice"])
    if kargs["pciSubVendor"] and kargs["pciSubDevice"]:
        shortname = name + ")"
        name = name + "_subven_0x%04x_subdev_0x%04x" % (kargs["pciSubVendor"], kargs["pciSubDevice"])
    name = name + ")"

    # the sysfs directory name is DOMAIN:BUS:DEV.FN
    dirname = os.path.basename(devDir)
    dets = dirname.split(":")
    kargs["pciBDF_Domain"] = int(dets[0],16)
    kargs["pciBDF_Bus"] = int(dets[1],16)
    kargs["pciBDF_Device"] = int(dets[2].split(".")[0],16)
    kargs["pciBDF_Function"] = int(dets[2].split(".")[1],16)
    kargs["pciDbdf"] = (kargs["pciBDF_Domain"], kargs["pciBDF_Bus"], kargs["pciBDF_Device"], kargs["pciBDF_Function"])

    # ask lspci for a human-readable device description
    null = open("/dev/null", "w")
    try:
        p = subprocess.Popen([LSPCI, "-s", "%02x:%02x:%02x.%x" % kargs["pciDbdf"]], stdout=subprocess.PIPE, stderr=null, stdin=null)
        lspciname = chomp(p.communicate()[0])
    finally:
        # BUGFIX: close /dev/null even if Popen raises
        null.close()
    if lspciname is not None and len(lspciname) > 0:
        displayname = lspciname
    else:
        displayname = "unknown device"

    # cleanup: was two near-identical constructor calls differing only
    # in whether 'shortname' was passed
    ctorArgs = dict(kargs)
    ctorArgs.update(name=name, version='unknown',
                    displayname=displayname, lspciname=lspciname)
    if shortname is not None:
        ctorArgs["shortname"] = shortname
    return package.PciDevice(**ctorArgs)
if __name__ == "__main__":
for p in getPciDevs():
print "%s" % p.name
firmware-tools-2.1.14/firmwaretools/dep_parser.py 0000775 0017654 0017654 00000004760 10756403330 026521 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""
repository module
"""
from __future__ import generators
import sys
import package
from firmwaretools.trace_decorator import decorate, traceLog, getLog
class DepParser(object):
    """Parse a dependency expression (e.g. "pkgA >= 1.2, pkgB") against an
    inventory mapping.  After construction, .depPass is 1 if every clause
    held and 0 otherwise; .reason describes the first failure.

    NOTE: the t_* class attributes are ply lexer token regexes and the
    docstrings of the p_* methods are ply grammar rules -- they are code,
    not documentation, and must not be edited casually.
    """
    # lexer token declarations (consumed by ply_lex)
    tokens = ( 'ID', 'LT', 'LE', 'EQ', 'GE', 'GT', 'COMMA' )
    t_ID = r'[\w()]+'
    t_LT = r'<'
    t_LE = r'<='
    t_EQ = r'=='
    t_GE = r'>='
    t_GT = r'>'
    t_COMMA = r','
    t_ignore = " \t"

    def t_error(self, t):
        # lexer error hook: report and skip the offending character
        print "Illegal character '%s'" % t.value[0]
        t.skip(1)

    decorate(traceLog())
    def __init__(self, string, inventory, fullInventory, *args, **kargs):
        # inventory: mapping of package name -> installed package
        # fullInventory: complete inventory view (kept for rule methods)
        self.inventory = inventory
        self.fullInventory = fullInventory
        self.depPass = 1  # assume success until a rule fails
        import ply_lex
        lexer = ply_lex.lex( module=self )
        import ply_yacc
        parser = ply_yacc.yacc( module=self, write_tables=0, debug=0 )
        # parsing runs the p_* actions below, which update depPass/reason
        parser.parse(string, lexer=lexer, debug=0)

    precedence = (
        ('left', 'COMMA'),
    )

    def p_error(self, t):
        print "Syntax error at '%s'" % t

    def p_stmt(self, t):
        # statement_list can be 1) empty, 2) single statement, or 3) list
        """statement_list :
        | statement
        | statement_list COMMA statement
        statement : dep"""
        pass

    def p_package_depencency(self, t):
        """dep : ID LT ID
        | ID LE ID
        | ID EQ ID
        | ID GE ID
        | ID GT ID
        """
        op = t[2]
        # synthesize a package carrying the required version for comparison
        reqPkg = package.Package (name=t[1], version=t[3], displayname="virtual package")
        pkg = self.inventory.get(t[1])
        if pkg:
            # r is -1/0/1; the eval() text is built only from that result
            # and the lexed operator, never raw user input
            r = pkg.compareVersion(reqPkg)
            evalStr = "%s %s 0" % (r, op)
            if not eval(evalStr):
                self.reason = "Failed for rule: requires %s %s %s" % (t[1], t[2], t[3])
                self.depPass = 0
        else:
            self.reason = "Repository package doesn't exist in system inventory."
            self.depPass = 0

    def p_package_exists(self, t):
        """dep : ID"""
        if not self.inventory.get(t[1]):
            self.reason = "Failed for rule: requires %s" % t[1]
            self.depPass = 0
firmware-tools-2.1.14/firmwaretools/errors.py 0000664 0017654 0017654 00000000314 10756403330 025675 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
import exceptions
# Root of the firmware-tools exception hierarchy.
class BaseError(exceptions.Exception): pass
# Configuration-file / plugin-load problems (see firmwaretools.plugins).
class ConfigError(BaseError): pass
# Failure to obtain the exclusive run lock (see FtBase.lock).
class LockError(BaseError): pass
firmware-tools-2.1.14/firmwaretools/i18n.py 0000664 0017654 0017654 00000001173 11121512226 025134 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
"""i18n abstraction
License: GPL
Author: Vladimir Bormotov
$Id$
"""
# $RCSfile$
__version__ = "$Revision$"[11:-2]
__date__ = "$Date$"[7:-2]
try:
    import gettext
    import sys
    if sys.version_info[0] == 2:
        # Python 2: use the unicode-returning translator.  translation()
        # raises when no 'yum' catalog is installed, which drops us into
        # the identity fallback below.
        t = gettext.translation('yum')
        _ = t.ugettext
    else:
        gettext.bindtextdomain('yum', '/usr/share/locale')
        gettext.textdomain('yum')
        _ = gettext.gettext
except:
    # deliberate catch-all: any failure to set up i18n must degrade to a
    # no-op translator rather than break the importing module
    def _(str):
        """pass given string as-is"""
        return str

if __name__ == '__main__':
    pass

# vim: set ts=4 et :
firmware-tools-2.1.14/firmwaretools/mockpackage.py 0000775 0017654 0017654 00000012003 10765775302 026642 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""module
some docs here eventually.
"""
from __future__ import generators
import os
import time
import logging
# import arranged alphabetically
import package
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
# only loaded when the mock/unit-test plugin type is requested
plugin_type = (plugins.TYPE_MOCK_INVENTORY,)
requires_api_version = "2.0"

moduleLog = getLog()
moduleVerboseLog = getLog(prefix="verbose.")
#==============================================================
# mock classes for unit tests
# plus expected data returns
#==============================================================
decorate(traceLog())
def inventory_hook(conduit, inventory=None, *args, **kargs):
    """Mock inventory hook: report a fixed set of pretend devices,
    announcing progress through the base callback before each one."""
    base = conduit.getBase()
    cb = base.cb
    import firmwaretools as ft
    # (details, name, displayname, version) for each fake device, in the
    # exact order the original emitted them
    mockDevices = [
        ("fake cmd", "debug_system_bios",
         "System BIOS for Imaginary Server 1234", "A02"),
        ("fake cmd 2", "debug_system_bmc",
         "Baseboard Management Controller for Imaginary Server 1234", "1.0"),
        ("fake cmd 3", "debug_pci_firmware_ven_crappy_dev_slow",
         "ReallyFast Network Controller", "1.0"),
        ("fake cmd 4", "debug_pci_firmware_ven_0x0c64_dev_0xrocked",
         "Pokey Modem -- Enhanced 1200baud", "2.0"),
        ("fake cmd 5", "debug_pci_firmware_ven_corrupt_dev_yourdata",
         "SafeData RAID Controller v2i", "2.0"),
        ("fake cmd 6", "debug_pci_firmware_ven_violates_dev_scsistandard",
         "AdapFirm SloTek AHA-1501", "3.0"),
        ("fake cmd 7", "debug_pci_firmware_ven_draws_dev_polygons",
         "PixelPusher 2000 Video Adapter", "4.0"),
        ]
    for details, devName, devDisplay, devVersion in mockDevices:
        ft.callCB(cb, who="mock_inventory", what="running_inventory",
                  details=details)
        inventory.addDevice( package.Device(
            name = devName,
            displayname = devDisplay,
            version = devVersion))
#new style -- used by unit tests.
class MockPackage2(package.RepositoryPackage):
    # NOTE(review): the bare decorate(traceLog()) statements in this class
    # body come from trace_decorator; they appear to instrument the
    # following def -- confirm before reordering anything here.
    decorate(traceLog())
    def __init__(self, *args, **kargs):
        super(MockPackage2, self).__init__(*args, **kargs)

    decorate(traceLog())
    def install(self):
        # pretend flash: step through the states and report success
        self.status = "in_progress"
        self.status = "success"
        return "SUCCESS"
# used when we switch to 'fake' data
class MockRepositoryPackage(package.RepositoryPackage):
    decorate(traceLog())
    def __init__(self, *args, **kargs):
        super(MockRepositoryPackage, self).__init__(*args, **kargs)
        # advertise full capabilities so GUI progress paths get exercised
        self.capabilities['can_downgrade'] = True
        self.capabilities['can_reflash'] = True
        self.capabilities['accurate_update_percentage'] = True
        self.uniqueInstance = self.name

    decorate(traceLog())
    def install(self):
        # simulate a ~1 second flash with a smoothly advancing percentage
        self.status = "in_progress"
        for i in xrange(100):
            self.progressPct = i/100.0
            time.sleep(0.01)
        #print "MockRepositoryPackage -> Install pkg(%s) version(%s)" % (str(self), self.version)
        self.progressPct = 1
        self.status = "success"
#==============================================================
# expected-output fixtures consumed by the unit tests
#==============================================================
# device names the mock inventory_hook above should produce
mockExpectedOutput = """debug_pci_firmware_ven_0x0c64_dev_0xrocked
debug_pci_firmware_ven_crappy_dev_slow
debug_system_bmc
debug_pci_firmware_ven_corrupt_dev_yourdata
debug_system_bios
debug_pci_firmware_ven_draws_dev_polygons
debug_pci_firmware_ven_violates_dev_scsistandard"""

# re-use mock data from low-level getSystemId mock function
mockExpectedOutput_inventory = [("mock_package(ven_0x1028_dev_0x1234)", "a05"), ]

# re-use mock data from low-level getSystemId mock function
mockExpectedOutput_bootstrap = """mock_package(ven_0x1028_dev_0x1234)"""
firmware-tools-2.1.14/firmwaretools/mockrepository.py 0000664 0017654 0017654 00000006470 10765775302 027476 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""
repository module
"""
from __future__ import generators
import os
import repository
import mockpackage
import sys
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
# only loaded when the mock/unit-test plugin type is requested
plugin_type = (plugins.TYPE_MOCK_INVENTORY, )
requires_api_version = "2.0"

moduleLog = getLog()
moduleVerboseLog = getLog(prefix="verbose.")
#
# DEBUG ONLY
#
# a null function that just eats args. Default callback
def nullFunc(*args, **kargs):
    """Accept any arguments and do nothing; used as the default callback."""
    return None
def config_hook(conduit, *args, **kargs):
    # Monkey-patches Repository so that *every* instance serves the canned
    # package list below instead of scanning the on-disk store.  Debug only.
    repository.Repository.iterPackages = iterPackages_DEBUG
decorate(traceLog())
def iterPackages_DEBUG(self, cb=(nullFunc, None)):
    """Replacement Repository.iterPackages that yields canned mock packages
    (several versions per device) in a fixed order."""
    # TODO: put this in a separate function
    # (displayname, name, version) in the exact order the original yielded
    canned = [
        ("Baseboard Management Controller for Imaginary Server 1234",
         "debug_system_bmc", "0.9"),
        ("ReallyFast Network Controller",
         "debug_pci_firmware_ven_crappy_dev_slow", "1.1"),
        ("Pokey Modem -- Enhanced 1200baud",
         "debug_pci_firmware_ven_0x0c64_dev_0xrocked", "1.1"),
        ("Pokey Modem -- Enhanced 1200baud",
         "debug_pci_firmware_ven_0x0c64_dev_0xrocked", "1.9"),
        ("SafeData RAID Controller v2i",
         "debug_pci_firmware_ven_corrupt_dev_yourdata", "1.1"),
        ("SafeData RAID Controller v2i",
         "debug_pci_firmware_ven_corrupt_dev_yourdata", "2.9"),
        ("AdapFirm SloTek AHA-1501",
         "debug_pci_firmware_ven_violates_dev_scsistandard", "2.1"),
        ("AdapFirm SloTek AHA-1501",
         "debug_pci_firmware_ven_violates_dev_scsistandard", "2.5"),
        ("AdapFirm SloTek AHA-1501",
         "debug_pci_firmware_ven_violates_dev_scsistandard", "3.0"),
        ("PixelPusher 2000 Video Adapter",
         "debug_pci_firmware_ven_draws_dev_polygons", "4.0"),
        ("PixelPusher 2000 Video Adapter",
         "debug_pci_firmware_ven_draws_dev_polygons", "4.1"),
        ("PixelPusher 2000 Video Adapter",
         "debug_pci_firmware_ven_draws_dev_polygons", "4.1.1"),
        ("PixelPusher 2000 Video Adapter",
         "debug_pci_firmware_ven_draws_dev_polygons", "4.1.2"),
        ]
    for pkgDisplay, pkgName, pkgVersion in canned:
        yield mockpackage.MockRepositoryPackage(
            displayname=pkgDisplay,
            name=pkgName,
            version=pkgVersion)
firmware-tools-2.1.14/firmwaretools/package.py 0000664 0017654 0017654 00000012534 10767377333 026002 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:tw=0:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""
package module
"""
import rpm
from gettext import gettext as _
from firmwaretools.trace_decorator import decorate, traceLog, getLog
# Programming error inside the package machinery.
class InternalError(Exception): pass
# A firmware install attempt failed.
class InstallError(Exception): pass
# install() was called on a package with no installFunction attached.
class NoInstaller(Exception): pass
def defaultCompareStrategy(ver1, ver2):
    """Compare two version strings with RPM label-compare semantics;
    returns -1/0/1 for ver1 <, ==, > ver2."""
    lhs = ("0", str(ver1), "0")
    rhs = ("0", str(ver2), "0")
    return rpm.labelCompare(lhs, rhs)
# maps Package.status codes to localized, user-facing descriptions
# (consumed by RepositoryPackage.getStatusStr below)
packageStatusEnum = {
    "unknown": _("The package status is not known."),
    "not_installed": _("The device has not been updated to this version."),
    "in_progress": _("The device is being updated now"),
    "failed": _("Device update failed."),
    "success": _("Device update was successful."),
    "disabled": _("Device update is disabled for this device."),
    "warm_reboot_needed": _("Update complete. You must perform a warm reboot for the update to take effect."),
    }
# Package public API:
# pkg.name
# pkg.version
# str(pkg) == display name
# pkg.compareVersion(otherPkg)
class Package(object):
    """Base object describing a named, versioned firmware payload.

    Callers must supply name, version and displayname as keyword
    arguments; any additional keywords become attributes.
    """
    # default status for freshly created packages
    status = "unknown"

    def __init__(self, *args, **kargs):
        self.name = None
        self.version = None
        self.compareStrategy = defaultCompareStrategy
        for attr, val in kargs.items():
            setattr(self, attr, val)
        # NOTE(review): assert-based validation disappears under 'python -O'
        assert(hasattr(self, "name"))
        assert(hasattr(self, "version"))
        assert(hasattr(self, "displayname"))
        assert(len(self.name))
        assert(len(self.version))
        assert(len(self.displayname))

    def __str__(self):
        # prefer the human-readable name when one was supplied
        return getattr(self, "displayname", self.name)

    def compareVersion(self, otherPackage):
        """rpm-style compare: -1/0/1 for self <, ==, > otherPackage."""
        return self.compareStrategy(self.version, otherPackage.version)
class RepositoryPackage(Package):
    """A Package that lives in the on-disk repository and can be installed
    onto one or more devices."""
    mainIni = None

    def __init__(self, *args, **kargs):
        self.installFunction = None
        self.path = None
        super(RepositoryPackage, self).__init__(*args, **kargs)
        self.capabilities = {
            # if package is capable of downgrading
            'can_downgrade': False,
            # if package is capable of reflashing same version
            'can_reflash': False,
            # if package has/updates .percent_done member var
            # GUI can use progress bar if this is set.
            # otherwise, GUI should just use a spinner or something
            'accurate_update_percentage': False,
            # if update has .update_status_text member var
            # GUI should use for 'view log' function
            'update_log_string': False,
            # if update has .update_status_logfile member var
            # GUI should use for 'view log' function
            'update_log_filename': False,
        }
        self.progressPct = 0
        self.status = "not_installed"
        self.deviceList = []
        self.currentInstallDevice = None

    def getProgress(self):
        # returns real number between 0-1, or -1 for "not supported"
        if not self.capabilities['accurate_update_percentage']:
            return -1
        return self.progressPct

    def install(self):
        """Run the attached install function; without one, mark the package
        failed and raise NoInstaller."""
        self.status = "in_progress"
        if self.installFunction is None:
            self.status = "failed"
            raise NoInstaller(_("Attempt to install a package with no install function. Name: %s, Version: %s") % (self.name, self.version))
        return self.installFunction(self)

    def getCapability(self, capability):
        """Value of one capability flag, or None for unknown capabilities."""
        return self.capabilities.get(capability)

    def attachToDevice(self, device):
        self.deviceList.append(device)

    def getDeviceList(self):
        return self.deviceList

    def setCurrentInstallDevice(self, device):
        self.currentInstallDevice = device

    def getCurrentInstallDevice(self):
        return self.currentInstallDevice

    def getStatusStr(self):
        """Human-readable, localized form of self.status."""
        return packageStatusEnum.get(self.status, _("Programming error: status code not found."))
# Base class for all devices on a system
# required:
# displayname
# name
# version
# optional:
# compareStrategy
class Device(Package):
    """Base class for every device discovered on the system.

    Required keywords: name, version, displayname.
    Optional: compareStrategy, uniqueInstance (defaults to name).
    """
    status = "unknown"

    def __init__(self, *args, **kargs):
        self.name = None
        self.version = None
        self.compareStrategy = defaultCompareStrategy
        for attr, val in kargs.items():
            setattr(self, attr, val)
        # default instance key; subclasses may override to distinguish
        # several otherwise-identical devices
        if not hasattr(self, "uniqueInstance"):
            self.uniqueInstance = self.name
        assert(hasattr(self, "name"))
        assert(hasattr(self, "version"))
        assert(hasattr(self, "displayname"))

    def __str__(self):
        return getattr(self, "displayname", self.name)

    def compareVersion(self, otherPackage):
        """rpm-style compare: -1/0/1 for self <, ==, > otherPackage."""
        return self.compareStrategy(self.version, otherPackage.version)
# required: (in addition to base class)
# pciDbdf
class PciDevice(Device):
    """Device addressed by PCI domain/bus/device/function.

    Requires the extra keyword pciDbdf: a (domain, bus, device, function)
    int tuple used to build a stable uniqueInstance key.
    """
    def __init__(self, *args, **kargs):
        # BUGFIX: was super(Device, self).__init__(...), which skipped
        # Device.__init__ and ran Package.__init__ instead.  Route through
        # the direct base class as the class hierarchy intends.
        super(PciDevice, self).__init__(*args, **kargs)
        assert(hasattr(self, "pciDbdf"))
        # one stable key per PCI function, independent of the firmware name
        self.uniqueInstance = "pci_dev_at_domain_0x%04x_bus_0x%02x_dev_0x%02x_func_0x%01x" % self.pciDbdf
firmware-tools-2.1.14/firmwaretools/plugins.py 0000664 0017654 0017654 00000020313 11376536320 026050 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
import atexit
import gettext
import sys
from trace_decorator import decorate, traceLog, getLog
import errors
# API version offered to plugins; checked against requires_api_version
API_VERSION = '2.0'

# plugin can raise this to disable plugin during load
class DisablePlugin(ImportError): pass

NEXT_AVAIL_TYPE_NUM = 0
def registerPluginType(name):
    # Create a module-level constant (e.g. TYPE_CORE) bound to the next
    # free integer id, so plugin modules can declare plugin_type symbolically.
    global NEXT_AVAIL_TYPE_NUM
    globals()[name] = NEXT_AVAIL_TYPE_NUM
    NEXT_AVAIL_TYPE_NUM = NEXT_AVAIL_TYPE_NUM + 1

# Plugin types
registerPluginType("TYPE_CORE")
registerPluginType("TYPE_INVENTORY")
registerPluginType("TYPE_CLI")

# testing types
registerPluginType("TYPE_MOCK_CORE")
registerPluginType("TYPE_MOCK_INVENTORY")

# all the 'normal' types
ALL_TYPES = (TYPE_CORE, TYPE_INVENTORY)
SLOT_TO_CONDUIT = {}
def registerSlotToConduit(slot, conduit):
    # Record which conduit *class name* (a string) services each plugin
    # slot; Plugins.run() resolves the name back to a class with eval().
    global SLOT_TO_CONDUIT
    SLOT_TO_CONDUIT[slot] = conduit

registerSlotToConduit('config', 'PluginConduit')
registerSlotToConduit('preinventory', 'PluginConduit')
registerSlotToConduit('inventory', 'PluginConduit')
registerSlotToConduit('postinventory', 'PluginConduit')
registerSlotToConduit('close', 'PluginConduit')

moduleLog = getLog()
moduleLogVerbose = getLog(prefix="verbose.")
class PluginExit(Exception):
    '''Used by plugins to signal to stop
    '''
    def __init__(self, value="", translation_domain=""):
        self.value = value
        self.translation_domain = translation_domain

    def __str__(self):
        # translate only when a gettext domain was supplied
        domain = self.translation_domain
        if not domain:
            return self.value
        return gettext.dgettext(domain, self.value)
class Plugins:
    '''
    Manager class for plugins.
    '''
    def __init__(self, base, optparser=None, types=None, disabled=None):
        '''Initialise the instance.
        '''
        # base: the FtBase owner; provides the parsed ini configuration
        self.base = base
        self.optparser = optparser
        self.cmdline = (None, None)
        self.verbose_logger = getLog(prefix="verbose.")
        self.disabledPlugins = disabled
        if types is None:
            types = ALL_TYPES
        if not isinstance(types, (list, tuple)):
            types = (types,)

        # TODO: load plugins here
        # load every enabled plugin declared in the ini files
        self._plugins = {}
        for i in self.base.listPluginsFromIni():
            conf = self.base.getPluginConfFromIni(i)
            moduleLogVerbose.info( "Checking Plugin (%s)" % i )
            if conf.enabled:
                self._loadModule(i, conf, types)

        # Call close handlers when yum exit's
        #atexit.register(self.run, 'close')

        # Let plugins register custom config file options
        self.run('config')

    decorate(traceLog())
    def _loadModule(self, pluginName, conf, types):
        # Import one plugin module and register it if every check passes.
        # Raises errors.ConfigError for an unimportable module, a missing
        # or unsupported API version, or a missing plugin_type declaration.

        # load plugin
        try:
            savePath = sys.path
            # configured search paths take priority over the normal sys.path
            sys.path.insert(0,self.base.conf.pluginSearchPath)
            if conf.search is not None:
                sys.path.insert(0, conf.search)
            module = __import__(conf.module, globals(), locals(), [])
            sys.path = savePath
        except DisablePlugin:
            # the plugin opted itself out during import
            moduleLogVerbose.info("\tPlugin raised DisablePlugin exception. skipping.")
            return
        except ImportError, e:
            sys.path = savePath
            raise errors.ConfigError(
                'Plugin "%s" cannot be loaded: %s' % (conf.module, e))

        # __import__ returns the top-level package; walk down to the
        # actual dotted submodule
        for i in conf.module.split(".")[1:]:
            module = getattr(module, i)

        # Check API version required by the plugin
        if not hasattr(module, 'requires_api_version'):
            raise errors.ConfigError(
                'Plugin "%s" doesn\'t specify required API version' % conf.module
                )
        if not apiverok(API_VERSION, module.requires_api_version):
            raise errors.ConfigError(
                'Plugin "%s" requires API %s. Supported API is %s.' % (
                    conf.module,
                    module.requires_api_version,
                    API_VERSION,
                    ))

        # Check plugin type against filter
        plugintypes = getattr(module, 'plugin_type', None)
        if plugintypes is None:
            raise errors.ConfigError(
                'Plugin "%s" doesn\'t specify plugin type' % pluginName
                )
        if not isinstance(plugintypes, (list, tuple)):
            plugintypes = (plugintypes,)
        for plugintype in plugintypes:
            # NOTE(review): this rejects the plugin unless *every* declared
            # type is in the requested set -- confirm that is the intent
            if plugintype not in types:
                moduleLogVerbose.info("\tPlugin %s not loaded: doesnt match load type (%s)" % (pluginName, plugintypes))
                return

        # Check if this plugin has been temporary disabled
        if self.disabledPlugins:
            if pluginName in self.disabledPlugins:
                moduleLogVerbose.info("\tPlugin %s not loaded: disabled" % pluginName)
                return

        moduleLogVerbose.info("\tLoaded %s plugin" % pluginName)
        self._plugins[pluginName] = {"conf": conf, "module": module}

    decorate(traceLog())
    def listLoaded(self):
        # names of all successfully registered plugins
        return self._plugins.keys()

    decorate(traceLog())
    def run(self, slotname, *args, **kargs):
        '''Run all plugin functions for the given slot.
        '''
        # Determine handler class to use
        conduitcls = SLOT_TO_CONDUIT.get(slotname, None)
        if conduitcls is None:
            raise ValueError('unknown slot name "%s"' % slotname)
        conduitcls = eval(conduitcls) # Convert name to class object

        # call MODULE.<slot>_hook(conduit, ...) on every plugin defining it
        for pluginName, dets in self._plugins.items():
            module = dets['module']
            conf = dets['conf']
            hook = "%s_hook" % slotname
            if hasattr(module, hook):
                getattr(module, hook)(conduitcls(self, self.base, conf), *args, **kargs)
class DummyPlugins:
    '''
    This class provides basic emulation of the YumPlugins class. It exists so
    that calls to plugins.run() don't fail if plugins aren't in use.
    '''
    decorate(traceLog())
    def run(self, *args, **kwargs):
        # no plugins loaded: every slot is a no-op
        pass

    decorate(traceLog())
    def setCmdLine(self, *args, **kwargs):
        pass
class PluginConduit(object):
    """Accessor object handed to each plugin hook function.

    Wraps the plugin framework (parent), the hosting application object
    (base) and the per-plugin configuration so hooks see a small, stable
    API plus a pair of loggers.
    """
    decorate(traceLog())
    def __init__(self, parent, base, conf):
        self._parent = parent
        self._base = base
        self._conf = conf
        self.logger = getLog()
        self.verbose_logger = getLog(prefix="verbose.")
    decorate(traceLog())
    def info(self, msg):
        # Informational chatter goes to the verbose logger.
        self.verbose_logger.info(msg)
    decorate(traceLog())
    def error(self, msg):
        # Errors go to the main logger.
        self.logger.error(msg)
    decorate(traceLog())
    def getVersion(self):
        # Report the firmwaretools package version (imported lazily to
        # avoid a hard dependency at module load time).
        import firmwaretools
        return firmwaretools.__version__
    decorate(traceLog())
    def getOptParser(self):
        '''Return the optparse.OptionParser instance for this execution.

        In the "config" slot a plugin may add extra options to this
        instance to extend the exposed command line options.
        In all other slots a plugin may only read the OptionParser instance.
        Any modification of the instance at this point will have no effect.

        @return: the global optparse.OptionParser instance in use. May be
        None if an OptionParser isn't in use.
        '''
        return self._parent.optparser
    decorate(traceLog())
    def getBase(self):
        # The application object this plugin framework was created for.
        return self._base
    decorate(traceLog())
    def getConf(self):
        # The config object recorded for this plugin when it was loaded.
        return self._conf
decorate(traceLog())
def parsever(apiver):
    """Split an API version string "MAJOR.MINOR" into a tuple of two ints.

    Raises ValueError if the string does not have exactly two dot-separated
    parts or a part is not an integer.
    """
    # Renamed locals: the original bound the builtin name `min`.
    major, minor = apiver.split('.')
    return int(major), int(minor)
decorate(traceLog())
def apiverok(a, b):
    '''Return true if API version "a" supports API version "b"

    Both arguments are "MAJOR.MINOR" strings.  "a" supports "b" exactly
    when the major numbers match and a's minor number is at least b's.
    Returns the integers 1 or 0 (matching the original truth-value style).
    '''
    major_a, minor_a = parsever(a)
    major_b, minor_b = parsever(b)
    if major_a != major_b:
        return 0
    if minor_a >= minor_b:
        return 1
    return 0
firmware-tools-2.1.14/firmwaretools/ply_lex.py 0000664 0017654 0017654 00000117443 11452664711 026057 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
    # Python 2.6: types.StringType/UnicodeType exist
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0: those attributes are gone, so fall back to str/bytes
    StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
    def func_code(f):
        # Python 2: the code object lives on f.func_code
        return f.func_code
else:
    def func_code(f):
        # Python 3: renamed to f.__code__
        return f.__code__
# This regular expression is used to match valid token names
# (letters, digits and underscores only; note it does not reject a
# leading digit).
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when an illegal character is encountered and no default
    error handler (t_error rule) is defined.  The remaining, unscanned
    input is kept on the .text attribute."""
    def __init__(self, message, s):
        # Exception.__init__ stores (message,) in self.args, exactly as
        # the original manual assignment did.
        Exception.__init__(self, message)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.  Instances carry the
    attributes type, value, lineno and lexpos (set by Lexer.token())."""
    def __str__(self):
        fields = (self.type, self.value, self.lineno, self.lexpos)
        return "LexToken(%s,%r,%d,%d)" % fields
    def __repr__(self):
        return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """Minimal stand-in for a logging.Logger that writes %-formatted
    messages to a file-like object.  warning/error add a severity prefix;
    critical (and its aliases info and debug) write the bare message."""
    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self._emit("", msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)

    def _emit(self, prefix, msg, args):
        # Single write path shared by every severity level.
        self.f.write(prefix + (msg % args) + "\n")

    # Lower-severity channels share the unprefixed writer.
    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Every attribute lookup (info, warning, error, ...) returns the logger
    # itself, and calling the logger also returns itself, so any chained
    # logging call such as log.debug("x") is silently absorbed.
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """Lexer runtime engine.

    Public interface: input() stores a new string, token() returns the next
    LexToken (or None at end of input), clone() copies the lexer; lineno and
    lexpos track the current position in the input.
    """
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode

    def clone(self,object=None):
        """Return a shallow copy of this lexer, optionally rebinding all
        rule methods onto *object*."""
        c = copy.copy(self)
        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        # Rebind the rule function by name on the new object
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile,outputdir=""):
        if isinstance(tabfile,types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir,basetabfilename)+".py"
        tf = open(filename,"w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion = %s\n" % repr(__version__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
        tabre = { }
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                # Store (regex text, names) pairs so readtab() can rebuild
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
            tabre[key] = titem
        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        if isinstance(tabfile,types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                env = { }
                exec("import %s as lextab" % tabfile, env,env)
                lextab = env['lextab']
        if getattr(lextab,"_tabversion","0.0") != __version__:
            raise ImportError("Inconsistent PLY version")
        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                # Recompile the stored regex text and re-bind rule names
                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c,StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata
        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue
            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue
                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos
                # lastindex identifies which named group (i.e. which rule) matched
                i = m.lastindex
                func,tok.type = lexindexfunc[i]
                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break
                lexpos = m.end()
                # If token is processed by a function, call it
                tok.lexer = self # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos
                newtok = func(tok)
                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos # This is here in case user has updated lexpos.
                    lexignore = self.lexignore # This is here in case there was a state change
                    break
                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])
                return newtok
            else:
                # (for-else: no master regex matched at this position)
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok
                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok
                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
        # End of input reached
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    # Python 3 spelling of the iterator protocol
    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table (globals merged with locals) of the frame
    *levels* levels up the call stack.  Used to pick up token definitions
    from the module that called lex() when no module was given."""
    try:
        # Raise and catch an exception purely to obtain a traceback object,
        # whose tb_frame gives access to the call stack (py2/py3 portable).
        raise RuntimeError
    except RuntimeError:
        e,b,t = sys.exc_info()
        f = t.tb_frame
        while levels > 0:
            f = f.f_back
            levels -= 1
        ldict = f.f_globals.copy()
        if f.f_globals != f.f_locals:
            ldict.update(f.f_locals)
    return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
    """Combine the regex fragments in relist into master regular expressions.

    Returns three parallel lists: [(compiled_re, index_table)], [regex_text],
    [group_names].  If the re module rejects the combined pattern (e.g. too
    many named groups), the list is split in half and each half is combined
    recursively, which is why the results are lists.
    """
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)
        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]
        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Rule defined as a function: record (function, token name)
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find("ignore_") > 0:
                    # t_ignore_* string rules produce no token at all
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        # Pattern too large for one compile: split and recurse on each half
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
    """Split a declaration name "t_[states_]NAME" into (states, tokenname).

    *names* is a dictionary whose keys are the declared state names.  The
    leading parts of s (after the "t_" prefix) that match state names or
    the wildcard 'ANY' become the state tuple; the rest is the token name.
    E.g. s = "t_foo_bar_SPAM" with states foo,bar -> (('foo','bar'),'SPAM').
    """
    nonstate = 1
    parts = s.split("_")
    for i in range(1,len(parts)):
        # Stop at the first part that is not a known state name.
        # (The code below deliberately relies on the loop variable i
        # remaining bound after the loop ends.)
        if not parts[i] in names and parts[i] != 'ANY': break
    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)
    if 'ANY' in states:
        # Wildcard: rule applies in every declared state
        states = tuple(names)
    tokenname = "_".join(parts[i:])
    return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates the lexing rules (tokens, literals, states and
    t_* symbols) found in the symbol dictionary supplied by the user."""
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = { 'INITIAL' : 'inclusive'}
        self.files = {}
        self.error = 0          # set to 1 by any validation failure
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get("literals","")

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1
                    continue
        except TypeError:
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1

    def get_states(self):
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
        # Now build up a list of functions and a list of strings
        self.toknames = { }   # Mapping of symbols to token names
        self.funcsym = { }    # Symbols defined as functions
        self.strsym = { }     # Symbols defined as strings
        self.ignore = { }     # Ignore strings by state
        self.errorf = { }     # Error functions by state
        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []
        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return
        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname
            if hasattr(t,"__call__"):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1
        # Sort the functions by line number
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
        # Sort the strings by regular expression length
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1
                tokname = self.toknames[fname]
                # Bound methods take (self, t); plain functions take (t)
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                    continue
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
                    continue
                if not f.__doc__:
                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
                    self.error = 1
                    continue
                try:
                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
                    if c.match(""):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
                    if '#' in f.__doc__:
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
                    self.error = 1
            # Validate all rules defined by strings
            for name,r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = 1
                    continue
                if not tokname in self.tokens and tokname.find("ignore_") < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
                    self.error = 1
                    continue
                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
                    if (c.match("")):
                        self.log.error("Regular expression for rule '%s' matches empty string",name)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
                    self.error = 1
            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'",state)
                self.error = 1
            # Validate the error function
            efunc = self.errorf.get(state,None)
            if efunc:
                f = efunc
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
        for f in self.files:
            self.validate_file(f)

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file. This is done using a simple regular expression
    # match on each line in the given file.
    # -----------------------------------------------------------------------------
    def validate_file(self,filename):
        import os.path
        base,ext = os.path.splitext(filename)
        if ext != '.py': return # No idea what the file is. Return OK
        try:
            f = open(filename)
            lines = f.readlines()
            f.close()
        except IOError:
            return # Couldn't find the file. Don't worry about it
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
        counthash = { }
        linen = 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    """Build a Lexer from the t_* definitions found in *module*/*object*
    (or, by default, in the caller's namespace) and return it.

    Also rebinds the module-level token(), input() and lexer globals to the
    new Lexer.  In optimize mode, rules are read from / written to the
    *lextab* table file instead of being validated every run.
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the lexer
    if object: module = object
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        # No module given: harvest definitions from the caller's frame
        ldict = get_caller_module_dict(2)
    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")
    if optimize and lextab:
        try:
            # Fast path: reuse the previously written table file
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            # Table missing/stale: fall through and rebuild from scratch
            pass
    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states = %r", linfo.stateinfo)
    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1
    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals
    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo
    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []
        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
        # Now add all of the simple rules
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
        regexs[state] = regex_list
    # Build the master regular expressions
    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags
    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")
    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if not s in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if not s in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error/ignore rules
            if not s in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
            if not s in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get("INITIAL","")
    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj
    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)
    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Run the lexer as a standalone program.

    Tokenizes *data* -- or, when omitted, the contents of the file named
    in sys.argv[1], falling back to standard input -- and writes one
    "(type,value,lineno,lexpos)" line per token to stdout.

    lexer -- object providing input()/token(); defaults to the
             module-level lexer built by lex().
    data  -- input text to tokenize (optional).
    """
    if not data:
        try:
            filename = sys.argv[1]
            # Bug fix: the original called open() without ever closing the
            # file; a context manager releases the handle even if read()
            # raises.
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()

    # Fall back to the module-level input()/token() globals installed by
    # lex() when no explicit lexer object was supplied.
    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)

    if lexer:
        _token = lexer.token
    else:
        _token = token

    while 1:
        tok = _token()
        if not tok:
            break
        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator attaching a token regex to a lexer rule function.

    *r* is either the pattern string itself, or a callable whose
    docstring holds the pattern (useful when the rule's own docstring
    cannot be written literally).
    """
    def set_doc(f):
        f.__doc__ = r.__doc__ if hasattr(r, "__call__") else r
        return f
    return set_doc

# Alternative spelling of the TOKEN decorator
Token = TOKEN
firmware-tools-2.1.14/firmwaretools/ply_yacc.py 0000664 0017654 0017654 00000372754 11452664711 026216 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what some might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2"       # Version stamp written into generated table modules

#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------

yaccdebug = 1                # Debugging mode.  If set, yacc generates a
                             # 'parser.out' file in the current directory
debug_file = 'parser.out'    # Default name of the debugging file
tab_module = 'parsetab'      # Default name of the table module
default_lr = 'LALR'          # Default LR table generation method
error_count = 3              # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0                # Set to True if developing yacc.  This turns off optimized
                             # implementations of certain functions.
resultlimit = 40             # Size limit of results when running in debug mode
pickle_protocol = 0          # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility shim for Python 2.6/3.0: fetch a function's code object
# under whichever attribute name this interpreter uses.
if sys.version_info[0] >= 3:
    def func_code(f):
        """Return the code object of *f* (Python 3 spelling)."""
        return f.__code__
else:
    def func_code(f):
        """Return the code object of *f* (Python 2 spelling)."""
        return f.func_code
# Compatibility: the largest native integer -- sys.maxint on Python 2,
# sys.maxsize on Python 3 (where sys.maxint no longer exists).
MAXINT = getattr(sys, "maxint", sys.maxsize)
# Python 2.x/3.0 compatibility: the companion lex module lives at a
# different import path depending on the interpreter major version.
def load_ply_lex():
    """Import and return the PLY lex module for this Python version."""
    if sys.version_info[0] >= 3:
        import ply.lex as lex
    else:
        import lex
    return lex
# This object is a stand-in for a logging object created by the logging
# module.  PLY uses it by default to create things such as the parser.out
# file.  Users wanting more detailed output can pass in their own logger.
class PlyLogger(object):
    """Minimal logger writing %-formatted messages to a file-like object.

    Note the deliberate aliases: info behaves like debug, and critical
    also aliases debug (not error) -- both are part of the historical
    contract and are preserved here.
    """

    def __init__(self, f):
        self.f = f  # destination stream (must support write())

    def debug(self, msg, *args, **kwargs):
        self.f.write("%s\n" % (msg % args))

    info = debug

    def warning(self, msg, *args, **kwargs):
        self.f.write("WARNING: %s\n" % (msg % args))

    def error(self, msg, *args, **kwargs):
        self.f.write("ERROR: %s\n" % (msg % args))

    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Any attribute access (debug, info, warning, ...) returns the logger
    # itself, and calling it is also a no-op returning itself, so arbitrary
    # logger method chains silently evaporate.
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
# Exception raised for yacc-related errors (bad grammar specifications,
# table construction failures, etc.)
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Render a rule-result value for the debug log.

    Produces "<type @ 0xID> (repr)", with the repr re-escaped when it
    contains newlines and truncated to the module-level resultlimit.
    """
    text = repr(r)
    if '\n' in text:
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Render a symbol-stack value for debug traces.

    Short reprs (under 16 chars, after escaping embedded newlines) are
    shown verbatim; anything longer collapses to "<type @ 0xID>".
    """
    text = repr(r)
    if '\n' in text:
        text = repr(text)
    if len(text) < 16:
        return text
    return "<%s @ 0x%x>" % (type(r).__name__, id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """A grammar symbol held on the parsing stack.

    Attributes are assigned externally by the parser engine: type,
    value, lineno/endlineno and lexpos/endlexpos (the end markers only
    when tracking is enabled).  Printing a symbol shows its type.
    """

    def __str__(self):
        return self.type

    def __repr__(self):
        return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols handed to a grammar rule function.

    Index access reads/writes the .value attribute of the underlying
    YaccSymbol objects; negative indices reach down the parser's symbol
    stack.  lineno()/linespan() and lexpos()/lexspan() expose positional
    information, defaulting to 0 (or the start position) when a symbol
    carries none.
    """

    def __init__(self, s, stack=None):
        self.slice = s        # symbols of the current production
        self.stack = stack    # full parser symbol stack (negative indexing)
        self.lexer = None     # set by the engine before each reduce
        self.parser = None

    def __getitem__(self, n):
        if n >= 0:
            return self.slice[n].value
        return self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        return [sym.value for sym in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        """Starting line number of item *n* (0 when unknown)."""
        return getattr(self.slice[n], "lineno", 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        """(startline, endline) tuple for item *n*."""
        first = getattr(self.slice[n], "lineno", 0)
        last = getattr(self.slice[n], "endlineno", first)
        return first, last

    def lexpos(self, n):
        """Starting lexing position of item *n* (0 when unknown)."""
        return getattr(self.slice[n], "lexpos", 0)

    def lexspan(self, n):
        """(lexpos, endlexpos) tuple for item *n*."""
        first = getattr(self.slice[n], "lexpos", 0)
        last = getattr(self.slice[n], "endlexpos", first)
        return first, last

    def error(self):
        """Signal a syntax error from inside a grammar rule."""
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The LR parsing engine.

    Drives an SLR/LALR(1) parse from the tables in *lrtab*.  The public
    entry point is parse(); parsedebug(), parseopt() and
    parseopt_notrack() are kept for backward compatibility but now all
    delegate to one shared implementation, _parse().  The original file
    maintained three hand-specialized near-identical copies of the same
    parsing loop (with explicit warnings that edits had to be copied
    between them); unifying them removes that maintenance hazard while
    preserving behavior -- the debug/tracking sections are simply guarded
    by cheap `if debug:` / `if tracking:` tests.
    """

    def __init__(self, lrtab, errorf):
        # lrtab  - table object exposing lr_productions/lr_action/lr_goto
        # errorf - user p_error() callback, or None
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf

    def errok(self):
        """Signal (typically from p_error) that error recovery is done."""
        self.errorok = 1

    def restart(self):
        """Reset the parser stacks to the start state (usable from p_error)."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)

    def parse(self, input=None, lexer=None, debug=0, tracking=0, tokenfunc=None):
        """Parse *input* and return the value of the start symbol.

        input     - source text handed to the lexer (optional)
        lexer     - lexer object; defaults to the module-level lex lexer
        debug     - false, a true int (enables a stderr PlyLogger), or a
                    logger-like object
        tracking  - if true, propagate line/position info to nonterminals
        tokenfunc - alternative token source replacing lexer.token
        """
        if debug or yaccdevel:
            if isinstance(debug, int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
        elif tracking:
            return self.parseopt(input, lexer, debug, tracking, tokenfunc)
        else:
            return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)

    # --- Backward-compatible wrappers over the unified engine -------------

    def parsedebug(self, input=None, lexer=None, debug=None, tracking=0, tokenfunc=None):
        """parse() variant with debug logging (*debug* must be a logger)."""
        return self._parse(input, lexer, debug, tracking, tokenfunc)

    def parseopt(self, input=None, lexer=None, debug=0, tracking=0, tokenfunc=None):
        """parse() variant with position tracking but no debug logging."""
        return self._parse(input, lexer, None, tracking, tokenfunc)

    def parseopt_notrack(self, input=None, lexer=None, debug=0, tracking=0, tokenfunc=None):
        """parse() variant with neither debug logging nor tracking."""
        return self._parse(input, lexer, None, 0, tokenfunc)

    def _parse(self, input=None, lexer=None, debug=None, tracking=0, tokenfunc=None):
        """Shared shift/reduce loop behind every public parse variant.

        *debug* is None/false for silent operation or a logger-like
        object; *tracking* enables line/position propagation.
        """
        lookahead = None                # Current lookahead symbol
        lookaheadstack = []             # Stack of pushed-back lookaheads
        actions = self.action           # Local refs avoid repeated
        goto = self.goto                # attribute lookups in the hot loop
        prod = self.productions
        pslice = YaccProduction(None)   # Production object passed to rules
        errorcount = 0                  # Counts down during error recovery

        if debug:
            debug.info("PLY: PARSE DEBUG START")

        # If no lexer was given, fall back to the companion lex module.
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Expose the lexer and parser to grammar rule functions.
        pslice.lexer = lexer
        pslice.parser = self

        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # State and symbol stacks (published on self so restart() works).
        statestack = []
        self.statestack = statestack
        symstack = []
        self.symstack = symstack
        pslice.stack = symstack
        errtoken = None

        # The start state is assumed to be (0, $end).
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0

        while 1:
            # Get the next symbol: the pending lookahead if one is set,
            # otherwise the pushback stack, otherwise a fresh token.
            if debug:
                debug.debug('')
                debug.debug('State : %s', state)
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            if debug:
                debug.debug('Stack : %s',
                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())

            # Look up the parser action for (state, lookahead type).
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # Shift the lookahead and go to state t.
                    statestack.append(t)
                    state = t
                    if debug:
                        debug.debug("Action : Shift and goto state %s", t)
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # Reduce by production number -t.
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    sym = YaccSymbol()
                    sym.type = pname            # Production name
                    sym.value = None
                    if debug:
                        if plen:
                            debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str,
                                       "[" + ",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]]) + "]", -t)
                        else:
                            debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [], -t)

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        if tracking:
                            # Span of the nonterminal = first..last symbol
                            # of the handle being reduced.
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, "endlineno", t1.lineno)
                            sym.endlexpos = getattr(t1, "endlexpos", t1.lexpos)
                        pslice.slice = targ
                        try:
                            # Pop the handle, run the rule, push the result.
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            if debug:
                                debug.info("Result : %s", format_result(pslice[0]))
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # Rule signalled an error: enter error recovery,
                            # replaying the lookahead after an 'error' symbol.
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                    else:
                        # Empty production: nothing is popped off the stack.
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        targ = [sym]
                        pslice.slice = targ
                        try:
                            p.callable(pslice)
                            if debug:
                                debug.info("Result : %s", format_result(pslice[0]))
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue

                if t == 0:
                    # Accept: return the value of the start symbol.
                    n = symstack[-1]
                    result = getattr(n, "value", None)
                    if debug:
                        debug.info("Done : Returning %s", format_result(result))
                        debug.info("PLY: PARSE DEBUG END")
                    return result

            if t is None:
                if debug:
                    debug.error('Error : %s',
                                ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # Syntax error.  Push the current token back and substitute
                # an 'error' token so grammar-level synchronization rules
                # can catch it.  The user-defined p_error() callback runs
                # only on the first error (errorcount == 0) or after errok().
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None        # End of file!
                    if self.errorfunc:
                        # Expose errok/token/restart as module globals for
                        # the duration of the callback (historical API).
                        global errok, token, restart
                        errok = self.errok
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart    # Delete special functions
                        if self.errorok:
                            # User performed panic-mode recovery; the value
                            # p_error() returned becomes the next lookahead.
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, "lineno"):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count

                # case 1: only the start state remains -- the entire parse
                # has been rolled back.  Discard the token and keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the stack still has entries but we hit EOF --
                # nothing more can be recovered.  Bail out.
                if lookahead.type == '$end':
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # 'error' already on top of the stack: drop the
                        # input symbol and continue.
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, "lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]    # Potential bug fix
                continue

            # Unreachable: every action-table entry is >0, <0, 0 or None.
            raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production
#
# Raw information about a single grammar rule, e.g.:
#
#       expr : expr PLUS term
#
# Attributes:
#   name     - left-hand-side name, e.g. 'expr'
#   prod     - tuple of right-hand-side symbols, e.g. ('expr','PLUS','term')
#   prec     - production precedence level
#   number   - production number
#   func     - name of the function that executes on reduce
#   callable - bound reduce function (set by bind())
#   file     - file where the production function is defined
#   line     - line number where the production function is defined
#   len      - number of right-hand-side symbols
#   usyms    - unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Cached length of the right-hand side
        self.len = len(self.prod)

        # Unique production symbols, in order of first appearance
        self.usyms = []
        for sym in self.prod:
            if sym not in self.usyms:
                self.usyms.append(sym)

        # All LR items for this production (filled in by build_lritems())
        self.lr_items = []
        self.lr_next = None

        # Cached display string, e.g. "expr -> expr PLUS term"
        if self.prod:
            self.str = "%s -> %s" % (self.name, " ".join(self.prod))
        else:
            self.str = "%s -> " % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return "Production(" + str(self) + ")"

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Always truthy, even for empty productions (Python 2 bool protocol)
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LR item with the dot at position *n*, or None if *n*
        is past the end of the production."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Precompute the productions that can immediately follow the dot.
        # (Hack: relies on the module-level Prodnames; remove later.)
        try:
            item.lr_after = Prodnames[item.prod[n + 1]]
        except (IndexError, KeyError):
            item.lr_after = []
        try:
            item.lr_before = item.prod[n - 1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Resolve the stored reduce-function name to a callable from pdict."""
        if self.func:
            self.callable = pdict[self.func]
# Minimal stand-in for Production objects when table data is read back from
# files.  Carries only what the LR parsing engine needs, plus a little
# debugging information.
class MiniProduction(object):

    def __init__(self, str, name, len, func, file, line):
        # NOTE: the parameters 'str', 'len' and 'file' shadow builtins; the
        # names are kept for compatibility with the stored table format.
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return "MiniProduction(%s)" % self.str

    def bind(self, pdict):
        """Resolve the stored function name to a callable from pdict."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# One stage of parsing a production, with "." marking the current parse
# position, e.g.:
#
#       expr : expr . PLUS term
#
# Attributes:
#   name       - production name, e.g. 'expr'
#   prod       - right-hand-side symbols including the dot
#   number     - production number
#   lr_next    - next LR item (dot shifted one position right)
#   lr_index   - position of the dot within prod
#   lookaheads - LALR lookahead symbols for this item
#   len        - number of symbols in prod (including the dot)
#   lr_after   - productions that can appear immediately after the dot
#   lr_before  - grammar symbol immediately before the dot
# -----------------------------------------------------------------------------
class LRItem(object):

    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        dotted = list(p.prod)
        dotted.insert(n, ".")
        self.prod = tuple(dotted)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if not self.prod:
            return "%s -> " % self.name
        return "%s -> %s" % (self.name, " ".join(self.prod))

    def __repr__(self):
        return "LRItem(" + str(self) + ")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols.  Used in
# add_production() to pick a rule's default precedence.
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Scan *symbols* right-to-left and return the first one found in
    *terminals*, or None if no symbol is a terminal."""
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------

class GrammarError(YaccError):
    """Raised for problems detected while constructing the Grammar
    (illegal rule names, duplicate rules, bad %prec usage, etc.)."""
    pass
class Grammar(object):
    """Contents of a grammar (productions, terminals, precedence rules)
    together with computed properties such as FIRST sets, FOLLOW sets and
    LR items.  This data drives the LR table-generation process."""

    def __init__(self, terminals):
        self.Productions = [None]   # A list of all of the productions.  The first
                                    # entry is always reserved for the purpose of
                                    # building an augmented grammar

        self.Prodnames = { }        # A dictionary mapping the names of nonterminals to a list of all
                                    # productions of that nonterminal.

        self.Prodmap = { }          # A dictionary that is only used to detect duplicate
                                    # productions.

        self.Terminals = { }        # A dictionary mapping the names of terminal symbols to a
                                    # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        self.Terminals['error'] = []

        self.Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
                                    # of rule numbers where they are used.

        self.First = { }            # A dictionary of precomputed FIRST(x) symbols

        self.Follow = { }           # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = { }       # Precedence rules for each terminal.  Contains tuples of the
                                    # form ('right',level), ('nonassoc',level) or ('left',level)

        self.UsedPrecedence = { }   # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.

        self.Start = None           # Starting symbol for the grammar

    def __len__(self):
        return len(self.Productions)

    def __getitem__(self, index):
        return self.Productions[index]

    def set_precedence(self, term, assoc, level):
        """Set the precedence for terminal *term*.  assoc is the
        associativity, one of 'left', 'right' or 'nonassoc'; level is a
        numeric level.  Must be called before any add_production()."""
        assert self.Productions == [None], "Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left', 'right', 'nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc, level)

    def add_production(self, prodname, syms, func=None, file='', line=0):
        """Assemble one production rule and compute its precedence level.

        The rule is supplied as a name (e.g. 'expr') and a list of
        right-hand-side symbols (e.g. ['expr','PLUS','term']).  Precedence
        is taken from a trailing %prec directive if present, otherwise
        from the rightmost terminal.  Raises GrammarError on invalid
        names, bad %prec usage or duplicate rules; returns 0 on success."""
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file, line, prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file, line, prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file, line, prodname))

        # Look for literal tokens such as '+' and register them as
        # single-character terminals.
        for n, s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file, line, s, prodname))
                    if not c in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file, line, s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file, line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file, line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname, None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file, line, precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]   # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms, self.Terminals)
            prodprec = self.Precedence.get(precname, ('right', 0))

        # See if the rule is already in the rulemap
        # (the local name 'map' shadows the builtin; kept as-is)
        map = "%s -> %s" % (prodname, syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file, line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber, prodname, syms, prodprec, func, file, line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0

    def set_start(self, start=None):
        """Set the starting symbol and create the augmented grammar.
        Production rule 0 becomes S' -> start; start defaults to the
        left-hand side of the first rule added."""
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start

    def find_unreachable(self):
        """Return a list of nonterminal symbols that can't be reached from
        the starting symbol."""

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s, []):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0

        mark_reachable_from(self.Productions[0].prod[0])

        return [s for s in list(self.Nonterminals)
                if not reachable[s]]

    def infinite_cycles(self):
        """Detect infinite recursion cycles: grammar rules where there is
        no possible way to derive a string consisting only of terminals.
        Returns a list of the offending symbols."""
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = 1

        terminates['$end'] = 1

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0

        # Then propagate termination until no change:
        while 1:
            some_change = 0
            for (n, pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s, term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    def undefined_symbols(self):
        """Return a list of (sym, prod) tuples for symbols used in the
        grammar but never defined as a token or rule; prod is the
        production where the symbol was used."""
        result = []
        for p in self.Productions:
            if not p: continue

            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s, p))
        return result

    def unused_terminals(self):
        """Return a list of terminals that were defined but never used by
        the grammar."""
        unused_tok = []
        for s, v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    def unused_rules(self):
        """Return a list of productions for grammar rules that were defined
        but never used (possibly unreachable)."""
        unused_prod = []
        for s, v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    def unused_precedence(self):
        """Return a list of (term, precedence) tuples for precedence rules
        that were never used by the grammar.  term is the terminal name and
        precedence is its associativity string such as 'left' or 'right'."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname, self.Precedence[termname][0]))

        return unused

    def _first(self, beta):
        """Compute FIRST1(beta) where beta is a tuple of symbols.

        During execution of compute_first() the result may be incomplete;
        afterward (e.g. when called from compute_follow()) it is complete.
        NOTE(review): '' is used here as the produces-empty marker — verify
        the rest of the file uses the same marker."""
        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0

            # Add all the non-empty symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('')

        return result

    def compute_first(self):
        """Compute FIRST1(X) for every grammar symbol X.  Returns (and
        caches) self.First."""
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break

        return self.First

    def compute_follow(self, start=None):
        """Compute FOLLOW(B) for every nonterminal B: the set of symbols
        that might follow B in a sentential form.  See the Dragon book,
        2nd Ed. p. 189.  Returns (and caches) self.Follow."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = [ '$end' ]

        while 1:
            didadd = 0
            for p in self.Productions[1:]:
                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow

    def build_lritems(self):
        """Walk the list of productions and build the complete set of LR
        items for each.  For each production p, p.lr_items is the full
        list and items are chained through .lr_next, e.g. for
        E -> E PLUS E:

            [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E .]
        """
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p, i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError, KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here.  They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------

class VersionError(YaccError):
    """Raised when stored parsing tables were written by an incompatible
    version (table version mismatch)."""
    pass
class LRTable(object):
    """Basic table of LR parsing information: action table, goto table and
    production list.  This class only *loads* previously generated tables;
    generation lives in the derived class LRGeneratedTable."""

    def __init__(self):
        self.lr_action = None        # LR action table
        self.lr_goto = None          # LR goto table
        self.lr_productions = None   # List of (Mini)Production objects
        self.lr_method = None        # Table construction method ('LALR'/'SLR')

    def read_table(self, module):
        """Load table data from an already-imported parsetab module (or
        from a module name, which is imported here).  Returns the table
        signature.  Raises VersionError on a table version mismatch."""
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                # In Python 3, exec() can't bind names in the enclosing
                # local scope, so use an explicit environment dict.
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']

        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load table data from a pickle file written by a previous run.
        Returns the table signature.  Raises VersionError on a table
        version mismatch."""
        try:
            import cPickle as pickle   # Python 2 fast path
        except ImportError:
            import pickle              # Python 3
        in_f = open(filename, "rb")

        # Fields are pickled in a fixed order; read them back in the same order.
        tabversion = pickle.load(in_f)
        if tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")
        self.lr_method = pickle.load(in_f)
        signature = pickle.load(in_f)
        self.lr_action = pickle.load(in_f)
        self.lr_goto = pickle.load(in_f)
        productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# digraph()
#
# Compute the set-valued function
#
#       F(x) = F'(x) U union{ F(y) | x R y }
#
# over the input set X, where R is a relation and FP computes F'(x).
# Used to compute Read() sets and FOLLOW sets during LALR(1) generation.
# -----------------------------------------------------------------------------
def digraph(X,R,FP):
    """Return a dict mapping each x in X to its computed F(x) set."""
    marks = dict((x, 0) for x in X)
    stack = []
    result = { }
    for x in X:
        if marks[x] == 0:
            traverse(x, marks, stack, result, X, R, FP)
    return result
def traverse(x,N,stack,F,X,R,FP):
    """Depth-first helper for digraph(): a Tarjan-style traversal in which
    every member of a strongly connected component ends up sharing the
    same F set and is marked with N[...] = MAXINT.  N holds visitation
    numbers, F accumulates the result sets."""
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        # Propagate the lowest visitation number reachable from x
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: pop the whole
        # component, sharing x's F set among all of its members.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
    """Build LR parsing tables for *grammar* using the given *method*
    ('SLR' or 'LALR'; anything else raises LALRError).  *log* is an
    optional debug logger; defaults to a NullLogger."""
    if method not in ['SLR', 'LALR']:
        raise LALRError("Unsupported method %s" % method)

    self.grammar = grammar
    self.lr_method = method

    # Set up the logger
    if not log:
        log = NullLogger()
    self.log = log

    # Internal attributes
    self.lr_action = {}         # Action table
    self.lr_goto = {}           # Goto table
    self.lr_productions = grammar.Productions    # Copy of grammar Production array
    self.lr_goto_cache = {}     # Cache of computed gotos
    self.lr0_cidhash = {}       # Cache of closures
    self._add_count = 0         # Internal counter used to detect cycles

    # Diagnostic information filled in by the table generator
    self.sr_conflict = 0
    self.rr_conflict = 0
    self.conflicts = []         # List of conflicts

    self.sr_conflicts = []
    self.rr_conflicts = []

    # Build the tables
    self.grammar.build_lritems()
    self.grammar.compute_first()
    self.grammar.compute_follow()
    self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
    """Return the LR(0) closure of the item set I (a list of LR items)."""
    # Bump the generation counter; items stamped with lr0_added equal to
    # this count have already been added during *this* closure computation.
    self._add_count += 1

    # Add everything in I to J
    J = I[:]
    didadd = 1
    while didadd:
        didadd = 0
        for j in J:
            for x in j.lr_after:
                if getattr(x, "lr0_added", 0) == self._add_count: continue
                # Add B --> .G to J
                J.append(x.lr_next)
                x.lr0_added = self._add_count
                didadd = 1
    return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol.  This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects).  With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
    # First we look for a previously cached entry
    g = self.lr_goto_cache.get((id(I), x), None)
    if g: return g

    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result.  The per-symbol cache is a trie keyed by the id()s
    # of the items that shift over x, terminated by the '$end' key.
    s = self.lr_goto_cache.get(x, None)
    if not s:
        s = { }
        self.lr_goto_cache[x] = s

    gs = [ ]
    for p in I:
        n = p.lr_next
        if n and n.lr_before == x:
            s1 = s.get(id(n), None)
            if not s1:
                s1 = { }
                s[id(n)] = s1
            gs.append(n)
            s = s1
    g = s.get('$end', None)
    if not g:
        if gs:
            g = self.lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs
    self.lr_goto_cache[(id(I), x)] = g
    return g
# Compute the LR(0) sets of item function
def lr0_items(self):
    """Return the canonical collection C of LR(0) item sets, registering
    each set's state number in self.lr0_cidhash."""
    C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
    i = 0
    for I in C:
        self.lr0_cidhash[id(I)] = i
        i += 1

    # Loop over the items in C and each grammar symbols.
    # (C grows while we iterate; the index-based while loop handles that.)
    i = 0
    while i < len(C):
        I = C[i]
        i += 1

        # Collect all of the symbols that could possibly be in the goto(I,X) sets
        asyms = { }
        for ii in I:
            for s in ii.usyms:
                asyms[s] = None

        for x in asyms:
            g = self.lr0_goto(I, x)
            if not g: continue
            if id(g) in self.lr0_cidhash: continue
            self.lr0_cidhash[id(g)] = len(C)
            C.append(g)

    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Penello (1982).
#
# DeRemer, F. L., and T. J. Penello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
def compute_lookback_includes(self, C, trans, nullable):
    """Determine the LOOKBACK and INCLUDES relations.

    LOOKBACK: determined by running the LR(0) state machine forward.
    Starting from an item "N : . A B C" we follow it forward to
    "N : A B C ." and record a relationship between that final state
    and the starting transition.  Stored in lookdict.

    INCLUDES: the relation (p,A) INCLUDES (p',B), which holds when
    B -> L A T with T deriving empty and p' leading to p over the
    string L.  Used to find nonterminal transitions nested inside
    other nonterminal transition states.  Stored in includedict.

    Returns the tuple (lookdict, includedict)."""
    lookdict = {}       # Dictionary of lookback relations
    includedict = {}    # Dictionary of include relations

    # Make a dictionary of non-terminal transitions
    dtrans = {}
    for t in trans:
        dtrans[t] = 1

    # Loop over all transitions and compute lookbacks and includes
    for state, N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N: continue

            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]

                # Check to see if this symbol and state are a non-terminal transition
                if (j, t) in dtrans:
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals: break   # No forget it
                        if not p.prod[li] in nullable: break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j, t))

                g = self.lr0_goto(C[j], t)              # Go to next set
                j = self.lr0_cidhash.get(id(g), -1)     # Go to next state

            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name: continue
                if r.len != p.len: continue
                i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]: break
                    i = i + 1
                else:
                    lookb.append((j, r))

        for i in includes:
            if not i in includedict: includedict[i] = []
            includedict[i].append((state, N))
        lookdict[(state, N)] = lookb

    return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Inputs:  C       = set of LR(0) items
#          ntrans  = set of nonterminal transitions
#          nullable = set of empty transitions
#
# Returns a set containing the read sets.
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute the Read() sets for every nonterminal transition using the
    digraph algorithm, seeded by DR() and closed over READS()."""
    dr = lambda x: self.dr_relation(C, x, nullable)
    reads = lambda x: self.reads_relation(C, x, nullable)
    return digraph(ntrans, reads, dr)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute the LALR follow sets from previously computed relations.

    ntrans   -- list of nonterminal transitions
    readsets -- read sets indexed by transition (from compute_read_sets)
    inclsets -- includes relation indexed by transition

    Returns the follow-set table produced by the digraph traversal.
    """
    base = lambda x: readsets[x]
    related = lambda x: inclsets.get(x, [])
    return digraph(ntrans, related, base)
# -----------------------------------------------------------------------------
# add_lookaheads()
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach computed lookahead symbols to grammar productions.

    lookbacks -- maps each nonterminal transition to a list of
                 (state, production) lookback pairs
    followset -- maps each transition to its follow symbols

    Each production's per-state lookahead list is extended (without
    duplicates) by the follow symbols of every transition that looks
    back to it.
    """
    for transition, pairs in lookbacks.items():
        follow = followset.get(transition, [])
        for state, prod in pairs:
            # Create the per-state lookahead list on first touch.
            laheads = prod.lookaheads.setdefault(state, [])
            for sym in follow:
                if sym not in laheads:
                    laheads.append(sym)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Drive the full LALR lookahead computation for item collection C.

    Pipeline: nullable nonterminals -> nonterminal transitions ->
    read sets -> lookback/includes relations -> follow sets ->
    attach lookaheads to the productions.
    """
    nullable = self.compute_nullable_nonterminals()
    nt_transitions = self.find_nonterminal_transitions(C)
    read_sets = self.compute_read_sets(C, nt_transitions, nullable)
    lookbacks, includes = self.compute_lookback_includes(C, nt_transitions, nullable)
    follow_sets = self.compute_follow_sets(nt_transitions, read_sets, includes)
    self.add_lookaheads(lookbacks, follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Build the LR action and goto tables for the grammar.

    Populates self.lr_action, self.lr_goto (and records shift/reduce and
    reduce/reduce conflicts in self.sr_conflicts / self.rr_conflicts).
    Action encoding: positive = shift to that state, negative = reduce by
    that rule number, 0 = accept, None = error (nonassoc).
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto # Goto array
    action = self.lr_action # Action array
    log = self.log # Logger for output
    actionp = { } # Action production array (temporary)
    log.info("Parsing method: %s", self.lr_method)
    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)
    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ] # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        log.info("")
        log.info("state %d", st)
        log.info("")
        for p in I:
            log.info(" (%d) %s", p.number, str(p))
        log.info("")
        for p in I:
            if p.len == p.lr_index + 1:
                # Dot at the far right: a completed item.
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action["$end"] = 0
                    st_actionp["$end"] = p
                else:
                    # We are at the end of a production. Reduce!
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        # SLR: fall back to the plain FOLLOW set.
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                sprec,slevel = Productions[st_actionp[a].number].prec
                                rprec,rlevel = Precedence.get(a,('right',0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        # Neither side has explicit precedence: record it.
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # Equal-precedence nonassoc: make it a syntax error.
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp,rejectp = pp,oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp,rejectp = oldp,pp
                                self.rr_conflicts.append((st,chosenp,rejectp))
                                log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No prior action on this symbol: plain reduce.
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                # Dot before a symbol: a potential shift.
                i = p.lr_index
                a = p.prod[i+1] # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I,a)
                    j = self.lr0_cidhash.get(id(g),-1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a,p,"shift and go to state %d" % j))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    # Two different shift targets cannot happen in a
                                    # consistent automaton.
                                    raise LALRError("Shift/shift conflict in state %d" % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                rprec,rlevel = Productions[st_actionp[a].number].prec
                                sprec,slevel = Precedence.get(a,('right',0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            st_action[a] = j
                            st_actionp[a] = p
        # Print the actions associated with each terminal
        _actprint = { }
        for a,p,m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(" %-15s %s",a,m)
                    _actprint[(a,m)] = 1
        log.info("")
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a,p,m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a,m) in _actprint:
                        log.debug(" ! %-15s [ %s ]",a,m)
                        not_used = 1
                        _actprint[(a,m)] = 1
        if not_used:
            log.debug("")
        # Construct the goto table for this state
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I,n)
            j = self.lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                log.info(" %-30s shift and go to state %d",n,j)
        # Commit this state's rows and advance to the next state number.
        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
    """Write the LR parsing tables out as an importable Python module.

    modulename -- dotted module name; only the last component is used as
                  the output file name
    outputdir  -- directory for the generated .py file
    signature  -- grammar signature stored so stale tables can be detected

    On IOError the error is reported to stderr and the method returns
    without raising (table generation is best-effort).
    """
    basemodulename = modulename.split(".")[-1]
    filename = os.path.join(outputdir,basemodulename) + ".py"
    try:
        f = open(filename,"w")
        # Header of the generated module: provenance plus the version/
        # method/signature values that read_table() later compares.
        f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
        # Change smaller to 0 to go back to original tables
        smaller = 1
        # Factor out names to try and make smaller
        if smaller:
            # Invert the action table: symbol -> ([states...], [actions...]),
            # which compresses repeated symbol names in the output file.
            items = { }
            for s,nd in self.lr_action.items():
                for name,v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([],[])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write("\n_lr_action_items = {")
            for k,v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")
            # Code emitted into the generated module that re-expands the
            # factored items back into the _lr_action[state][symbol] form.
            f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")
        else:
            # Uncompressed form: one (state,symbol) key per entry.
            f.write("\n_lr_action = { ");
            for k,v in self.lr_action.items():
                f.write("(%r,%r):%r," % (k[0],k[1],v))
            f.write("}\n");
        if smaller:
            # Factor out names to try and make smaller
            # Same compression scheme for the goto table.
            items = { }
            for s,nd in self.lr_goto.items():
                for name,v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([],[])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write("\n_lr_goto_items = {")
            for k,v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")
            f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = { }
      _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
        else:
            f.write("\n_lr_goto = { ");
            for k,v in self.lr_goto.items():
                f.write("(%r,%r):%r," % (k[0],k[1],v))
            f.write("}\n");
        # Write production table
        f.write("_lr_productions = [\n")
        for p in self.lr_productions:
            if p.func:
                f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
            else:
                # Productions without an action function carry no source info.
                f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
        f.write("]\n")
        f.close()
    except IOError:
        # sys.exc_info() form keeps this code Python 2/3 compatible.
        e = sys.exc_info()[1]
        sys.stderr.write("Unable to create '%s'\n" % filename)
        sys.stderr.write(str(e)+"\n")
        return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
    """Pickle the LR parsing tables to *filename*.

    filename  -- path of the pickle file to create
    signature -- grammar signature stored for staleness detection

    The records are written in the order read_pickle() expects:
    table version, LR method, signature, action table, goto table,
    and finally the flattened production list.
    """
    try:
        import cPickle as pickle   # faster C implementation on Python 2
    except ImportError:
        import pickle
    outf = open(filename,"wb")
    # Fix: close the file even when a dump raises (e.g. unpicklable
    # callable or disk-full), instead of leaking the descriptor.
    try:
        pickle.dump(__tabversion__,outf,pickle_protocol)
        pickle.dump(self.lr_method,outf,pickle_protocol)
        pickle.dump(signature,outf,pickle_protocol)
        pickle.dump(self.lr_action,outf,pickle_protocol)
        pickle.dump(self.lr_goto,outf,pickle_protocol)
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
            else:
                # No action function: no source location to record.
                outp.append((str(p),p.name,p.len,None,None,None))
        pickle.dump(outp,outf,pickle_protocol)
    finally:
        outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# get_caller_module_dict()
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the merged symbol table of a caller *levels* frames up.

    Globals are copied first and then shadowed by locals, except at module
    scope where the two dictionaries are the same object and the update is
    skipped.  The raise/except dance obtains the current frame without
    relying on implementation-specific frame APIs.
    """
    try:
        raise RuntimeError
    except RuntimeError:
        frame = sys.exc_info()[2].tb_frame
    for _ in range(levels):
        frame = frame.f_back
    symbols = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
    """Parse a rule docstring into production data.

    doc  -- docstring text containing "name : sym sym ..." rules, with
            '|' lines continuing the previous rule
    file -- source file name, used for error messages
    line -- line number of the docstring's first line in that file

    Returns a list of (file, lineno, prodname, syms) tuples.
    Raises SyntaxError on a misplaced '|' or a malformed rule.
    """
    grammar = []
    current = None          # name of the most recent production, for '|' lines
    lineno = line
    for raw in doc.splitlines():
        lineno += 1
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # This is a continuation of a previous rule
                if not current:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,lineno))
                prodname = current
                syms = tokens[1:]
            else:
                prodname = tokens[0]
                current = prodname
                syms = tokens[2:]
                assign = tokens[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,lineno))
            grammar.append((file,lineno,prodname,syms))
        except SyntaxError:
            raise
        except Exception:
            # Anything else (e.g. a one-token line) is a malformed rule.
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,lineno,raw.strip()))
    return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Extracts and validates a parser specification from a symbol dictionary.

    The dictionary (a module namespace or caller environment) is mined for
    'start', 'p_error', 'tokens', 'precedence' and all p_* rule functions.
    Call get_all() first, then validate_all(); self.error is set nonzero
    whenever a validation step fails.
    """
    def __init__(self,pdict,log=None):
        self.pdict = pdict          # symbol dictionary to introspect
        self.start = None           # optional start symbol
        self.error_func = None      # optional p_error handler
        self.tokens = None          # token list
        self.files = {}             # set of files where rules are defined
        self.grammar = []           # list of (funcname, (file,line,prodname,syms))
        self.error = 0              # nonzero after any validation failure
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Collect start symbol, error function, tokens, precedence, rules."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run all validators; return nonzero if anything failed."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an md5 digest of the grammar specification.

        Used to decide whether previously generated tables match the
        current grammar.
        """
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5       # very old Pythons without hashlib
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            # Malformed entries are reported later by the validators;
            # here we just skip them so a digest is always produced.
            pass
        return sig.digest()

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file. Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work). Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_files(self):
        """Warn about duplicated p_rulename() definitions in rule files."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
        for filename in self.files.keys():
            base,ext = os.path.splitext(filename)
            if ext != '.py': return 1 # No idea. Assume it's okay.
            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                # Source is not readable; skip the duplicate check.
                continue
            counthash = { }
            for linen,l in enumerate(lines):
                linen += 1        # report 1-based line numbers
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)

    # Get the start symbol
    def get_start(self):
        """Fetch the optional 'start' symbol from the dictionary."""
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        """Check that 'start', if given, is a string."""
        if self.start is not None:
            if not isinstance(self.start,str):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        """Fetch the optional p_error handler from the dictionary."""
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        """Check p_error is a function/method taking exactly one argument."""
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1     # bound method: implicit self adds one argument
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return
            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1
            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1

    # Get the tokens map
    def get_tokens(self):
        """Fetch and minimally sanity-check the 'tokens' declaration."""
        tokens = self.pdict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        # NOTE(review): this emptiness check is unreachable -- an empty
        # sequence was already rejected by the first check above.
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Reject the reserved name 'error' and warn on duplicate tokens."""
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return
        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the precedence map (if any)
    def get_precedence(self):
        """Fetch the optional 'precedence' specification."""
        self.prec = self.pdict.get("precedence",None)

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Normalize 'precedence' into self.preclist of (term, assoc, level).

        Levels are numbered from 1; later entries in the table bind tighter.
        """
        preclist = []
        if self.prec:
            if not isinstance(self.prec,(list,tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level,p in enumerate(self.prec):
                if not isinstance(p,(list,tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return
                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc,str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term,str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    preclist.append((term,assoc,level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect all p_* rule functions, ordered by definition line."""
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))
        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check rule-function arity/docstrings and parse their grammar rules.

        Fills self.grammar with (funcname, production) pairs and records
        each rule file in self.files for validate_files().
        """
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return
        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2          # self + production object
            else:
                reqargs = 1          # production object only
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1
                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1
        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    # Heuristic: a one-argument function whose docstring's
                    # second word is ':' probably is a rule missing its p_ prefix.
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass
        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build an LR parser from rules found in *module* (or the caller's
    namespace) and return the LRParser object.

    method     -- 'LALR' or 'SLR'
    module     -- object whose attributes define the grammar; defaults to
                  the caller's environment
    tabmodule  -- name of the cached table module to read/write
    start      -- override for the grammar start symbol
    optimize   -- when true, trust cached tables without signature checks
    picklefile -- write tables as a pickle instead of a .py module

    Side effects: rebinds the module-global `parse` shortcut and may write
    table/debug files.  Raises YaccError when the grammar is unusable.
    """
    global parse # Reference to the parsing method of the last built parser
    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        # Two levels up: past this function and get_caller_module_dict itself.
        pdict = get_caller_module_dict(2)
    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()
    if pinfo.error:
        raise YaccError("Unable to build parser")
    # Check signature against table files (if any)
    signature = pinfo.signature()
    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            # Cached tables match (or checks are disabled): reuse them.
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any failure to load cached tables just forces regeneration below.
        pass
    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()
    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
    errors = 0
    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")
    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")
    # Create a grammar object
    grammar = Grammar(pinfo.tokens)
    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))
    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1
    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1
    if errors:
        raise YaccError("Unable to build parser")
    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1
    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)
    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)
    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))
    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))
    if debug:
        # Cross-reference listing: which rules mention each symbol.
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")
    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)
        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1
    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1
    if errors:
        raise YaccError("Unable to build parser")
    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)
    lr = LRGeneratedTable(grammar,method,debuglog)
    if debug:
        num_sr = len(lr.sr_conflicts)
        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)
        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)
    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")
        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
        # De-duplicate reduce/reduce reports: the same pair of rules can
        # conflict on several lookahead symbols in one state.
        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1
        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)
    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)
    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)
    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)
    parse = parser.parse
    return parser
firmware-tools-2.1.14/firmwaretools/pycompat.py 0000775 0017654 0017654 00000014617 10765775302 026246 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""module
some docs here eventually.
"""
from __future__ import generators
# import arranged alphabetically
import commands
import getopt
import glob
import os
import sys
import ConfigParser
import math
import zipfile
import re
import shutil
import signal
import time
import threading
from firmwaretools.trace_decorator import decorate, traceLog, getLog
def clearLine():
    """Return the ANSI escape sequence that erases the current terminal
    line (ESC[2K) and moves the cursor back to column 0 (ESC[0G)."""
    return "\033[2K" "\033[0G"
def spinner(cycle=['/', '-', '\\', '|']):
    """Return the next character of a text spinner animation.

    NOTE: the mutable default argument is used deliberately as persistent
    state -- every call without an argument rotates the same shared list
    by one position, producing / - \\ | / - ...
    """
    # ESC codes for clear line and position cursor at horizontal pos 0
    step = cycle.pop(0)
    cycle.append(step)
    return step
def pad(strn, pad_width=67):
    """Truncate *strn* to *pad_width* characters (appending ' ...') so
    spinPrint output stays on a single terminal line."""
    if len(strn) <= pad_width:
        return strn
    return strn[:pad_width] + ' ...'
def spinPrint(strn, outFd=sys.stderr):
    # Redraw a one-line progress display: erase the current line, then
    # write the next spinner character, a tab, and the truncated message.
    outFd.write(clearLine())
    outFd.write("%s\t%s" % (spinner(), pad(strn)))
    # Flush so progress is visible immediately even if outFd is buffered.
    outFd.flush()
def timedSpinPrint( strn, start ):
    """Like spinPrint(), but also shows seconds elapsed since *start*
    (a time.time() value), right-aligned at terminal column 65."""
    elapsed = time.time() - start
    # "\033[65G" is the ANSI escape to position the cursor at column 65
    spinPrint( strn + "\033[65G time: %2.2f" % elapsed )
# helper class & functions for executeCommand()
# User should handle this if they specify a timeout
class commandTimeoutExpired(Exception):
    """Raised by executeCommand() when the caller-specified timeout
    elapses before the command finishes."""
# the problem with os.system() is that the command that is run gets any
# keyboard input and/or signals. This means that Ctrl-C interrupts the
# sub-program instead of the python program. This helper function fixes that.
# It also allows us to set up a maximum timeout before all children are killed
decorate(traceLog())
def executeCommand(cmd, timeout=0):
    """Run *cmd* via os.system() in a forked child process group.

    cmd     -- shell command string
    timeout -- seconds before the whole child process group is killed;
               0 means no timeout

    Returns the command's exit status (high byte of the wait status).
    Raises commandTimeoutExpired when the timeout fires; KeyboardInterrupt
    is propagated after the children are killed.
    """
    class alarmExc(Exception): pass
    def alarmhandler(signum,stackframe):
        raise alarmExc("timeout expired")
    pid = os.fork()
    if pid:
        #parent
        rpid = ret = 0
        # Install our SIGALRM handler, remembering any handler and any
        # already-scheduled alarm so they can be restored afterwards.
        oldhandler=signal.signal(signal.SIGALRM,alarmhandler)
        starttime = time.time()
        prevTimeout = signal.alarm(timeout)
        try:
            (rpid, ret) = os.waitpid(pid, 0)
            signal.alarm(0)
            signal.signal(signal.SIGALRM,oldhandler)
            if prevTimeout:
                # Re-arm the caller's outstanding alarm, minus time spent here.
                passed = time.time() - starttime
                signal.alarm(int(math.ceil(prevTimeout - passed)))
        except alarmExc:
            # Timeout: terminate the child's whole process group
            # (SIGTERM first, then SIGKILL after a grace second).
            try:
                os.kill(-pid, signal.SIGTERM)
                time.sleep(1)
                os.kill(-pid, signal.SIGKILL)
            except OSError: # errno=3 == no such process
                pass
            (rpid, ret) = os.waitpid(pid, 0)
            signal.signal(signal.SIGALRM,oldhandler)
            if prevTimeout:
                passed = time.time() - starttime
                # At least 1 second: signal.alarm(0) would cancel instead.
                signal.alarm(int(max(math.ceil(prevTimeout - passed), 1)))
            raise commandTimeoutExpired( "Specified timeout of %s seconds expired before command finished. Command was: %s"
                    % (timeout, cmd)
                    )
        except KeyboardInterrupt:
            # Ctrl-C in the parent: kill the children, reap, re-raise.
            signal.signal(signal.SIGALRM,oldhandler)
            try:
                os.kill(-pid, signal.SIGTERM)
                time.sleep(1)
                os.kill(-pid, signal.SIGKILL)
            except OSError: # errno=3 == no such process
                pass
            (rpid, ret) = os.waitpid(pid, 0)
            raise
        # mask and return just return value
        return (ret & 0xFF00) >> 8
    else:
        #child
        os.setpgrp() # become process group leader so that we can kill all our children
        signal.signal(signal.SIGINT, signal.SIG_IGN) #ignore Ctrl-C so parent gets it
        ret = os.system(cmd)
        os._exit( (ret & 0xFF00) >> 8 )
decorate(traceLog())
def copyFile( source, dest, ignoreException=0 ):
    """Copy *source* to *dest* via shutil.copyfile().

    When *ignoreException* is true, an IOError during the copy is
    silently swallowed; otherwise it propagates to the caller.
    """
    try:
        shutil.copyfile(source, dest)
    except IOError:
        if ignoreException:
            return
        raise
# python 2.3 has a better version, but we have to run on python 2.2. :-(
decorate(traceLog())
def mktempdir( prefix="/tmp" ):
    """Create a secure temporary directory under *prefix* by shelling out
    to mktemp(1), and return its path.

    Raises Exception when mktemp fails.
    """
    rc, out = commands.getstatusoutput("mktemp -d %s/tempdir-$$-$RANDOM-XXXXXX" % prefix)
    if rc != 0:
        raise Exception("could not create secure temporary directory: %s" % out)
    return out
# generator function -- emulates the os.walk() generator in python 2.3 (mostly)
# ret = (path, dirs, files) foreach dir
decorate(traceLog())
def walkPath(topdir, direction=0):
    """Recursively yield (path, dirs, files) for *topdir* and its subtree.

    direction=0 yields each directory before its children (top-down);
    direction=1 yields it after (bottom-up).  Symlinked directories are
    listed but never descended into.
    """
    entries = os.listdir(topdir)
    files = [e for e in entries if os.path.isfile(os.path.join(topdir, e))]
    dirs = [e for e in entries if os.path.isdir(os.path.join(topdir, e))]
    if direction == 0:
        yield (topdir, dirs, files)
    for sub in dirs:
        if not os.path.islink(os.path.join(topdir, sub)):
            for triple in walkPath(os.path.join(topdir, sub)):
                yield triple
    if direction == 1:
        yield (topdir, dirs, files)
decorate(traceLog())
def runLongProcess(function, args=None, kargs=None, waitLoopFunction=None):
    # runs a function in a separate thread. Runs waitLoopFunction() while it
    # waits for the function to finish. Good for updating GUI, or other stuff
    #
    # NOTE(review): when waitLoopFunction is None this loop busy-waits at
    # full CPU until the worker finishes -- presumably acceptable for the
    # GUI use case, but worth confirming.
    thread = BackgroundWorker(function, args, kargs)
    while thread.running:
        if waitLoopFunction is not None:
            waitLoopFunction()
    # run waitLoopFunction one last time before exit.
    # gives status opportunity to update to 100%
    if waitLoopFunction is not None:
        waitLoopFunction()
    # Re-raise any exception the worker captured, after logging it.
    if thread.exception:
        getLog(prefix="verbose.").exception(thread.exception)
        raise thread.exception
    return thread.returnCode
class BackgroundWorker(threading.Thread):
    """Thread that runs one callable and records its outcome.

    The thread starts itself from __init__. Callers poll .running (1 while
    the function executes, 0 afterwards) and then read .returnCode or
    .exception. Used by runLongProcess().
    """
    def __init__ (self, function, args=None, kargs=None):
        threading.Thread.__init__(self)
        self.function = function
        self.args = args
        self.kargs = kargs
        self.exception = None    # set if the function raised
        self.returnCode = None   # set to the function's return value
        self.running=1
        if self.args is None: self.args = []
        if self.kargs is None: self.kargs = {}
        # auto-start; callers never call start() themselves
        self.start()

    decorate(traceLog())
    def run(self):
        try:
            self.returnCode = self.function(*self.args, **self.kargs)
        except (Exception,), e:
            # stash for the caller; re-raising here would be lost in the thread
            self.exception = e
        self.running=0
firmware-tools-2.1.14/firmwaretools/repository.py 0000664 0017654 0017654 00000032266 11376536320 026620 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""
repository module
"""
from __future__ import generators
import os
import ConfigParser
import package
import pycompat
import dep_parser
import sys
import traceback
import firmwaretools as ft
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import logging
moduleLog = getLog()
moduleVerboseLog = getLog(prefix="verbose.")
class CircularDependencyError(Exception): pass
# TODO:
# -- conf item should NEVER be used outside of constructor (makePackage)
decorate(traceLog())
def makePackage(configFile):
    """Build a package object from a package.ini file.

    The [package] section may name a python 'module' and class 'type' to
    instantiate; on any config/import problem we fall back to the generic
    package.RepositoryPackage.
    """
    conf = ConfigParser.ConfigParser()
    conf.read(configFile)
    # make a standard package
    displayname = "unknown"
    if conf.has_option("package", "displayname"):
        displayname = conf.get("package", "displayname")
    # renamed from 'type', which shadowed the builtin
    pkgClass = package.RepositoryPackage
    try:
        pymod = conf.get("package","module")
        moduleLog.debug("pymod: %s" % pymod)
        # __import__ returns the top-level package; walk down to the leaf
        module = __import__(pymod, globals(), locals(), [])
        for i in pymod.split(".")[1:]:
            module = getattr(module, i)
        packageTypeClass = conf.get("package", "type")
        pkgClass = getattr(module, packageTypeClass)
        moduleLog.debug("direct instantiate")
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError, ImportError, AttributeError):
        # fall back to the generic package type; keep the traceback in the
        # verbose log for debugging bad package.ini files
        moduleLog.debug(''.join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)))
    p = pkgClass(
        displayname=displayname,
        name=conf.get("package", "name"),
        version=conf.get("package", "version"),
        conf=conf,
        path=os.path.dirname(configFile),
    )
    return p
class SystemInventory(object):
    """Model of the devices on this system and candidate firmware updates.

    deviceList maps device.uniqueInstance to a dict with keys:
      "device"            -- the inventoried device object
      "update"            -- automatically suggested update (or None)
      "available_updates" -- list of all candidate packages for the device
      "pinned_update"     -- (optional) a user-forced package choice
    """
    decorate(traceLog())
    def __init__(self, *args, **kargs):
        self.deviceList = {}
        # policy flags: by default never downgrade or re-flash same version
        self.allowDowngrade=False
        self.allowReflash=False

    decorate(traceLog())
    def addDevice(self, device):
        # register with no suggested update and no candidates yet
        self.deviceList[device.uniqueInstance] = { "device": device, "update": None, "available_updates": []}

    decorate(traceLog())
    def getDevice(self, uniqueInstance, default=None):
        # NB: returns the whole details dict, not just the device object
        return self.deviceList.get(uniqueInstance, default)

    decorate(traceLog())
    def iterDevices(self, name=None):
        """Yield devices, optionally filtered by .name or .shortname."""
        for device, details in self.deviceList.items():
            if name is None:
                yield details["device"]
            else:
                if details["device"].name == name:
                    yield details["device"]
                else:
                    try:
                        # not every device object has a shortname
                        if details["device"].shortname == name:
                            yield details["device"]
                    except AttributeError:
                        pass

    decorate(traceLog())
    def addAvailablePackage(self, package):
        """Attach *package* as a candidate for every device matching its name."""
        for myDev in self.iterDevices(name=package.name):
            available_updates = self.deviceList[myDev.uniqueInstance]["available_updates"]
            available_updates.append(package)
            self.deviceList[myDev.uniqueInstance]["available_updates"] = available_updates
            package.attachToDevice(myDev)

    decorate(traceLog())
    def iterAvailableUpdates(self, device):
        # all candidates recorded for this device, unfiltered by rules
        for pkg in self.deviceList[device.uniqueInstance]["available_updates"]:
            yield pkg

    decorate(traceLog())
    def getSuggestedUpdatePackageForDevice(self, device):
        # the automatically calculated "best" update; ignores any user pin
        ret = None
        if self.deviceList.has_key(device.uniqueInstance):
            ret = self.deviceList[device.uniqueInstance]["update"]
        return ret

    decorate(traceLog())
    def getUpdatePackageForDevice(self, device):
        # a user pin takes precedence over the calculated suggestion
        ret = None
        if self.deviceList.has_key(device.uniqueInstance):
            if self.deviceList[device.uniqueInstance].has_key("pinned_update"):
                ret = self.deviceList[device.uniqueInstance]["pinned_update"]
            else:
                ret = self.deviceList[device.uniqueInstance]["update"]
        return ret

    decorate(traceLog())
    def pinUpdatePackage(self, device, pkg):
        """Force *pkg* as the update for *device*.

        Runs a dry-run install ordering to validate dependencies; on
        CircularDependencyError the previous pin state is restored and the
        exception re-raised.
        """
        #TODO: ensure that pkg is in 'available_pkgs'
        hasOldPin = False
        if self.deviceList[device.uniqueInstance].has_key("pinned_update"):
            hasOldPin = True
            oldPin = self.deviceList[device.uniqueInstance]["pinned_update"]
        self.deviceList[device.uniqueInstance]["pinned_update"] = pkg
        # just check the rules... not actually installing
        try:
            for i in self.generateInstallationOrder(): pass
        except CircularDependencyError, e:
            # roll back
            if hasOldPin:
                self.deviceList[device.uniqueInstance]["pinned_update"] = oldPin
            else:
                del(self.deviceList[device.uniqueInstance]["pinned_update"])
            raise

    decorate(traceLog())
    def unPinDevice(self, device):
        # silently a no-op when the device has no pin
        if self.deviceList[device.uniqueInstance].has_key("pinned_update"):
            del(self.deviceList[device.uniqueInstance]["pinned_update"])

    decorate(traceLog())
    def reset(self):
        # clear all user pins; calculated suggestions are left alone
        for device in self.iterDevices():
            self.unPinDevice(device)

    decorate(traceLog())
    def getMemento(self, deviceHint=None):
        """Snapshot pin state (all devices, or just *deviceHint*) plus policy flags."""
        memento = {}
        memento['savePin'] = {}
        for deviceUniqueInstance, details in self.deviceList.items():
            if deviceHint:
                if deviceHint.uniqueInstance != deviceUniqueInstance:
                    continue
            if details.has_key("pinned_update"):
                memento['savePin'][deviceUniqueInstance] = { 'device': details["device"], 'hasPin': 1, 'oldPin': details["pinned_update"] }
            else:
                memento['savePin'][deviceUniqueInstance] = { 'device': details["device"], 'hasPin': 0, 'oldPin': None }
        memento["internal.allowReflash"] = self.allowReflash
        memento["internal.allowDowngrade"] = self.allowDowngrade
        return memento

    decorate(traceLog())
    def setMemento(self, memento):
        """Restore state previously captured by getMemento()."""
        self.allowReflash = memento["internal.allowReflash"]
        self.allowDowngrade = memento["internal.allowDowngrade"]
        for deviceUniqueInstance, details in memento['savePin'].items():
            if details['hasPin']:
                self.pinUpdatePackage(details["device"], details["oldPin"])
            else:
                self.unPinDevice(details["device"])

    decorate(traceLog())
    def setAllowDowngrade(self, val):
        self.allowDowngrade = val

    decorate(traceLog())
    def getAllowDowngrade(self):
        return self.allowDowngrade

    decorate(traceLog())
    def setAllowReflash(self, val):
        self.allowReflash = val

    decorate(traceLog())
    def getAllowReflash(self):
        return self.allowReflash

    decorate(traceLog())
    def checkRules(self, device, candidate, unionInventory, cb=None):
        """Return 1 if *candidate* may be installed on *device*, else 0.

        *unionInventory* is current inventory overlaid with already-planned
        updates; *cb* receives a callback explaining any rejection.
        """
        # is candidate newer than what is installed
        if not self.allowDowngrade and device.compareVersion(candidate) > 0:
            ft.callCB(cb, who="checkRules", what="package_not_newer", package=candidate, device=device)
            return 0
        # is candidate the same version as what is installed
        if not self.allowReflash and device.compareVersion(candidate) == 0:
            ft.callCB(cb, who="checkRules", what="package_same_version", package=candidate, device=device)
            return 0
        #check to see if this package has specific system requirements
        # for now, check if we are on a specific system by checking for
        # a BIOS package w/ matching id. In future, may have specific
        # system package.
        if hasattr(candidate,"conf") and candidate.conf.has_option("package", "limit_system_support"):
            systemVenDev = candidate.conf.get("package", "limit_system_support")
            if not unionInventory.get( "system_bios(%s)" % systemVenDev ):
                ft.callCB(cb, who="checkRules", what="fail_limit_system_check", package=candidate)
                return 0
        #check generic dependencies
        if hasattr(candidate,"conf") and candidate.conf.has_option("package", "requires"):
            requires = candidate.conf.get("package", "requires")
            if len(requires):
                d = dep_parser.DepParser(requires, unionInventory, self.deviceList)
                if not d.depPass:
                    ft.callCB(cb, who="checkRules", what="fail_dependency_check", package=candidate, reason=d.reason)
                    return 0
        return 1

    decorate(traceLog())
    def calculateUpgradeList(self, cb=None):
        """Fill in the suggested "update" for every device.

        Fixed-point loop: each accepted package is added to the union
        inventory and everything is re-tried, so dependency-gated packages
        become eligible in later passes.
        """
        unionInventory = {}
        for deviceUniqueInstance, details in self.deviceList.items():
            unionInventory[deviceUniqueInstance] = details["device"]
        # for every device, look at the available updates to see if one can be applied.
        # if we do any work, start over so that dependencies work themselves out over multiple iterations.
        workToDo = 1
        while workToDo:
            workToDo = 0
            for deviceUniqueInstance, details in self.deviceList.items():
                for candidate in details["available_updates"]:
                    # check if this package is better than the current best
                    if unionInventory[deviceUniqueInstance].compareVersion(candidate) >= 0:
                        continue
                    if self.checkRules(details["device"], candidate, unionInventory, cb=cb):
                        self.deviceList[deviceUniqueInstance]["update"] = candidate
                        # update union inventory
                        unionInventory[deviceUniqueInstance] = candidate
                        # need another run-through in case this fixes deps for another package
                        workToDo = 1

    decorate(traceLog())
    def generateInstallationOrder(self, returnDeviceToo=0, cb=None):
        """Yield chosen update packages in a dependency-safe install order.

        Yields (device, pkg) tuples when *returnDeviceToo* is true. Raises
        CircularDependencyError if some updates can never satisfy the rules.
        """
        unionInventory = {}
        for deviceUniqueInstance, details in self.deviceList.items():
            unionInventory[deviceUniqueInstance] = details["device"]
        # generate initial union inventory
        # we will start with no update packages and add them in one at a time
        # as we install them
        updateDeviceList = [] # [ pkg, pkg, pkg ]
        for pkgName, details in self.deviceList.items():
            update = self.getUpdatePackageForDevice(details["device"])
            if update:
                updateDeviceList.append( (details["device"], update) )
        workToDo = 1
        while workToDo:
            workToDo = 0
            for device, candidate in updateDeviceList:
                if self.checkRules(device, candidate, unionInventory, cb=cb):
                    candidate.setCurrentInstallDevice(device)
                    if returnDeviceToo:
                        yield (device, candidate)
                    else:
                        yield candidate
                    # move pkg from to-install list to inventory list
                    updateDeviceList.remove((device,candidate))
                    unionInventory[device.uniqueInstance] = candidate
                    # need another run-through in case this fixes deps for another package
                    workToDo = 1
        if len(updateDeviceList):
            raise CircularDependencyError("packages have circular dependency, or are otherwise uninstallable.")
class Repository(object):
    """A firmware package repository rooted at one or more directory trees.

    Packages are discovered by scanning for 'package.ini' files.
    """
    decorate(traceLog())
    def __init__(self, *args):
        # each positional argument is a directory to scan for packages
        self.dirList = []
        for i in args:
            self.dirList.append(i)

    decorate(traceLog())
    def iterPackages(self, cb=None):
        """Yield a package object for every parseable package.ini under our
        directories. Broken packages are logged (verbose) and skipped."""
        for dir in self.dirList:
            try:
                for (path, dirs, files) in pycompat.walkPath(dir):
                    if "package.ini" in files:
                        ft.callCB(cb, who="iterPackages", what="found_package_ini", path=os.path.join(path, "package.ini" ))
                        try:
                            p = makePackage( os.path.join(path, "package.ini" ))
                            ft.callCB(cb, who="iterPackages", what="made_package", package=p)
                            yield p
                        except Exception:
                            # BUGFIX: was a bare 'except:', which also swallowed
                            # KeyboardInterrupt/SystemExit. Log and keep scanning.
                            moduleLog.debug(''.join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)))
            except OSError: # directory doesnt exist, so no repo packages. :-)
                pass

    decorate(traceLog())
    def iterLatestPackages(self, cb=None):
        """Yield only the newest version of each package, in sorted name order.

        Packages restricted via 'limit_system_support' are keyed separately
        per system id so they never shadow a generic package.
        """
        latest = {}
        for candidate in self.iterPackages(cb=cb):
            pkgName = candidate.name
            if candidate.conf.has_option("package", "limit_system_support"):
                pkgName = pkgName + "_" + candidate.conf.get("package", "limit_system_support")
            p = latest.get(pkgName)
            if not p or p.compareVersion(candidate) < 0:
                latest[pkgName] = candidate
        ft.callCB(cb, who="iterLatestPackages", what="done_generating_list")
        keys = latest.keys()
        keys.sort()
        # loop variable renamed: 'package' shadowed the imported 'package' module
        for name in keys:
            ft.callCB(cb, who="iterLatestPackages", what="made_package", package=latest[name])
            yield latest[name]
firmware-tools-2.1.14/firmwaretools/trace_decorator.py 0000775 0017654 0017654 00000011437 10756414475 027550 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:textwidth=0:
# License: GPL2 or later see COPYING
# Written by Michael Brown
# Copyright (C) 2007 Michael E Brown
import logging
import os
import sys
import types
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# use python-decoratortools if it is installed, otherwise use our own local
# copy. Imported this locally because it doesnt appear to be available on SUSE
# and the fedora RPM doesnt appear to compile cleanly on SUSE
try:
from peak.util.decorators import rewrap, decorate
except ImportError:
from peak_util_decorators import rewrap, decorate
# defaults to module verbose log
# does a late binding on log. Forwards all attributes to logger.
# works around problem where reconfiguring the logging module means loggers
# configured before reconfig dont output.
class getLog(object):
    """Late-binding logger proxy.

    Only the logger *name* is stored; every attribute access looks the real
    logger up again, so reconfiguring the logging module after construction
    still takes effect. With no explicit name, the calling module's
    __name__ is used, prepended with *prefix*.
    """
    def __init__(self, name=None, prefix="", *args, **kargs):
        if name is None:
            # default to the caller's module name
            name = sys._getframe(1).f_globals["__name__"]
        self.name = prefix + name

    def __getattr__(self, name):
        # late-bind: fetch the logger fresh on each attribute access
        return getattr(logging.getLogger(self.name), name)
# emulates logic in logging module to ensure we only log
# messages that logger is enabled to produce.
def doLog(logger, level, *args, **kargs):
    """Hand a record to *logger* only if it would actually be emitted.

    Positional args/kargs are forwarded to logger.makeRecord(); if that
    logging implementation is too old to accept 'func', retry without it.
    """
    if logger.manager.disable >= level:
        return
    if not logger.isEnabledFor(level):
        return
    try:
        logger.handle(logger.makeRecord(logger.name, level, *args, **kargs))
    except TypeError:
        # older logging.makeRecord() has no 'func' parameter
        del kargs["func"]
        logger.handle(logger.makeRecord(logger.name, level, *args, **kargs))
# decorator factory: wraps a function so every call logs ENTER/LEAVE (and
# EXCEPTION) records at INFO level to a trace logger.
def traceLog(log = None):
    def decorator(func):
        def trace(*args, **kw):
            # default to logger that was passed by module, but
            # can override by passing logger=foo as function parameter.
            # make sure this doesnt conflict with one of the parameters
            # you are expecting
            filename = os.path.normcase(func.func_code.co_filename)
            func_name = func.func_code.co_name
            lineno = func.func_code.co_firstlineno
            l2 = kw.get('logger', log)
            if l2 is None:
                l2 = logging.getLogger("trace.%s" % func.__module__)
            if isinstance(l2, basestring):
                # a logger may be given by name instead of instance
                l2 = logging.getLogger(l2)
            message = "ENTER %s(" % func_name
            for arg in args:
                message = message + repr(arg) + ", "
            for k,v in kw.items():
                message = message + "%s=%s" % (k,repr(v))
            message = message + ")"
            # attribute the ENTER record to the *caller's* file/line
            frame = sys._getframe(2)
            doLog(l2, logging.INFO, os.path.normcase(frame.f_code.co_filename), frame.f_lineno, message, args=[], exc_info=None, func=frame.f_code.co_name)
            try:
                # sentinel result in case func raises something that is not
                # an Exception subclass (old-style/string exceptions)
                result = "Bad exception raised: Exception was not a derived class of 'Exception'"
                try:
                    result = func(*args, **kw)
                except (KeyboardInterrupt, Exception), e:
                    result = "EXCEPTION RAISED"
                    doLog(l2, logging.INFO, filename, lineno, "EXCEPTION: %s\n" % e, args=[], exc_info=sys.exc_info(), func=func_name)
                    raise
            finally:
                # LEAVE is logged even when an exception propagates
                doLog(l2, logging.INFO, filename, lineno, "LEAVE %s --> %s\n" % (func_name, repr(result)), args=[], exc_info=None, func=func_name)
            return result
        # rewrap() preserves the original signature/metadata on the wrapper
        return rewrap(func, trace)
    return decorator
# helper function so we can use back-compat format but not be ugly
def decorateAllFunctions(module, logger=None):
    """Wrap every plain function attribute of *module* with traceLog(logger)."""
    for attr in dir(module):
        candidate = getattr(module, attr)
        if isinstance(candidate, types.FunctionType):
            setattr(module, attr, traceLog(logger)(candidate))
# unit tests...
if __name__ == "__main__":
    # smoke-test the tracing machinery: exercise decorated functions,
    # per-call logger override, generators, and default loggers.
    logging.basicConfig(level=logging.WARNING,
        format='%(name)s %(levelname)s %(filename)s, %(funcName)s, Line: %(lineno)d: %(message)s',)
    log = getLog("foobar.bubble")
    root = getLog(name="")
    log.setLevel(logging.WARNING)
    root.setLevel(logging.DEBUG)
    log.debug(" --> debug")   # filtered out: 'log' is at WARNING
    log.error(" --> error")

    decorate(traceLog(log))
    def testFunc(arg1, arg2="default", *args, **kargs):
        return 42

    testFunc("hello", "world", logger=root)   # per-call logger override
    testFunc("happy", "joy", name="skippy")
    testFunc("hi")

    decorate(traceLog(root))
    def testFunc22():
        # nested call: both functions should produce trace records
        return testFunc("archie", "bunker")

    testFunc22()

    decorate(traceLog(root))
    def testGen():
        yield 1
        yield 2

    for i in testGen():
        log.debug("got: %s" % i)

    decorate(traceLog())
    def anotherFunc(*args):
        return testFunc(*args)

    anotherFunc("pretty")
    getLog()
firmware-tools-2.1.14/firmwaretools/peak_util_decorators.py 0000664 0017654 0017654 00000045553 10756403330 030601 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 from types import ClassType, FunctionType
import sys, os
__all__ = [
'decorate_class', 'metaclass_is_decorator', 'metaclass_for_bases',
'frameinfo', 'decorate_assignment', 'decorate', 'struct', 'classy',
'template_function', 'rewrap', 'cache_source', 'enclosing_frame',
]
def decorate(*decorators):
    """Apply '@'-style decorators on Python 2.3+.

    Example::

        class Foo(object):
            decorate(classmethod)
            def something(cls, etc):
                "This is a classmethod"

    Several decorators may be passed at once; they are applied in the same
    order Python 2.4's stacked '@' syntax would apply them (bottom-up).
    Lets one codebase support both Python 2.3 and 2.4+.
    """
    if len(decorators) > 1:
        decorators = list(decorators)
        decorators.reverse()

    def callback(frame, name, value, old_locals):
        # fold each decorator over the freshly-assigned function
        for dec in decorators:
            value = dec(value)
        return value

    return decorate_assignment(callback)
def enclosing_frame(frame=None, level=3):
    """Get an enclosing frame that skips DecoratorTools callback code"""
    if not frame:
        frame = sys._getframe(level)
    # walk outward past any frames belonging to this module
    while frame.f_globals.get('__name__') == __name__:
        frame = frame.f_back
    return frame
def name_and_spec(func):
    """Return (name, argspec-without-parens) for *func*.

    Anonymous (lambda) functions are reported as "anonymous", since
    '<lambda>' is not a valid identifier in generated source.
    """
    from inspect import formatargspec, getargspec
    funcname = func.__name__
    # BUGFIX: the comparison string had been mangled to '' (the literal
    # '<lambda>' was stripped as markup); lambdas were never renamed.
    if funcname=='<lambda>':
        funcname = "anonymous"
    args, varargs, kwargs, defaults = getargspec(func)
    return funcname, formatargspec(args, varargs, kwargs)[1:-1]
def qname(func):
    """Return the dotted 'module.name' for *func* (bare name if no module)."""
    module = func.__module__
    if module:
        return module + '.' + func.__name__
    return func.__name__
# Build a new function from the template in wrapper.__doc__, compile it with
# func's globals, and copy func's metadata (defaults/doc/dict) onto it.
def apply_template(wrapper, func, *args, **kw):
    funcname, argspec = name_and_spec(func)
    wrapname, wrapspec = name_and_spec(wrapper)
    # '$args' in the template expands to the wrapped function's argspec;
    # literal '%' must be escaped before the %-formatting below
    body = wrapper.__doc__.replace('%','%%').replace('$args','%(argspec)s')
    d ={}
    # NB: indentation inside this string is significant -- it becomes the
    # source code of the generated wrapper module
    body = """
def %(wrapname)s(%(wrapspec)s):
    def %(funcname)s(%(argspec)s): """+body+"""
    return %(funcname)s
"""
    body %= locals()
    # synthetic filename so tracebacks/debuggers can show the generated source
    filename = "<%s wrapping %s at 0x%08X>" % (qname(wrapper), qname(func), id(func))
    exec compile(body, filename, "exec") in func.func_globals, d
    f = d[wrapname](func, *args, **kw)
    # register the generated source with linecache for tracebacks
    cache_source(filename, body, f)
    # make the wrapper look like the original to help() and friends
    f.func_defaults = func.func_defaults
    f.__doc__ = func.__doc__
    f.__dict__ = func.__dict__
    return f
def rewrap(func, wrapper):
    """Create a wrapper with the signature of `func` and a body of `wrapper`

    Example::

        def before_and_after(func):
            def decorated(*args, **kw):
                print "before"
                try:
                    return func(*args, **kw)
                finally:
                    print "after"
            return rewrap(func, decorated)

    The above function is a normal decorator, but when users run ``help()``
    or other documentation tools on the returned wrapper function, they will
    see a function with the original function's name, signature, module name,
    etc.

    This function is similar in use to the ``@template_function`` decorator,
    but rather than generating the entire decorator function in one calling
    layer, it simply generates an extra layer for signature compatibility.

    NOTE: the function returned from ``rewrap()`` will have the same attribute
    ``__dict__`` as the original function, so if you need to set any function
    attributes you should do so on the function returned from ``rewrap()``
    (or on the original function), and *not* on the wrapper you're passing in
    to ``rewrap()``.
    """
    # NB: the inner function's docstring is NOT documentation -- it is the
    # executable template body consumed by apply_template()
    def rewrap(__original, __decorated):
        """return __decorated($args)"""
    return apply_template(rewrap, func, wrapper)
if sys.version<"2.5":
    # We'll need this for monkeypatching linecache: a backport of the
    # Python 2.5 linecache.checkcache() that tolerates our synthetic
    # cache entries (mtime of None).
    def checkcache(filename=None):
        """Discard cache entries that are out of date.
        (This is not checked upon each call!)"""
        if filename is None:
            filenames = linecache.cache.keys()
        else:
            if filename in linecache.cache:
                filenames = [filename]
            else:
                return
        for filename in filenames:
            size, mtime, lines, fullname = linecache.cache[filename]
            if mtime is None:
                continue   # no-op for files loaded via a __loader__
            try:
                stat = os.stat(fullname)
            except os.error:
                # underlying file vanished; drop the stale entry
                del linecache.cache[filename]
                continue
            if size != stat.st_size or mtime != stat.st_mtime:
                del linecache.cache[filename]
# Insert pre-split source lines into linecache under *filename*. When an
# *owner* object is given, the entry is dropped automatically once the owner
# is garbage-collected (via a weakref callback).
def _cache_lines(filename, lines, owner=None):
    if owner is None:
        owner = filename
    else:
        from weakref import ref
        # 'linecache and' guards against the module being torn down at
        # interpreter shutdown when the callback fires
        owner = ref(owner, lambda r: linecache and linecache.cache.__delitem__(filename))
    # import lazily and publish as a module global for the lambda above
    global linecache; import linecache
    if sys.version<"2.5" and linecache.checkcache.__module__!=__name__:
        # monkeypatch in our mtime-None-tolerant checkcache (see above)
        linecache.checkcache = checkcache
    # mtime None marks this as a synthetic entry that checkcache must skip
    linecache.cache[filename] = 0, None, lines, owner
# Register *source* (a single string) with linecache under *filename*, so
# tracebacks through generated code can display its lines.
def cache_source(filename, source, owner=None):
    _cache_lines(filename, source.splitlines(True), owner)
def template_function(wrapper=None):
    """Decorator that uses its wrapped function's docstring as a template

    Example::

        def before_and_after(func):
            @template_function
            def wrap(__func, __message):
                '''
                print "before", __message
                try:
                    return __func($args)
                finally:
                    print "after", __message
                '''
            return wrap(func, "test")

    The above code will return individually-generated wrapper functions whose
    signature, defaults, ``__name__``, ``__module__``, and ``func_globals``
    match those of the wrapped functions.

    You can use define any arguments you wish in the wrapping function, as long
    as the first argument is the function to be wrapped, and the arguments are
    named so as not to conflict with the arguments of the function being
    wrapped. (i.e., they should have relatively unique names.)

    Note that the function body will *not* have access to the globals of the
    calling module, as it is compiled with the globals of the *wrapped*
    function! Thus, any non-builtin values that you need in the wrapper should
    be passed in as arguments to the template function.
    """
    if wrapper is None:
        # called without arguments: behave as a 2.3-style assignment decorator
        return decorate_assignment(lambda f,k,v,o: template_function(v))
    # bind apply_template's first parameter to this wrapper template
    return apply_template.__get__(wrapper)
def struct(*mixins, **kw):
    """Turn a function into a simple data structure class

    This decorator creates a tuple subclass with the same name and docstring as
    the decorated function. The class will have read-only properties with the
    same names as the function's arguments, and the ``repr()`` of its instances
    will look like a call to the original function. The function should return
    a tuple of values in the same order as its argument names, as it will be
    used by the class' constructor. The function can perform validation, add
    defaults, and/or do type conversions on the values.

    If the function takes a ``*``, argument, it should flatten this argument
    into the result tuple, e.g.::

        @struct()
        def pair(first, *rest):
            return (first,) + rest

    The ``rest`` property of the resulting class will thus return a tuple for
    the ``*rest`` arguments, and the structure's ``repr()`` will reflect the
    way it was created.

    The ``struct()`` decorator takes optional mixin classes (as positional
    arguments), and dictionary entries (as keyword arguments). The mixin
    classes will be placed before ``tuple`` in the resulting class' bases, and
    the dictionary entries will be placed in the class' dictionary. These
    entries take precedence over any default entries (e.g. methods, properties,
    docstring, etc.) that are created by the ``struct()`` decorator.
    """
    def callback(frame, name, func, old_locals):
        def __new__(cls, *args, **kw):
            result = func(*args, **kw)
            if type(result) is tuple:
                # store the class as element 0 so properties index from 1
                return tuple.__new__(cls, (cls,)+result)
            else:
                # non-tuple results pass through (e.g. cached instances)
                return result
        def __repr__(self):
            # render like a call to the original function
            return name+tuple.__repr__(self[1:])
        import inspect
        args, star, dstar, defaults = inspect.getargspec(func)
        d = dict(
            __new__ = __new__, __repr__ = __repr__, __doc__=func.__doc__,
            __module__ = func.__module__, __args__ = args, __star__ = star,
            __slots__ = [],
        )
        # one read-only property per positional argument (offset by one
        # because self[0] is the class itself)
        for p,a in enumerate(args):
            if isinstance(a,str):
                d[a] = property(lambda self, p=p+1: self[p])
        if star:
            # the *args tail maps to a slice property
            d[star] = property(lambda self, p=len(args)+1: self[p:])
        # caller-supplied dict entries win over the generated defaults
        d.update(kw)
        return type(name, mixins+(tuple,), d)
    return decorate_assignment(callback)
def frameinfo(frame):
    """Return (kind, module, locals, globals) tuple for a frame

    'kind' is one of "exec", "module", "class", "function call", or "unknown".
    """
    loc = frame.f_locals
    glb = frame.f_globals
    shared_ns = loc is glb
    has_module = '__module__' in loc
    has_name = '__name__' in glb
    same_name = has_module and has_name and glb['__name__'] == loc['__module__']
    module = None
    if has_name:
        module = sys.modules.get(glb['__name__']) or None
    if not (module and module.__dict__ is glb):
        # globals aren't a real module dict: some kind of funky exec
        if has_module and not shared_ns:
            kind = "class"
        else:
            kind = "exec"
    elif shared_ns and not has_module:
        kind = "module"
    elif same_name and not shared_ns:
        kind = "class"
    elif not shared_ns:
        kind = "function call"
    else:
        # locals is globals yet '__module__' is set: probably module-level
        # code that happens to define a '__module__' variable
        kind = "unknown"
    return kind, module, loc, glb
def decorate_class(decorator, depth=2, frame=None, allow_duplicates=False):
    """Set up `decorator` to be passed the containing class upon creation

    This function is designed to be called by a decorator factory function
    executed in a class suite. The factory function supplies a decorator that
    it wishes to have executed when the containing class is created. The
    decorator will be given one argument: the newly created containing class.
    The return value of the decorator will be used in place of the class, so
    the decorator should return the input class if it does not wish to replace
    it.

    The optional `depth` argument to this function determines the number of
    frames between this function and the targeted class suite. `depth`
    defaults to 2, since this skips the caller's frame. Thus, if you call this
    function from a function that is called directly in the class suite, the
    default will be correct, otherwise you will need to determine the correct
    depth value yourself. Alternately, you can pass in a `frame` argument to
    explicitly indicate what frame is doing the class definition.

    This function works by installing a special class factory function in
    place of the ``__metaclass__`` of the containing class. Therefore, only
    decorators *after* the last ``__metaclass__`` assignment in the containing
    class will be executed. Thus, any classes using class decorators should
    declare their ``__metaclass__`` (if any) *before* specifying any class
    decorators, to ensure that all class decorators will be applied."""
    frame = enclosing_frame(frame, depth+1)
    kind, module, caller_locals, caller_globals = frameinfo(frame)
    if kind != "class":
        raise SyntaxError(
            "Class decorators may only be used inside a class statement"
        )
    elif not allow_duplicates and has_class_decorator(decorator, None, frame):
        # same decorator already queued for this class; install nothing
        return
    previousMetaclass = caller_locals.get('__metaclass__')
    defaultMetaclass = caller_globals.get('__metaclass__', ClassType)

    # class factory installed as __metaclass__; builds the class with the
    # proper metaclass, then hands it to the decorator
    def advise(name,bases,cdict):
        if '__metaclass__' in cdict:
            del cdict['__metaclass__']
        if previousMetaclass is None:
            if bases:
                # find best metaclass or use global __metaclass__ if no bases
                meta = metaclass_for_bases(bases)
            else:
                meta = defaultMetaclass
        elif metaclass_is_decorator(previousMetaclass):
            # special case: we can't compute the "true" metaclass here,
            # so we need to invoke the previous metaclass and let it
            # figure it out for us (and apply its own advice in the process)
            meta = previousMetaclass
        else:
            meta = metaclass_for_bases(bases, previousMetaclass)
        newClass = meta(name,bases,cdict)
        # this lets the decorator replace the class completely, if it wants to
        return decorator(newClass)

    # introspection data only, not used by inner function
    # Note: these attributes cannot be renamed or it will break compatibility
    # with zope.interface and any other code that uses this decoration protocol
    advise.previousMetaclass = previousMetaclass
    advise.callback = decorator

    # install the advisor
    caller_locals['__metaclass__'] = advise
def metaclass_is_decorator(ob):
    """True if 'ob' is a class advisor function"""
    if not isinstance(ob, FunctionType):
        return False
    # advisors are tagged with a 'previousMetaclass' attribute on install
    return hasattr(ob, 'previousMetaclass')
# Walk the chain of class-advisor "metaclasses" installed in a class suite,
# most recently installed first, yielding each advisor's decorator callback.
def iter_class_decorators(depth=2, frame=None):
    frame = enclosing_frame(frame, depth+1)
    m = frame.f_locals.get('__metaclass__')
    while metaclass_is_decorator(m):
        yield getattr(m, 'callback', None)
        # each advisor remembers the __metaclass__ it displaced
        m = m.previousMetaclass
def has_class_decorator(decorator, depth=2, frame=None):
    """True if *decorator* is already queued as a class advisor in *frame*."""
    if frame is None:
        frame = sys._getframe(depth)
    return decorator in iter_class_decorators(0, frame)
def metaclass_for_bases(bases, explicit_mc=None):
    """Determine metaclass from 1+ bases and optional explicit __metaclass__"""
    metas = []
    for base in bases:
        metas.append(getattr(base, '__class__', type(base)))
    if explicit_mc is not None:
        # The explicit metaclass needs to be verified for compatibility
        # as well, and allowed to resolve the incompatible bases, if any
        metas.append(explicit_mc)
    if len(metas) == 1:
        # easy case: only one candidate
        return metas[0]
    new_style = [m for m in metas if m is not ClassType]
    winners = []
    for m in new_style:
        # keep m only if no other candidate is a strict subclass of it
        dominated = False
        for other in new_style:
            if other is not m and issubclass(other, m):
                dominated = True
                break
        if not dominated:
            if m in winners:
                winners.remove(m)   # re-append so it ends up later in the list
            winners.append(m)
    if not winners:
        # they're all "classic" classes
        return ClassType
    if len(winners) > 1:
        # We could auto-combine, but for now we won't...
        raise TypeError("Incompatible metatypes", bases)
    # Just one, return it
    return winners[0]
def decorate_assignment(callback, depth=2, frame=None):
    """Invoke 'callback(frame,name,value,old_locals)' on next assign in 'frame'

    The frame monitored is determined by the 'depth' argument, which gets
    passed to 'sys._getframe()'. When 'callback' is invoked, 'old_locals'
    contains a copy of the frame's local variables as they were before the
    assignment took place, allowing the callback to access the previous value
    of the assigned variable, if any. The callback's return value will become
    the new value of the variable. 'name' is the name of the variable being
    created or modified, and 'value' is its value (the same as
    'frame.f_locals[name]').

    This function also returns a decorator function for forward-compatibility
    with Python 2.4 '@' syntax. Note, however, that if the returned decorator
    is used with Python 2.4 '@' syntax, the callback 'name' argument may be
    'None' or incorrect, if the 'value' is not the original function (e.g.
    when multiple decorators are used).
    """
    frame = enclosing_frame(frame, depth+1)
    oldtrace = [frame.f_trace]
    # snapshot locals so we can spot the new/changed binding later
    old_locals = frame.f_locals.copy()

    # line-trace hook: fires after every statement in 'frame' until the
    # awaited assignment appears, then applies the callback and uninstalls
    def tracer(frm,event,arg):
        if event=='call':
            # We don't want to trace into any calls
            if oldtrace[0]:
                # ...but give the previous tracer a chance to, if it wants
                return oldtrace[0](frm,event,arg)
            else:
                return None
        try:
            if frm is frame and event !='exception':
                # Aha, time to check for an assignment...
                for k,v in frm.f_locals.items():
                    if k not in old_locals or old_locals[k] is not v:
                        break
                else:
                    # No luck, keep tracing
                    return tracer
                # Got it, fire the callback, then get the heck outta here...
                frm.f_locals[k] = callback(frm,k,v,old_locals)
        finally:
            # Give the previous tracer a chance to run before we return
            if oldtrace[0]:
                # And allow it to replace our idea of the "previous" tracer
                oldtrace[0] = oldtrace[0](frm,event,arg)
        uninstall()
        return oldtrace[0]

    def uninstall():
        # Unlink ourselves from the trace chain.
        frame.f_trace = oldtrace[0]
        sys.settrace(oldtrace[0])

    # Install the trace function
    frame.f_trace = tracer
    sys.settrace(tracer)

    def do_decorate(f):
        # Python 2.4 '@' compatibility; call the callback
        uninstall()
        frame = sys._getframe(1)
        return callback(
            frame, getattr(f,'__name__',None), f, frame.f_locals
        )

    return do_decorate
# Yield the raw function objects implementing *attr* along cls.__mro__,
# nearest class first. Callers use .next as a "call the next implementation"
# hook. Uses im_func (Python 2 unbound-method attribute).
def super_next(cls, attr):
    for c in cls.__mro__:
        if attr in c.__dict__:
            yield getattr(c, attr).im_func
class classy_class(type):
    """Metaclass that delegates selected operations back to the class"""
    def __new__(meta, name, bases, cdict):
        # build a provisional class so its MRO can be searched, then let the
        # most-derived __class_new__ produce the real class; 'supr' lets it
        # chain to the next implementation in the MRO
        cls = super(classy_class, meta).__new__(meta, name, bases, cdict)
        supr = super_next(cls, '__class_new__').next
        return supr()(meta, name, bases, cdict, supr)

    def __init__(cls, name, bases, cdict):
        # delegate initialization to the class's own __class_init__ chain
        supr = super_next(cls, '__class_init__').next
        return supr()(cls, name, bases, cdict, supr)

    def __call__(cls, *args, **kw):
        # instance creation is delegated to the class's __class_call__
        return cls.__class_call__.im_func(cls, *args, **kw)
class classy(object):
    """Base class for classes that want to be their own metaclass"""
    __metaclass__ = classy_class
    __slots__ = ()

    # default hooks simply defer to 'type'; subclasses override these to
    # customize class creation/initialization/instantiation (see classy_class)
    def __class_new__(meta, name, bases, cdict, supr):
        return type.__new__(meta, name, bases, cdict)

    def __class_init__(cls, name, bases, cdict, supr):
        return type.__init__(cls, name, bases, cdict)

    def __class_call__(cls, *args, **kw):
        return type.__call__(cls, *args, **kw)
    __class_call__ = classmethod(__class_call__)
firmware-tools-2.1.14/firmwaretools/compat_subprocess.py 0000664 0017654 0017654 00000125166 10765775302 030144 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 basically work as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised by check_call() when a command exits with a non-zero
    status.

    The failing command is available as the cmd attribute and its
    exit status as the returncode attribute.
    """

    def __init__(self, returncode, cmd):
        self.cmd = cmd
        self.returncode = returncode

    def __str__(self):
        msg = "Command '%s' returned non-zero exit status %d"
        return msg % (self.cmd, self.returncode)
# Platform-specific support imports and shims.
if mswindows:
    import threading
    import msvcrt
    if 0: # <-- change this to use pywin32 instead of the _subprocess driver
        import pywintypes
        from win32api import GetStdHandle, STD_INPUT_HANDLE, \
             STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
        from win32api import GetCurrentProcess, DuplicateHandle, \
             GetModuleFileName, GetVersion
        from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
        from win32pipe import CreatePipe
        from win32process import CreateProcess, STARTUPINFO, \
                                 GetExitCodeProcess, STARTF_USESTDHANDLES, \
                                 STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
        from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
    else:
        # Use CPython's private _subprocess accelerator; provide minimal
        # stand-ins for the pywin32 names referenced elsewhere.
        from _subprocess import *
        class STARTUPINFO:
            # Mirrors the Win32 STARTUPINFO fields that _execute_child()
            # fills in.
            dwFlags = 0
            hStdInput = None
            hStdOutput = None
            hStdError = None
            wShowWindow = 0
        class pywintypes:
            # Lets `except pywintypes.error` work with either backend.
            error = IOError
else:
    # POSIX-only modules used by the fork/exec implementation below.
    import select
    import errno
    import fcntl
    import pickle
# Public API of this module.
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]

try:
    # Highest file descriptor number the OS allows; _close_fds() scans
    # up to this limit when close_fds=True is requested.
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    # sysconf may be missing or the name unsupported; fall back to a
    # conservative traditional limit.
    MAXFD = 256

# True/False does not exist on 2.2.0
try:
    False
except NameError:
    False = 0
    True = 1

# Popen objects whose children have not yet been reaped; __del__ parks
# instances here so a later _cleanup() can collect the zombie.
_active = []
def _cleanup():
    """Reap any finished children recorded in _active.

    Called from Popen.__init__ so abandoned Popen objects do not
    accumulate zombie processes.  _deadstate=sys.maxint makes poll()
    record a dummy exit status when the pid has already disappeared.
    """
    for inst in _active[:]:
        if inst.poll(_deadstate=sys.maxint) >= 0:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
# Special sentinel values for the stdin/stdout/stderr arguments.
PIPE = -1     # create a new pipe to the child
STDOUT = -2   # (stderr only) merge stderr into the stdout handle
def call(*popenargs, **kwargs):
    """Run a command, wait for it to finish, and return its returncode
    attribute.

    Arguments are identical to the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    process = Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run a command and wait for it to complete.

    Returns the exit code if it is zero; otherwise raises
    CalledProcessError with the exit code stored in its returncode
    attribute.

    Arguments are identical to the Popen constructor.  Example:

    check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    # Report the command exactly as the caller specified it: either the
    # args= keyword or the first positional argument.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    if retcode != 0:
        raise CalledProcessError(retcode, cmd)
    return retcode
def list2cmdline(seq):
    """Join *seq* into a single command-line string following the
    quoting rules of the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is interpreted
       as a single argument, regardless of white space contained
       within.  A quoted string can be embedded in an argument.

    3) A double quotation mark preceded by a backslash is interpreted
       as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they immediately
       precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash; an odd trailing backslash escapes the quote as in
       rule 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    def _quote(arg):
        # Render one argument with MS C runtime quoting applied.
        out = []
        needquote = (" " in arg) or ("\t" in arg) or arg == ""
        if needquote:
            out.append('"')

        pending = []          # run of backslashes not yet emitted
        for ch in arg:
            if ch == '\\':
                # Defer: whether these double depends on what follows.
                pending.append(ch)
            elif ch == '"':
                # Double the deferred backslashes, then escape the quote.
                out.append('\\' * len(pending) * 2)
                pending = []
                out.append('\\"')
            else:
                # Ordinary character: flush any deferred backslashes
                # literally first.
                if pending:
                    out.extend(pending)
                    pending = []
                out.append(ch)

        if pending:
            out.extend(pending)
        if needquote:
            # Trailing backslashes are doubled before the closing quote
            # so the quote itself stays un-escaped.
            out.extend(pending)
            out.append('"')
        return ''.join(out)

    return ' '.join([_quote(arg) for arg in seq])
class Popen(object):
    """Execute a child program in a new process.

    Python 2.2-compatible backport of the Python 2.4 subprocess.Popen
    class (see PEP 324); see the module docstring for the full
    description of the constructor arguments.
    """
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        """Create new Popen instance."""
        # Reap any zombies left by abandoned Popen objects first.
        _cleanup()

        self._child_created = False
        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")

        # Reject arguments that only make sense on the other platform.
        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds:
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms")
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread  <--stdout---  c2pwrite
        # errread  <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)

        # On Windows, you cannot just redirect one or two handles: You
        # either have to redirect all three or none. If the subprocess
        # user has only redirected one or two handles, we are
        # automatically creating PIPEs for the rest. We should close
        # these after the process is started. See bug #1124861.
        if mswindows:
            if stdin is None and p2cwrite is not None:
                os.close(p2cwrite)
                p2cwrite = None
            if stdout is None and c2pread is not None:
                os.close(c2pread)
                c2pread = None
            if stderr is None and errread is not None:
                os.close(errread)
                errread = None

        # Wrap the parent's pipe ends in file objects.
        # NOTE(review): these truthiness tests treat fd 0 as "no pipe";
        # later upstream subprocess uses `is not None` checks instead.
        # Only relevant if a standard fd was closed before Popen ran --
        # confirm before relying on that case.
        if p2cwrite:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
    def __del__(self):
        """Finaliser: avoid leaking a zombie child when the Popen object
        is garbage collected before being waited on."""
        if not self._child_created:
            # We didn't get to successfully create a child process.
            return
        # In case the child hasn't been waited on, check if it's done.
        # sys.maxint is the _deadstate sentinel: if waitpid fails, record
        # that value so the instance is considered finished.
        self.poll(_deadstate=sys.maxint)
        if self.returncode is None and _active is not None:
            # Child is still running, keep us alive until we can wait on it.
            _active.append(self)
    def communicate(self, input=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr)."""

        # Optimization: If we are only using one pipe, or no pipe at
        # all, using select() or threads is unnecessary.
        if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
            stdout = None
            stderr = None
            if self.stdin:
                if input:
                    self.stdin.write(input)
                self.stdin.close()
            elif self.stdout:
                stdout = self.stdout.read()
            elif self.stderr:
                stderr = self.stderr.read()
            self.wait()
            return (stdout, stderr)

        # Two or more pipes in play: hand off to the platform-specific
        # select/thread implementation to avoid pipe-buffer deadlock.
        return self._communicate(input)
    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite

            Each of stdin/stdout/stderr may be None (inherit), PIPE, an
            integer fd, or a file-like object with fileno().
            """
            if stdin is None and stdout is None and stderr is None:
                # Nothing redirected: let the child inherit everything.
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
            if p2cread is not None:
                pass
            elif stdin is None or stdin == PIPE:
                # No usable standard handle, or an explicit PIPE request:
                # create a fresh pipe for the child's stdin.
                p2cread, p2cwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                p2cwrite = p2cwrite.Detach()
                p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
            if c2pwrite is not None:
                pass
            elif stdout is None or stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                c2pread = c2pread.Detach()
                c2pread = msvcrt.open_osfhandle(c2pread, 0)
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
            if errwrite is not None:
                pass
            elif stderr is None or stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                errread = errread.Detach()
                errread = msvcrt.open_osfhandle(errread, 0)
            elif stderr == STDOUT:
                # Merge stderr into whatever stdout resolved to above.
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)
        def _find_w9xpopen(self):
            """Find and return absolute path to w9xpopen.exe, the
            intermediate launcher required on Win9x/command.com (see
            _execute_child)."""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""

            # CreateProcess operates on a single command-line string.
            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                # All three handles available: pass them to the child.
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                # Run through the command interpreter, window hidden.
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = comspec + " /c " + args
                if (GetVersion() >= 0x80000000L or
                    os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                         # no special security
                                         None, None,
                                         # must inherit handles to pass std
                                         # handles
                                         1,
                                         creationflags,
                                         env,
                                         cwd,
                                         startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = hp
            self.pid = pid
            ht.Close()

            # Child is launched. Close the parent's copy of those pipe
            # handles that only the child should have open.  You need
            # to make sure that no handles to the write end of the
            # output pipe are maintained in this process or else the
            # pipe will not close when the child process exits and the
            # ReadFile will hang.
            if p2cread is not None:
                p2cread.Close()
            if c2pwrite is not None:
                c2pwrite.Close()
            if errwrite is not None:
                errwrite.Close()
        def poll(self, _deadstate=None):
            """Check if child process has terminated.  Returns returncode
            attribute.

            _deadstate is accepted for signature compatibility with the
            POSIX implementation; the handle-based wait cannot lose the
            child the same way, so it is unused here.
            """
            if self.returncode is None:
                # Zero timeout: returns immediately whether or not the
                # child has exited.
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                obj = WaitForSingleObject(self._handle, INFINITE)
                self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def _readerthread(self, fh, buffer):
            # Drain *fh* completely and stash the result in *buffer*;
            # run on a helper thread so stdout and stderr can be read
            # concurrently without deadlocking (see _communicate).
            buffer.append(fh.read())
        def _communicate(self, input):
            """Thread-based pump (Windows has no select() on pipes):
            read stdout/stderr on helper threads while feeding *input*
            to stdin, then reap the child.  Returns (stdout, stderr)."""
            stdout = None # Return
            stderr = None # Return

            if self.stdout:
                stdout = []
                stdout_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stdout, stdout))
                stdout_thread.setDaemon(True)
                stdout_thread.start()
            if self.stderr:
                stderr = []
                stderr_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stderr, stderr))
                stderr_thread.setDaemon(True)
                stderr_thread.start()

            if self.stdin:
                if input is not None:
                    self.stdin.write(input)
                self.stdin.close()

            # Wait for the reader threads to hit EOF on their pipes.
            if self.stdout:
                stdout_thread.join()
            if self.stderr:
                stderr_thread.join()

            # All data exchanged.  Translate lists into strings.
            # (Each reader thread appended exactly one string.)
            if stdout is not None:
                stdout = stdout[0]
            if stderr is not None:
                stderr = stderr[0]

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)
    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite

            Each of stdin/stdout/stderr may be None (inherit), PIPE, an
            integer fd, or a file-like object with fileno().
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = os.pipe()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = os.pipe()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = os.pipe()
            elif stderr == STDOUT:
                # Merge stderr into whatever stdout resolved to above.
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""

            if isinstance(args, types.StringTypes):
                args = [args]
            else:
                args = list(args)

            if shell:
                args = ["/bin/sh", "-c"] + args

            if executable is None:
                executable = args[0]

            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = os.pipe()
            # CLOEXEC: the write end vanishes on a successful exec, so
            # the parent's read below returns "" exactly when exec worked.
            self._set_cloexec_flag(errpipe_write)

            self.pid = os.fork()
            self._child_created = True
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    # NOTE(review): the bare truthiness checks below (and
                    # in the parent section) treat fd 0 as "absent";
                    # later upstream subprocess switched to `is not None`.
                    # Only matters if a standard fd was closed before
                    # Popen ran -- confirm before relying on that case.
                    if p2cwrite:
                        os.close(p2cwrite)
                    if c2pread:
                        os.close(c2pread)
                    if errread:
                        os.close(errread)
                    os.close(errpipe_read)

                    # Dup fds for child
                    if p2cread:
                        os.dup2(p2cread, 0)
                    if c2pwrite:
                        os.dup2(c2pwrite, 1)
                    if errwrite:
                        os.dup2(errwrite, 2)

                    # Close pipe fds.  Make sure we don't close the same
                    # fd more than once, or standard fds.
                    if p2cread and p2cread not in (0,):
                        os.close(p2cread)
                    if c2pwrite and c2pwrite not in (p2cread, 1):
                        os.close(c2pwrite)
                    if errwrite and errwrite not in (p2cread, c2pwrite, 2):
                        os.close(errwrite)

                    # Close all other fds, if asked for
                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if cwd is not None:
                        os.chdir(cwd)

                    if preexec_fn:
                        apply(preexec_fn)

                    if env is None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)

                except:
                    # Anything that went wrong before/during exec: ship
                    # the pickled exception (with a child-side traceback
                    # attached) to the parent over the error pipe.
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))

                # This exitcode won't be reported to applications, so it
                # really doesn't matter what we return.
                os._exit(255)

            # Parent
            os.close(errpipe_write)
            # Close the child's ends of any pipes we created.
            if p2cread and p2cwrite:
                os.close(p2cread)
            if c2pwrite and c2pread:
                os.close(c2pwrite)
            if errwrite and errread:
                os.close(errwrite)

            # Wait for exec to fail or succeed; possibly raising exception
            data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
            os.close(errpipe_read)
            if data != "":
                # exec failed: reap the dead child, then re-raise its
                # exception here in the parent.
                os.waitpid(self.pid, 0)
                child_exception = pickle.loads(data)
                raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
        def poll(self, _deadstate=None):
            """Check if child process has terminated.  Returns returncode
            attribute.

            _deadstate is used internally (see _cleanup/__del__): if
            waitpid fails because the pid is already gone, returncode is
            forced to that value instead of staying None.
            """
            if self.returncode is None:
                try:
                    # WNOHANG: return immediately if the child is still
                    # running (pid will be 0 in that case).
                    pid, sts = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except os.error:
                    if _deadstate is not None:
                        self.returncode = _deadstate
            return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
pid, sts = os.waitpid(self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
        def _communicate(self, input):
            """select()-based pump: feed *input* to stdin while draining
            stdout/stderr, then reap the child.  Returns the tuple
            (stdout, stderr)."""
            read_set = []
            write_set = []
            stdout = None # Return
            stderr = None # Return

            if self.stdin:
                # Flush stdio buffer.  This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if input:
                    write_set.append(self.stdin)
                else:
                    self.stdin.close()
            if self.stdout:
                read_set.append(self.stdout)
                stdout = []
            if self.stderr:
                read_set.append(self.stderr)
                stderr = []

            input_offset = 0
            while read_set or write_set:
                rlist, wlist, xlist = select.select(read_set, write_set, [])

                if self.stdin in wlist:
                    # When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk
                    # blocking.  POSIX defines PIPE_BUF >= 512
                    bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
                    input_offset += bytes_written
                    if input_offset >= len(input):
                        self.stdin.close()
                        write_set.remove(self.stdin)

                if self.stdout in rlist:
                    data = os.read(self.stdout.fileno(), 1024)
                    if data == "":
                        # EOF: child closed its end of the pipe.
                        self.stdout.close()
                        read_set.remove(self.stdout)
                    stdout.append(data)

                if self.stderr in rlist:
                    data = os.read(self.stderr.fileno(), 1024)
                    if data == "":
                        self.stderr.close()
                        read_set.remove(self.stderr)
                    stderr.append(data)

            # All data exchanged.  Translate lists into strings.
            if stdout is not None:
                stdout = ''.join(stdout)
            if stderr is not None:
                stderr = ''.join(stderr)

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
print >>sys.stderr, "Gosh. No error."
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
    # Manual test entry point: run the demo suited to the current platform.
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
firmware-tools-2.1.14/firmwaretools/__init__.py 0000664 0017654 0017654 00000022632 11376536320 026134 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Firmware-tools: update infrastructure for firmware
"""
import ConfigParser
import fcntl
import glob
import logging
import logging.config
import os
import sys
from trace_decorator import decorate, traceLog, getLog
import errors
import repository
#import config
import plugins
def mkselfrelpath(*args):
    """Return a normalized path built relative to the directory that
    contains this module (used for running from a source checkout)."""
    base = os.path.dirname(os.path.realpath(__file__))
    return os.path.normpath(os.path.join(base, *args))
# these are replaced by autotools when installed.
__VERSION__="unreleased_version"
# Directory defaults computed relative to this module so the tools also
# work uninstalled from a source tree.
SYSCONFDIR=mkselfrelpath("..", "etc")
PYTHONDIR=mkselfrelpath("..")
PKGPYTHONDIR=mkselfrelpath("..", "firmwaretools")
PKGDATADIR=mkselfrelpath("..", "ft-cli")
DATADIR=mkselfrelpath("..", "ft-cli")
PKGCONFDIR=os.path.join(SYSCONFDIR,"firmware")
LOCALSTATEDIR=mkselfrelpath("..", "var")
# end build system subs
# Lock file used by FtBase.lock()/unlock() to serialize privileged runs.
PID_FILE = '/var/run/ft.pid'
class confObj(object):
    """Attribute bag with case-insensitive attribute names.

    Names are folded to lower case on both set and get, so conf.Foo,
    conf.foo and conf.FOO all address the same value.
    """
    def __getattribute__(self, name):
        return object.__getattribute__(self, name.lower())

    def __setattr__(self, name, val):
        object.__setattr__(self, name.lower(), val)
decorate(traceLog())
def callCB(cb, *args, **kargs):
    """Invoke callback *cb* with the given arguments.

    A None callback is a no-op, and a TypeError raised by a callback
    with an incompatible signature is silently swallowed; in both cases
    None is returned.
    """
    if cb is not None:
        try:
            return cb(*args, **kargs)
        except TypeError:
            pass
    return None
class Callback(object):
    """Base class for progress callbacks.

    Calling an instance dispatches to the method named by the 'what'
    keyword argument; events with no matching handler are ignored and
    return None.
    """
    def __init__(self):
        pass

    def __call__(self, *args, **kargs):
        handler = getattr(self, kargs.get("what", "UNKNOWN"), None)
        if handler is None:
            return None
        return handler(*args, **kargs)
class FtBase(object):
    """This is a primary structure and base class. It houses the objects and
       methods needed to perform most things . It is almost an abstract
       class in that you will need to add your own class above it for most
       real use."""

    def __init__(self):
        # Loggers: "firmwaretools" for normal output, "verbose.*" for -v.
        self.logger = getLog()
        self.verbose_logger = getLog(prefix="verbose.")

        self.cmdargs = []
        self.cb = None  # active progress callback (Callback instance or None)

        # Lazily-built singletons, exposed through the properties below.
        self._conf = None
        self._repo = None
        self._systemInventory = None
        self._vendorId = None
        self._systemId = None

        self.verbosity = 0
        self.trace = 0
        self.loggingConfig = os.path.join(PKGCONFDIR, "firmware.conf")

        # Start with plugins disabled
        self.disablePlugins()

    def _getConfig(self, cfgFiles=None, pluginTypes=(plugins.TYPE_CORE, plugins.TYPE_INVENTORY,), optparser=None, disabledPlugins=None):
        # Backing getter for the 'conf' property: parses config files,
        # configures logging and loads plugins exactly once; later calls
        # return the cached configuration.
        if self._conf is not None:
            return self._conf

        if cfgFiles is None:
            cfgFiles = [os.path.join(PKGCONFDIR, "firmware.conf"),]

        if disabledPlugins is None:
            disabledPlugins = []

        # Assigning through the 'conf' property stores into self._conf.
        self.conf = confObj()

        self.setupLogging(self.loggingConfig, self.verbosity, self.trace)
        self.setConfFromIni(cfgFiles)
        self.conf.uid = os.geteuid()

        self.doPluginSetup(optparser, pluginTypes, disabledPlugins)

        return self._conf

    def setupLogging(self, configFile, verbosity=1, trace=0):
        # set up logging
        logging.config.fileConfig(configFile)
        root_log = logging.getLogger()
        ft_log = logging.getLogger("firmwaretools")
        ft_verbose_log = logging.getLogger("verbose")
        ft_trace_log = logging.getLogger("trace")
        # Everything is squelched by default; each verbosity level
        # re-enables propagation of one more logger to the root handlers.
        ft_log.propagate = 0
        ft_trace_log.propagate = 0
        ft_verbose_log.propagate = 0
        if verbosity >= 1:
            ft_log.propagate = 1
        if verbosity >= 2:
            ft_verbose_log.propagate = 1
        if verbosity >= 3:
            for hdlr in root_log.handlers:
                hdlr.setLevel(logging.DEBUG)
        if trace:
            ft_trace_log.propagate = 1

    decorate(traceLog())
    def setConfFromIni(self, cfgFiles):
        # Defaults made available for %()s interpolation in the .ini files.
        defaults = {
            "sysconfdir": SYSCONFDIR,
            "pythondir": PYTHONDIR,
            "datadir": DATADIR,
            "pkgpythondir": PKGPYTHONDIR,
            "pkgdatadir": PKGDATADIR,
            "pkgconfdir": PKGCONFDIR,
            "localstatedir": LOCALSTATEDIR,
        }
        self._ini = ConfigParser.SafeConfigParser(defaults)
        for i in cfgFiles:
            self._ini.read(i)

        mapping = {
            # conf.WHAT   :  (iniSection, iniOption, default)
            "storageTopdir": ('main', 'storage_topdir', "%s/firmware" % DATADIR),
            "pluginSearchPath": ('main', 'plugin_search_path', os.path.join(PKGDATADIR, "plugins")),
            "pluginConfDir": ('main', 'plugin_config_dir', os.path.join(PKGCONFDIR, "firmware.d")),
            "rpmMode": ('main', 'rpm_mode', "manual"),
        }
        for key, val in mapping.items():
            if self._ini.has_option( val[0], val[1] ):
                setattr(self.conf, key, self._ini.get(val[0], val[1]))
            else:
                setattr(self.conf, key, val[2])

        # read plugin configs
        for i in glob.glob( "%s/*.conf" % self.conf.pluginConfDir ):
            self._ini.read(i)

    decorate(traceLog())
    def listPluginsFromIni(self):
        # Plugin names are ini sections of the form [plugin:<name>].
        return [x[len("plugin:"):] for x in self._ini.sections() if x.startswith("plugin:")]

    decorate(traceLog())
    def getPluginConfFromIni(self, plugin):
        # Build a confObj from the [plugin:<name>] section.  A plugin
        # without a 'module' setting can never be loaded, so it is
        # forced to disabled.
        section = "plugin:%s" % plugin
        conf = confObj()
        conf.module = None
        conf.enabled = False
        conf.search = None
        for i in self._ini.options(section):
            setattr(conf, i, self._ini.get(section, i))
        #required ("enabled", "module"):
        if getattr(conf, "module", None) is None:
            conf.enabled = False
        return conf

    # called early so no tracing.
    def disablePlugins(self):
        '''Disable plugins
        '''
        self.plugins = plugins.DummyPlugins()

    decorate(traceLog())
    def doPluginSetup(self, optparser=None, pluginTypes=None, disabledPlugins=None):
        # Replace the dummy plugin container with the real one; loading
        # twice is a programming error.
        if isinstance(self.plugins, plugins.Plugins):
            raise RuntimeError("plugins already initialised")
        self.plugins = plugins.Plugins(self, optparser, pluginTypes, disabledPlugins)

    decorate(traceLog())
    def _getRepo(self):
        # Backing getter for the 'repo' property (created on first use).
        if self._repo is not None:
            return self._repo
        self._repo = repository.Repository( self.conf.storageTopdir )
        return self._repo

    decorate(traceLog())
    def _getInventory(self):
        # Backing getter for 'systemInventory': runs the three-phase
        # plugin inventory (pre/main/post) exactly once.
        if self._systemInventory is not None:
            return self._systemInventory
        self._systemInventory = repository.SystemInventory()
        self.plugins.run("preinventory", inventory=self._systemInventory)
        self.plugins.run("inventory", inventory=self._systemInventory)
        self.plugins.run("postinventory", inventory=self._systemInventory)
        return self._systemInventory

    decorate(traceLog())
    def calculateUpgradeList(self, cb=None):
        # Register every repository package as a candidate update, then
        # let the inventory resolve what actually applies.  The previous
        # callback is always restored.
        saveCb = self.cb
        self.cb = cb
        try:
            for candidate in self.repo.iterPackages(cb=cb):
                self.systemInventory.addAvailablePackage(candidate)
            self.systemInventory.calculateUpgradeList(cb)
        finally:
            self.cb = saveCb
        return self.systemInventory

    # properties so they auto-create themselves with defaults
    repo = property(fget=lambda self: self._getRepo(),
                    fset=lambda self, value: setattr(self, "_repo", value))
    conf = property(fget=lambda self: self._getConfig(),
                    fset=lambda self, value: setattr(self, "_conf", value),
                    fdel=lambda self: setattr(self, "_conf", None))
    systemInventory = property(
        fget=lambda self: self._getInventory(),
        fset=lambda self, value: setattr(self, "_systemInventory", value),
        fdel=lambda self: setattr(self, "_systemInventory", None))

    decorate(traceLog())
    def lock(self):
        # Only root takes the global lock file; a held lock raises
        # LockError instead of blocking (LOCK_NB).
        if self.conf.uid == 0:
            self.runLock = open(PID_FILE, "a+")
            try:
                fcntl.lockf(self.runLock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError, e:
                raise errors.LockError, "unable to obtain exclusive lock."

    decorate(traceLog())
    def unlock(self):
        # Release and remove the lock file taken by lock().
        if self.conf.uid == 0:
            fcntl.lockf(self.runLock.fileno(), fcntl.LOCK_UN)
            os.unlink(PID_FILE)

    decorate(traceLog())
    def setSystemId(self, vendorId, systemId):
        # Record the vendor/system identifiers; zero or empty ids are
        # rejected because they are used for update matching.
        if not (vendorId and systemId):
            raise RuntimeError("Need non-null, non-zero, id for vendor and system id.")
        self._vendorId = vendorId
        self._systemId = systemId

    decorate(traceLog())
    def getSystemId(self):
        # Returns (vendorId, systemId) as set by setSystemId (None, None
        # if never set).
        return (self._vendorId, self._systemId)

    decorate(traceLog())
    def yieldInventory(self, cb=None):
        # Generator yielding each device from the system inventory with
        # *cb* installed as the active callback while iterating.
        # NOTE(review): the previous callback is only restored on an
        # exception, not on normal exhaustion -- try/finally around a
        # yield is a SyntaxError before Python 2.5, which this codebase
        # supports, so this asymmetry appears deliberate.  Callers
        # should not rely on self.cb afterwards.
        saveCb = self.cb
        try:
            self.cb = cb
            for dev in self.systemInventory.iterDevices():
                yield dev
        except:
            self.cb = saveCb
            raise
firmware-tools-2.1.14/ft-cli/ 0000777 0017654 0017654 00000000000 11452664762 022310 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/ft-cli/plugins/ 0000777 0017654 0017654 00000000000 11452664762 023771 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/ft-cli/plugins/bootstrap_cmd.py 0000664 0017654 0017654 00000007160 11310074510 027160 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) 2008 Dell Inc.
# by Michael Brown
"""
Classes for subcommands of the yum command line interface.
"""
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
import ftcommands
# Plugin metadata read by the firmwaretools plugin loader.
plugin_type = (plugins.TYPE_CLI,)
requires_api_version = "2.0"
moduleLog = getLog()
def config_hook(conduit, *args, **kargs):
    """Plugin config hook: wire up the --bootstrap mode.

    Registers --bootstrap for early parsing, adds the option itself, and
    registers the command object that implements the mode.
    """
    conduit.getOptParser().addEarlyParse("--bootstrap")
    conduit.getOptParser().add_option(
        "-b", "--bootstrap",
        help="List the bootstrap inventory",
        action="store_const",
        const="bootstrap",
        dest="mode",
        default=None)
    conduit.getBase().registerCommand(BootstrapCommand())
class BootstrapCommand(ftcommands.YumCommand):
    """Implements the --bootstrap CLI mode: print the bootstrap package
    name(s) for every device in the system inventory."""

    decorate(traceLog())
    def getModes(self):
        # This command handles only the 'bootstrap' mode.
        return ['bootstrap']

    decorate(traceLog())
    def addSubOptions(self, base, mode, cmdline, processedArgs):
        # need to add bootstrap-specific options to optparser
        base.optparser.add_option("-u", "--up2date_mode", action="store_true", dest="comma_separated", default=False, help="Comma-separate values for use with up2date.")
        base.optparser.add_option("-a", "--apt_mode", action="store_true", dest="apt_mode", default=False, help="fixup names so that they are compatible with apt")

    decorate(traceLog())
    def doCommand(self, base, mode, cmdline, processedArgs):
        # Output is one name per line, or (with --up2date_mode) a single
        # comma-separated line; --apt_mode sanitizes names for apt.
        parse=str
        if base.opts.apt_mode:
            parse = debianCleanName
        venId, sysId = base.getSystemId()
        out = ""
        for pkg in base.yieldInventory():
            if base.opts.comma_separated:
                # Accumulate ",name" pieces; the leading comma is
                # stripped after the loop.
                if venId and sysId:
                    out = out + ",%s" % parse(pkg.name + "/system(ven_0x%04x_dev_0x%04x)" % (venId, sysId))
                out = out + ",%s" % parse(pkg.name)
                try:
                    # Not every package has a shortname attribute.
                    if venId and sysId:
                        out = out + ",%s" % parse(pkg.shortname + "/system(ven_0x%04x_dev_0x%04x)" % (venId, sysId))
                    out = out + ",%s" % parse(pkg.shortname)
                except AttributeError:
                    pass
            else:
                if venId and sysId:
                    print("%s/system(ven_0x%04x_dev_0x%04x)" % (parse(pkg.name), venId, sysId))
                print("%s" % parse(pkg.name))
                try:
                    if venId and sysId:
                        print("%s/system(ven_0x%04x_dev_0x%04x)" % (parse(pkg.shortname), venId, sysId))
                    print("%s" % parse(pkg.shortname))
                except AttributeError:
                    pass
        # strip leading comma:
        out = out[1:]
        if out:
            print(out)
        return [0, "Done"]
# used by bootstrap
decorate(traceLog())
def debianCleanName(s):
    """Fold a package name into an apt-compatible form: underscores and
    '(' become '-', ')' is dropped, and the result is lowercased."""
    for old, new in (('_', '-'), ('(', '-'), (')', '')):
        s = s.replace(old, new)
    return s.lower()
firmware-tools-2.1.14/ft-cli/plugins/inventory_cmd.py 0000664 0017654 0017654 00000005033 11121512226 027176 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) 2008 Dell Inc.
# by Michael Brown
"""
Classes for subcommands of the yum command line interface.
"""
import sys
import firmwaretools.pycompat
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
import ftcommands
import cli
# Plugin metadata read by the firmwaretools plugin loader.
plugin_type = (plugins.TYPE_CLI,)
requires_api_version = "2.0"
moduleLog = getLog()
def config_hook(conduit, *args, **kargs):
    """Plugin config hook: wire up the --inventory mode.

    Registers --inventory for early parsing, adds the option itself,
    and registers the command object that implements the mode.
    """
    conduit.getOptParser().addEarlyParse("--inventory")
    conduit.getOptParser().add_option(
        "--inventory",
        help="List system inventory",
        action="store_const",
        const="inventory",
        dest="mode",
        default=None)
    conduit.getBase().registerCommand(InventoryCommand())
class InventoryCommand(ftcommands.YumCommand):
    """Implements the --inventory CLI mode: print every inventoried
    device with its firmware version."""

    decorate(traceLog())
    def getModes(self):
        return ['inventory']

    decorate(traceLog())
    def addSubOptions(self, base, mode, cmdline, processedArgs):
        base.optparser.add_option(
            "--show-unknown", help="Show unknown devices.",
            action="store_true", dest="show_unknown", default=False)

    decorate(traceLog())
    def doCommand(self, base, mode, cmdline, processedArgs):
        sys.stderr.write("Wait while we inventory system:\n")
        printedHeader = False
        for pkg in base.yieldInventory(cb=cli.mycb({})):
            if not printedHeader:
                # Overwrite the spinner line before real output starts.
                sys.stderr.write(firmwaretools.pycompat.clearLine())
                sys.stderr.write("System inventory:\n")
                sys.stderr.flush()
                printedHeader = True
            # Devices with unknown versions are hidden unless requested.
            if pkg.version == "unknown" and not base.opts.show_unknown:
                continue
            print("\t%s = %s" % (str(pkg), pkg.version))
        return [0, "Done"]
firmware-tools-2.1.14/ft-cli/plugins/listplugins_cmd.py 0000664 0017654 0017654 00000003613 11121512226 027520 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) 2008 Dell Inc.
# by Michael Brown
"""
Classes for subcommands of the yum command line interface.
"""
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
import ftcommands
# Plugin metadata read by the firmwaretools plugin loader.
plugin_type = (plugins.TYPE_CLI,)
requires_api_version = "2.0"
moduleLog = getLog()
def config_hook(conduit, *args, **kargs):
    """Plugin config hook: wire up the --listplugins mode.

    Registers --listplugins for early parsing, adds the option itself,
    and registers the command object that implements the mode.
    """
    conduit.getOptParser().addEarlyParse("--listplugins")
    conduit.getOptParser().add_option(
        "--listplugins",
        action="store_const",
        const="listplugins",
        dest="mode",
        help="list available plugins.")
    conduit.getBase().registerCommand(ListPluginsCommand())
class ListPluginsCommand(ftcommands.YumCommand):
    """Implements the --listplugins CLI mode: show the plugins named in
    the configuration and those actually loaded."""

    decorate(traceLog())
    def getModes(self):
        return ['listplugins']

    decorate(traceLog())
    def doCommand(self, base, mode, cmdline, processedArgs):
        print("Available Plugins:")
        for name in base.listPluginsFromIni():
            print("\t%s" % name)
        print("Loaded Plugins:")
        for name in base.plugins.listLoaded():
            print("\t%s" % name)
        return [0, "Done"]
firmware-tools-2.1.14/ft-cli/plugins/update_cmd.py 0000664 0017654 0017654 00000006347 11310074510 026433 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) 2008 Dell Inc.
# by Michael Brown
"""
Classes for subcommands of the yum command line interface.
"""
from firmwaretools.trace_decorator import decorate, traceLog, getLog
import firmwaretools.plugins as plugins
import ftcommands
# Plugin metadata read by the firmwaretools plugin loader.
plugin_type = (plugins.TYPE_CLI,)
requires_api_version = "2.0"
moduleLog = getLog()
moduleVerboseLog = getLog(prefix="verbose.")
def config_hook(conduit, *args, **kargs):
    """Plugin config hook: wire up the --update mode.

    Registers --update for early parsing, adds the option itself, and
    registers the command object that implements the mode.
    """
    conduit.getOptParser().addEarlyParse("--update")
    conduit.getOptParser().add_option(
        "--update",
        help="Update the system's firmware.",
        action="store_const",
        const="update",
        dest="mode")
    conduit.getBase().registerCommand(UpdateCommand())
class UpdateCommand(ftcommands.YumCommand):
    """Implements the --update CLI mode: apply available firmware
    updates (interactive by default, see --yes/--test)."""

    decorate(traceLog())
    def getModes(self):
        # This command handles only the 'update' mode.
        return ['update']

    decorate(traceLog())
    def addSubOptions(self, base, mode, cmdline, processedArgs):
        # interactive: 1 = prompt (default), 0 = --yes, 2 = --test.
        base.optparser.add_option("--rpm", action="store_true", dest="rpmMode", default=False, help="Used when running as part of an rpm \%post script.")
        base.optparser.add_option("--yes", "-y", action="store_const", const=0, dest="interactive", default=1, help="Default all answers to 'yes'.")
        base.optparser.add_option("--test", "-t", action="store_const", const=2, dest="interactive", help="Perform test but do not actually update.")
        base.optparser.add_option( "--show-unknown", help="Show unknown devices.", action="store_true", dest="show_unknown", default=False)
        base.optparser.add_option( "--storage-topdir", help="Override configured storage topdir.", action="store", dest="storage_topdir", default=None)

    decorate(traceLog())
    def doCheck(self, base, mode, cmdline, processedArgs):
        moduleVerboseLog.info("hello world from update module doCheck()")
        # Apply --storage-topdir before the repository object is created.
        if base.opts.storage_topdir is not None:
            moduleLog.info("overriding storage topdir. Original: %s New: %s" % (base.conf.storageTopdir, base.opts.storage_topdir))
            base.conf.storageTopdir = base.opts.storage_topdir

    decorate(traceLog())
    def doCommand(self, base, mode, cmdline, processedArgs):
        # When invoked from an rpm %post (--rpm), only proceed if the
        # config opted in to automatic installs.
        if base.opts.rpmMode:
            if base.conf.rpmMode != 'auto':
                print "Config does not specify automatic install during package install."
                print "Please run update_firmware manually to install updates."
                return [0, "Done"]
        base.updateFirmware(base.opts.show_unknown)
        return [0, "Done"]
firmware-tools-2.1.14/ft-cli/cli.py 0000664 0017654 0017654 00000031324 11121512226 023406 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) 2008 Dell Inc.
# by Michael Brown
#
# Based in part on design and code in Yum 3.2:
# Copyright 2006 Duke University
# Written by Seth Vidal
"""
Command line interface class and related.
"""
import os
import re
import sys
import time
import random
import logging
from optparse import OptionParser
import firmwaretools
import firmwaretools.plugins as plugins
from firmwaretools.trace_decorator import decorate, traceLog, getLog
from firmwaretools.i18n import _
import signal
import ftcommands
from firmwaretools import errors
__VERSION__=firmwaretools.__VERSION__
def sigquit(signum, frame):
    """SIGQUIT handler for the cli: report and exit immediately."""
    sys.stderr.write("Quit signal sent - exiting immediately\n")
    sys.exit(1)
class CliError(errors.BaseError): pass
class BaseCli(firmwaretools.FtBase):
    """This is the base class for cli.
       Inherits from FtBase """

    def __init__(self):
        # handle sigquit early on
        signal.signal(signal.SIGQUIT, sigquit)
        logging.basicConfig()
        logging.raiseExceptions = 0
        firmwaretools.FtBase.__init__(self)

        self.logger = getLog()
        self.verbose_logger = getLog(prefix="verbose.")
        self.cli_commands = {}  # mode name -> command object

    def registerCommand(self, command):
        # Register a subcommand for each mode it claims; a duplicate
        # registration is a configuration error.
        for name in command.getModes():
            if self.cli_commands.has_key(name):
                raise errors.ConfigError('Command "%s" already defined' % name)
            self.cli_commands[name] = command

    def getOptionsConfig(self, args):
        """parses command line arguments, takes cli args:
        sets up self.conf and self.cmds as well as logger objects
        in base instance"""
        self.fullCmdLine = args
        self.optparser = FtOptionParser( usage='%prog [options]', version=__VERSION__)

        # Parse only command line options that affect basic yum setup
        self.args = []
        self.opts = self.optparser.firstParse(args)

        self.verbosity = self.opts.verbosity
        self.trace = self.opts.trace
        self.loggingConfig = self.opts.configFiles[0]

        pluginTypes = [plugins.TYPE_CLI, plugins.TYPE_CORE]
        if not self.opts.fake_mode:
            pluginTypes.extend([plugins.TYPE_INVENTORY,])
        else:
            # --fake-mode loads mock plugins instead of real inventory.
            pluginTypes.extend([plugins.TYPE_MOCK_CORE, plugins.TYPE_MOCK_INVENTORY,])

        # Read up configuration options and initialise plugins
        try:
            self._getConfig(self.opts.configFiles,
                pluginTypes,
                optparser=self.optparser,
                disabledPlugins=self.opts.disabledPlugins)
        except errors.ConfigError, e:
            self.logger.critical(_('Config Error: %s'), e)
            sys.exit(1)
        except ValueError, e:
            self.logger.critical(_('Options Error: %s'), e)
            sys.exit(1)

        # redo firstparse in case plugin added a new mode
        self.opts = self.optparser.firstParse(args)

        # subcommands can add new optparser stuff in addSubOptions()
        self.doCommands("addSubOptions")

        # Now parse the command line for real and
        self.opts, self.args = self.optparser.parse_args(args)

        # check fully-processed cmdline options
        self.doCommands("doCheck")

    decorate(traceLog())
    def doCommands(self, funcName="doCommand"):
        # Dispatch funcName ("doCommand", "addSubOptions", "doCheck") on
        # the command registered for the current mode.
        if not self.cli_commands.has_key(self.opts.mode):
            self.usage()
            raise CliError, "mode not specified."
        return getattr(self.cli_commands[self.opts.mode], funcName)(self,
            self.opts.mode, self.fullCmdLine, self.args)

    decorate(traceLog())
    def usage(self):
        ''' Print out command line usage '''
        self.optparser.print_help()

    decorate(traceLog())
    def updateFirmware(self, showUnknown=False):
        # Inventory the system, report what can be updated, then (only
        # with --yes) install updates in dependency order.
        print
        print "Running system inventory..."
        depFailures = {}
        updateSet = self.calculateUpgradeList(cb=mycb(depFailures))
        print "\033[2K\033[0G" # clear line
        needUpdate = 0
        print "Searching storage directory for available BIOS updates..."
        for device in updateSet.iterDevices():
            if device.version == "unknown" and not showUnknown:
                continue
            print "Checking %s - %s" % (str(device), device.version)
            for availPkg in updateSet.iterAvailableUpdates(device):
                print "\tAvailable: %s - %s" % (availPkg.name, availPkg.version)
            pkg = updateSet.getUpdatePackageForDevice(device)
            if pkg is None:
                print "\tDid not find a newer package to install that meets all installation checks."
            else:
                print "\tFound Update: %s - %s" % (pkg.name, pkg.version)
                needUpdate = 1
        if depFailures:
            print
            print "Following packages could apply, but have dependency failures:"
            for pkg, reason in depFailures.values():
                print "\t%s - %s" % (pkg.name, pkg.version)
                print "\t\t REASON: %s" % reason
        if not needUpdate:
            print
            print "This system does not appear to have any updates available."
            print "No action necessary."
            print
            return 1
        else:
            print
            print "Found firmware which needs to be updated."
            print
        # if we get to this point, that means update is necessary.
        # any exit before this point means that there was an error, or no update
        # was necessary and should return non-zero
        if self.opts.interactive == 2:
            # --test mode: stop after reporting.
            print
            print "Test mode complete."
            print
            return 0
        if self.opts.interactive == 1:
            # default interactive mode: require an explicit --yes.
            print
            print "Please run the program with the '--yes' switch to enable BIOS update."
            print " UPDATE NOT COMPLETED!"
            print
            return 0
        print "Running updates..."
        for pkg in updateSet.generateInstallationOrder():
            try:
                def statusFunc():
                    # Spinner refresh while the installer runs in the
                    # background.
                    if pkg.getCapability('accurate_update_percentage'):
                        firmwaretools.pycompat.spinPrint("%s%% Installing %s - %s" % (pkg.getProgress() * 100, pkg.name, pkg.version))
                    else:
                        firmwaretools.pycompat.spinPrint("Installing %s - %s" % (pkg.name, pkg.version))
                    time.sleep(0.2)
                ret = firmwaretools.pycompat.runLongProcess(pkg.install, waitLoopFunction=statusFunc)
                print firmwaretools.pycompat.clearLine(),
                print "100%% Installing %s - %s" % (pkg.name, pkg.version)
                print "Done: %s" % pkg.getStatusStr()
                print
            except (firmwaretools.package.NoInstaller,), e:
                # No installer for this package: skip, keep going.
                print "package %s - %s does not have an installer available." % (pkg.name, pkg.version)
                print "skipping this package for now."
                continue
            except (firmwaretools.package.InstallError,), e:
                # A failed install aborts the remaining updates.
                print "Installation failed for package: %s - %s" % (pkg.name, pkg.version)
                print "aborting update..."
                print
                print "The error message from the low-level command was:"
                print
                print e
                break
class mycb(firmwaretools.Callback):
    """Console progress callback used during inventory and update.

    Paints spinner status lines on the terminal and collects dependency
    check failures into the dict passed to the constructor (shared with
    the caller, see BaseCli.updateFirmware).
    """
    def __init__(self, depFailures):
        super(mycb, self).__init__()
        self.depFailures = depFailures
        self.message = ""

    def __call__(self, *args, **kargs):
        # Dispatch on kargs["what"] via the Callback base class.
        return super(mycb, self).__call__(*args, **kargs)

    def UNKNOWN(self, *args, **kargs):
        # Keep the spinner moving for events we do not handle specially.
        firmwaretools.pycompat.spinPrint(self.message)

    def running_inventory(self, who, what, details, *args, **kargs):
        self.message = "%s: Running Inventory: %s" % (who, details)
        firmwaretools.pycompat.spinPrint("%s: Running Inventory: %s" % (who, details))

    def found_package_ini(self, what, path, *args, **kargs):
        # BUGFIX: 'self' was missing from this signature, so dispatching
        # it as a bound method bound the instance to 'what' and the
        # handler blew up (NameError on 'self' / TypeError on 'what').
        if len(path) > 50:
            path = path[-50:]
        self.message = "Checking: %s" % path
        firmwaretools.pycompat.spinPrint("Checking: %s" % path)

    def fail_dependency_check(self, what, package, *args, **kargs):
        pkgName = "%s-%s" % (package.name, package.version)
        if package.conf.has_option("package", "limit_system_support"):
            pkgName = pkgName + "-" + package.conf.get("package", "limit_system_support")
        # BUGFIX: the original stored kargs.get("package"), which is
        # always None here because the 'package' keyword is consumed by
        # the named parameter above; updateFirmware() then dereferences
        # it (pkg.name) when reporting failures.  Store the package
        # object itself instead.
        self.depFailures[pkgName] = (package, kargs.get("reason"))
class FtOptionParser(OptionParser):
    """Unified cmdline option parsing and config file handling.

    Supports a two-pass parse: firstParse() looks only at a whitelist of
    early options (mode, config files, verbosity) so plugins can extend
    the parser before the full parse_args() pass.
    """
    def __init__(self, *args, **kargs):
        OptionParser.__init__(self, *args, **kargs)
        self.add_option("-c", "--config", help="Override default config file with user-specified config file.", dest="configFiles", action="append", default=[])
        self.add_option("--extra-plugin-config", help="Add additional plugin config file.", action="append", default=[], dest="extraConfigs")
        self.add_option("-v", "--verbose", action="count", dest="verbosity", default=1, help="Display more verbose output.")
        self.add_option("-q", "--quiet", action="store_const", const=0, dest="verbosity", help="Minimize program output. Only errors and warnings are displayed.")
        self.add_option("--trace", action="store_true", dest="trace", default=False, help="Enable verbose function tracing.")
        self.add_option("--fake-mode", action="store_true", dest="fake_mode", default=False, help="Display fake data for unit-testing.")
        self.add_option("--disableplugin", action="append", dest="disabledPlugins", default=[], help="Disable single named plugin.")

        # put all 'mode' arguments here so we know early what mode we are in.
        self.parseOptionsFirst_novalopts = [
            "--version", "-q", "-v", "--quiet", "--verbose",
            "--trace", "--fake-mode", ]
        self.parseOptionsFirst_valopts = [
            "-c", "--config", "--disableplugin", "--extra-plugin-config"]

    def addEarlyParse(self, opt, arg=0):
        """Mark *opt* for the early pass; non-zero *arg* means it takes a value."""
        if arg:
            self.parseOptionsFirst_valopts.append(opt)
        else:
            self.parseOptionsFirst_novalopts.append(opt)

    def firstParse(self, args):
        """Parse only the whitelisted early options out of *args*."""
        filtered = _filtercmdline(
            self.parseOptionsFirst_novalopts,
            self.parseOptionsFirst_valopts,
            args)
        opts, leftover = self.parse_args(args=filtered)
        # Fall back to the packaged config and fold in any extras.
        if not opts.configFiles:
            opts.configFiles = [os.path.join(firmwaretools.PKGCONFDIR, "firmware.conf"), ]
        opts.configFiles = opts.configFiles + opts.extraConfigs
        return opts
def _filtercmdline(novalopts, valopts, args):
'''Keep only specific options from the command line argument list
This function allows us to peek at specific command line options when using
the optparse module. This is useful when some options affect what other
options should be available.
@param novalopts: A sequence of options to keep that don't take an argument.
@param valopts: A sequence of options to keep that take a single argument.
@param args: The command line arguments to parse (as per sys.argv[:1]
@return: A list of strings containing the filtered version of args.
Will raise ValueError if there was a problem parsing the command line.
'''
out = []
args = list(args) # Make a copy because this func is destructive
while len(args) > 0:
a = args.pop(0)
if '=' in a:
opt, _ = a.split('=', 1)
if opt in valopts:
out.append(a)
elif a in novalopts:
out.append(a)
elif a in valopts:
if len(args) < 1:
raise ValueError
next = args.pop(0)
if next[0] == '-':
raise ValueError
out.extend([a, next])
else:
# Check for single letter options that take a value, where the
# value is right up against the option
for opt in valopts:
if len(opt) == 2 and a.startswith(opt):
out.append(a)
return out
firmware-tools-2.1.14/ft-cli/ftcommands.py 0000664 0017654 0017654 00000003140 11121512226 024765 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) 2008 Dell Inc.
# by Michael Brown
#
# Based in part on design and code in Yum 3.2:
# Copyright 2006 Duke University
# Written by Seth Vidal
"""
Classes for subcommands of the yum command line interface.
"""
from firmwaretools.trace_decorator import decorate, traceLog, getLog
moduleLog = getLog()
class YumCommand(object):
    """Base class for firmwaretool CLI subcommands.

    Subclasses override these hooks; the defaults describe a command
    that supports no modes, passes every check, registers no extra
    options, and does nothing when executed.
    """

    def getModes(self):
        """Return the list of CLI modes this command handles (none by default)."""
        return []

    def doCheck(self, base, mode, cmdline, processedArgs):
        """Sanity-check the parsed command line; the default accepts anything."""
        pass

    def addSubOptions(self, base, mode, cmdline, processedArgs):
        """Hook for adding command-specific options; the default adds none."""
        pass

    def doCommand(self, base, mode, cmdline, processedArgs):
        """Execute the command.

        @return: (exit_code, [ errors ]) where exit_code is:
           0 = we're done, exit
           1 = we've errored, exit with error string
        """
        return 0, ['Nothing to do']
firmware-tools-2.1.14/ft-cli/ftmain.py 0000664 0017654 0017654 00000007032 11121512226 024114 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:tw=0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
"""
Entrance point for the command line interface.
"""
import sys
import locale
import logging
import signal
import time # test purposes only
# fixup 'no handlers could be found for...' message
logging.raiseExceptions = 0
from firmwaretools.trace_decorator import decorate, traceLog, getLog
from firmwaretools import errors
from firmwaretools import plugins
import cli
def main(args):
"""This does all the real work"""
def setDebug():
import pdb
pdb.set_trace()
signal.signal(signal.SIGUSR1,setDebug)
def exUserCancel():
logger.critical('Exiting on user cancel')
sys.exit(1)
decorate(traceLog())
def exIOError(e):
if e.errno == 32:
logger.critical('Exiting on Broken Pipe')
else:
logger.critical(str(e))
sys.exit(1)
decorate(traceLog())
def exPluginExit(e):
'''Called when a plugin raises PluginExit.
Log the plugin's exit message if one was supplied.
'''
if str(e):
logger.warn('%s' % e)
sys.exit(1)
decorate(traceLog())
def exFatal(e):
logger.critical('%s' % e)
sys.exit(1)
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error, e:
# default to C locale if we get a failure.
print >> sys.stderr, 'Failed to set locale, defaulting to C'
locale.setlocale(locale.LC_ALL, 'C')
# our core object for the cli
base = cli.BaseCli()
logger = getLog()
verbose_logger = getLog(prefix="verbose.")
# do our cli parsing and config file setup
# also sanity check the things being passed on the cli
try:
# no logging before this returns.
base.getOptionsConfig(args)
except plugins.PluginExit, e:
exPluginExit(e)
except errors.BaseError, e:
exFatal(e)
lockerr = ""
while True:
try:
base.lock()
except errors.LockError, e:
if "%s" %(e.msg,) != lockerr:
lockerr = "%s" %(e.msg,)
logger.critical(lockerr)
logger.critical("Another app is currently holding the lock; waiting for it to exit...")
time.sleep(2)
else:
break
try:
result, resultmsgs = base.doCommands()
except plugins.PluginExit, e:
exPluginExit(e)
except errors.BaseError, e:
result = 1
resultmsgs = [str(e)]
except KeyboardInterrupt:
exUserCancel()
except IOError, e:
exIOError(e)
verbose_logger.info('Complete!')
base.unlock()
sys.exit(0)
# Script entry point: run the CLI, converting Ctrl-C into a clean
# "exiting on user cancel" message and a non-zero exit status instead
# of an unhandled-KeyboardInterrupt traceback.
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt, e:
print >> sys.stderr, "\n\nExiting on user cancel."
sys.exit(1)
firmware-tools-2.1.14/ft-cli/guihelpers.py 0000775 0017654 0017654 00000002310 10756403330 025012 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:ai:ts=4:sw=4:et:filetype=python:
#future imports always first
from __future__ import generators
# std python stuff
import logging
import gtk
from firmwaretools.pycompat import runLongProcess
from firmwaretools.trace_decorator import decorate, traceLog, getLog
moduleLog = getLog()
moduleVerboseLog = getLog(prefix="verbose.")
decorate(traceLog())
def getSelectionPaths(treeview):
    """Return a list of tree paths, one for each selected row in *treeview*."""
    collected = []

    def _record(model, path, iterator, user_data):
        # Only the row's path matters; model and iterator are ignored.
        user_data.append(path)

    treeview.get_selection().selected_foreach(_record, collected)
    return collected
decorate(traceLog())
def gtkYield():
    """Pump the GTK event loop until it is idle.

    Called from inside long-running loops so the GUI keeps redrawing and
    responding to the user while work is in progress.
    """
    pending = gtk.events_pending
    iterate = gtk.main_iteration
    while pending():
        iterate(False)
decorate(traceLog())
def runLongProcessGtk(function, args=None, kargs=None, waitLoopFunction=None):
"""Run function(*args, **kargs) through firmwaretools.pycompat.runLongProcess
while keeping the GTK GUI responsive.

@param function: callable handed to runLongProcess.
@param args: positional arguments for function (or None).
@param kargs: keyword arguments for function (or None).
@param waitLoopFunction: optional callable invoked on every wait-loop
iteration before the GUI is pumped (e.g. to update a progress bar).
@return: whatever runLongProcess returns.
"""
# NOTE(review): this decorate(traceLog()) appears to attach the trace
# decorator to the nested myFunc defined below
# (peak.util.decorators-style) -- confirm against
# firmwaretools.trace_decorator before restructuring.
decorate(traceLog())
def myFunc():
# can access outer function variables
if waitLoopFunction is not None:
waitLoopFunction()
gtkYield() # make sure current GUI is fully displayed
return runLongProcess(function, args, kargs, waitLoopFunction=myFunc)
firmware-tools-2.1.14/pkg/ 0000777 0017654 0017654 00000000000 11452664762 021713 5 ustar 00michael_e_brown michael_e_brown 0000000 0000000 firmware-tools-2.1.14/pkg/firmware-tools.spec.in 0000664 0017654 0017654 00000010155 11162006041 026120 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:tw=0:ts=4:sw=4:et
%define major @RELEASE_MAJOR@
%define minor @RELEASE_MINOR@
%define micro @RELEASE_MICRO@
%define extra @RELEASE_RPM_EXTRA@
%define release_version %{major}.%{minor}.%{micro}%{extra}
%define rpm_release 1
# per fedora python packaging guidelines
%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
Name: firmware-tools
Version: %{release_version}
Release: %{rpm_release}%{?dist}
Summary: Scripts and tools to manage firmware and BIOS updates
Group: Applications/System
License: GPLv2+ or OSL 2.1
URL: http://linux.dell.com/libsmbios/download/
Source0: http://linux.dell.com/libsmbios/download/%{name}/%{name}-%{version}/%{name}-%{version}.tar.bz2
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
# SUSE doesn't have noarch python, so for SUSE, always build arch-dependent
%if ! 0%{?suse_version}
BuildArch: noarch
%endif
BuildRequires: python-devel, rpm-python
Requires: rpm-python, pciutils
Provides: firmware_inventory(pci) = 0:%{release_version}
# packages that don't conform to the latest ABI
Conflicts: firmware_addon_dell < 0:2.1.0
Conflicts: dell-dup < 0:1.1.0
%description
The firmware-tools project provides tools to inventory hardware and a plugin
architecture so that different OEM vendors can provide different inventory
components. It is intended to tie to the package system to enable seamless
installation of updated firmware via your package manager, as well as provide
a framework for BIOS and firmware updates.
%prep
%setup -q
%build
# this line lets us build an RPM directly from a git tarball
[ -e ./configure ] || \
RELEASE_MAJOR=%{major} \
RELEASE_MINOR=%{minor} \
RELEASE_MICRO=%{micro} \
RELEASE_EXTRA=%{extra} \
./autogen.sh --no-configure
# fix problems when buildsystem time is out of sync. ./configure will
# fail if newly created files are older than the packaged files.
# this should normally be a no-op on proper buildsystems.
touch configure
find . -type f -newer configure -print0 | xargs -r0 touch
%configure
make -e %{?_smp_mflags}
%check
make -e %{?_smp_mflags} check
%install
# Fedora Packaging guidelines
rm -rf $RPM_BUILD_ROOT
# SUSE Packaging rpmlint
mkdir $RPM_BUILD_ROOT
make install DESTDIR=%{buildroot} INSTALL="%{__install} -p"
mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/firmware/firmware.d/
mkdir -p $RPM_BUILD_ROOT/%{_datadir}/firmware
# backwards compatibility symlinks
mkdir -p $RPM_BUILD_ROOT/%{_bindir}
ln -s firmwaretool $RPM_BUILD_ROOT/%{_sbindir}/inventory_firmware
ln -s firmwaretool $RPM_BUILD_ROOT/%{_sbindir}/bootstrap_firmware
ln -s firmwaretool $RPM_BUILD_ROOT/%{_sbindir}/update_firmware
ln -s %{_sbindir}/firmwaretool $RPM_BUILD_ROOT/%{_bindir}/update_firmware
%clean
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root,-)
%doc COPYING-GPL COPYING-OSL COPYING.LIB README
%{python_sitelib}/*
%attr(0755,root,root) %{_sbindir}/*
%attr(0755,root,root) %{_bindir}/*
%{_datadir}/firmware-tools/
%dir %{_sysconfdir}/firmware
%dir %{_sysconfdir}/firmware/firmware.d
%config(noreplace) %{_sysconfdir}/firmware/firmware.conf
%{_datadir}/firmware/
%changelog
* Thu Aug 23 2007 Michael E Brown - 1.5.6-1
- rebase to upstream release
* Fri Aug 17 2007 Michael E Brown - 1.5.5-1
- rebase to upstream release
* Fri Aug 17 2007 Michael E Brown - 1.4.2-1
- rebase to upstream release
* Tue May 1 2007 Michael E Brown - 1.2.6-1
- disable empty debuginfo package
* Tue Mar 20 2007 Michael E Brown - 1.2.5-1
- Remove python-abi dep for RHEL3 (it was broken)
* Fri Mar 16 2007 Michael E Brown - 1.2.4-1
- fix typo in sitelib path -- only for RHEL3 build
* Wed Mar 14 2007 Michael E Brown - 1.2.3-1
- create and own {_sysconfdir}/firmware/firmware.d/ for plugins.
- Fedora review changes
* Mon Mar 12 2007 Michael E Brown - 1.2.0-1
- Fedora-compliant packaging changes.
firmware-tools-2.1.14/pkg/install-sh 0000755 0017654 0017654 00000032537 11452664727 023726 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 #!/bin/sh
# install - install a program, script, or datafile
scriptversion=2009-04-28.21; # UTC
# This originates from X11R5 (mit/util/scripts/install.sh), which was
# later released in X11R6 (xc/config/util/install.sh) with the
# following copyright and license.
#
# Copyright (C) 1994 X Consortium
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of the X Consortium shall not
# be used in advertising or otherwise to promote the sale, use or other deal-
# ings in this Software without prior written authorization from the X Consor-
# tium.
#
#
# FSF changes to this file are in the public domain.
#
# Calling this script install-sh is preferred over install.sh, to prevent
# `make' implicit rules from creating a file called install from it
# when there is no Makefile.
#
# This script is compatible with the BSD install script, but was written
# from scratch.
nl='
'
IFS=" "" $nl"
# set DOITPROG to echo to test this script
# Don't use :- since 4.3BSD and earlier shells don't like it.
doit=${DOITPROG-}
if test -z "$doit"; then
doit_exec=exec
else
doit_exec=$doit
fi
# Put in absolute file names if you don't have them in your path;
# or use environment vars.
chgrpprog=${CHGRPPROG-chgrp}
chmodprog=${CHMODPROG-chmod}
chownprog=${CHOWNPROG-chown}
cmpprog=${CMPPROG-cmp}
cpprog=${CPPROG-cp}
mkdirprog=${MKDIRPROG-mkdir}
mvprog=${MVPROG-mv}
rmprog=${RMPROG-rm}
stripprog=${STRIPPROG-strip}
posix_glob='?'
initialize_posix_glob='
test "$posix_glob" != "?" || {
if (set -f) 2>/dev/null; then
posix_glob=
else
posix_glob=:
fi
}
'
posix_mkdir=
# Desired mode of installed file.
mode=0755
chgrpcmd=
chmodcmd=$chmodprog
chowncmd=
mvcmd=$mvprog
rmcmd="$rmprog -f"
stripcmd=
src=
dst=
dir_arg=
dst_arg=
copy_on_change=false
no_target_directory=
usage="\
Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
or: $0 [OPTION]... SRCFILES... DIRECTORY
or: $0 [OPTION]... -t DIRECTORY SRCFILES...
or: $0 [OPTION]... -d DIRECTORIES...
In the 1st form, copy SRCFILE to DSTFILE.
In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
In the 4th, create DIRECTORIES.
Options:
--help display this help and exit.
--version display version info and exit.
-c (ignored)
-C install only if different (preserve the last data modification time)
-d create directories instead of installing files.
-g GROUP $chgrpprog installed files to GROUP.
-m MODE $chmodprog installed files to MODE.
-o USER $chownprog installed files to USER.
-s $stripprog installed files.
-t DIRECTORY install into DIRECTORY.
-T report an error if DSTFILE is a directory.
Environment variables override the default commands:
CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
RMPROG STRIPPROG
"
while test $# -ne 0; do
case $1 in
-c) ;;
-C) copy_on_change=true;;
-d) dir_arg=true;;
-g) chgrpcmd="$chgrpprog $2"
shift;;
--help) echo "$usage"; exit $?;;
-m) mode=$2
case $mode in
*' '* | *' '* | *'
'* | *'*'* | *'?'* | *'['*)
echo "$0: invalid mode: $mode" >&2
exit 1;;
esac
shift;;
-o) chowncmd="$chownprog $2"
shift;;
-s) stripcmd=$stripprog;;
-t) dst_arg=$2
shift;;
-T) no_target_directory=true;;
--version) echo "$0 $scriptversion"; exit $?;;
--) shift
break;;
-*) echo "$0: invalid option: $1" >&2
exit 1;;
*) break;;
esac
shift
done
if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
# When -d is used, all remaining arguments are directories to create.
# When -t is used, the destination is already specified.
# Otherwise, the last argument is the destination. Remove it from $@.
for arg
do
if test -n "$dst_arg"; then
# $@ is not empty: it contains at least $arg.
set fnord "$@" "$dst_arg"
shift # fnord
fi
shift # arg
dst_arg=$arg
done
fi
if test $# -eq 0; then
if test -z "$dir_arg"; then
echo "$0: no input file specified." >&2
exit 1
fi
# It's OK to call `install-sh -d' without argument.
# This can happen when creating conditional directories.
exit 0
fi
if test -z "$dir_arg"; then
trap '(exit $?); exit' 1 2 13 15
# Set umask so as not to create temps with too-generous modes.
# However, 'strip' requires both read and write access to temps.
case $mode in
# Optimize common cases.
*644) cp_umask=133;;
*755) cp_umask=22;;
*[0-7])
if test -z "$stripcmd"; then
u_plus_rw=
else
u_plus_rw='% 200'
fi
cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
*)
if test -z "$stripcmd"; then
u_plus_rw=
else
u_plus_rw=,u+rw
fi
cp_umask=$mode$u_plus_rw;;
esac
fi
for src
do
# Protect names starting with `-'.
case $src in
-*) src=./$src;;
esac
if test -n "$dir_arg"; then
dst=$src
dstdir=$dst
test -d "$dstdir"
dstdir_status=$?
else
# Waiting for this to be detected by the "$cpprog $src $dsttmp" command
# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
if test ! -f "$src" && test ! -d "$src"; then
echo "$0: $src does not exist." >&2
exit 1
fi
if test -z "$dst_arg"; then
echo "$0: no destination specified." >&2
exit 1
fi
dst=$dst_arg
# Protect names starting with `-'.
case $dst in
-*) dst=./$dst;;
esac
# If destination is a directory, append the input filename; won't work
# if double slashes aren't ignored.
if test -d "$dst"; then
if test -n "$no_target_directory"; then
echo "$0: $dst_arg: Is a directory" >&2
exit 1
fi
dstdir=$dst
dst=$dstdir/`basename "$src"`
dstdir_status=0
else
# Prefer dirname, but fall back on a substitute if dirname fails.
dstdir=`
(dirname "$dst") 2>/dev/null ||
expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
X"$dst" : 'X\(//\)[^/]' \| \
X"$dst" : 'X\(//\)$' \| \
X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
echo X"$dst" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
}
/^X\(\/\/\)[^/].*/{
s//\1/
q
}
/^X\(\/\/\)$/{
s//\1/
q
}
/^X\(\/\).*/{
s//\1/
q
}
s/.*/./; q'
`
test -d "$dstdir"
dstdir_status=$?
fi
fi
obsolete_mkdir_used=false
if test $dstdir_status != 0; then
case $posix_mkdir in
'')
# Create intermediate dirs using mode 755 as modified by the umask.
# This is like FreeBSD 'install' as of 1997-10-28.
umask=`umask`
case $stripcmd.$umask in
# Optimize common cases.
*[2367][2367]) mkdir_umask=$umask;;
.*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
*[0-7])
mkdir_umask=`expr $umask + 22 \
- $umask % 100 % 40 + $umask % 20 \
- $umask % 10 % 4 + $umask % 2
`;;
*) mkdir_umask=$umask,go-w;;
esac
# With -d, create the new directory with the user-specified mode.
# Otherwise, rely on $mkdir_umask.
if test -n "$dir_arg"; then
mkdir_mode=-m$mode
else
mkdir_mode=
fi
posix_mkdir=false
case $umask in
*[123567][0-7][0-7])
# POSIX mkdir -p sets u+wx bits regardless of umask, which
# is incompatible with FreeBSD 'install' when (umask & 300) != 0.
;;
*)
tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
if (umask $mkdir_umask &&
exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
then
if test -z "$dir_arg" || {
# Check for POSIX incompatibilities with -m.
# HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
# other-writeable bit of parent directory when it shouldn't.
# FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
ls_ld_tmpdir=`ls -ld "$tmpdir"`
case $ls_ld_tmpdir in
d????-?r-*) different_mode=700;;
d????-?--*) different_mode=755;;
*) false;;
esac &&
$mkdirprog -m$different_mode -p -- "$tmpdir" && {
ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
}
}
then posix_mkdir=:
fi
rmdir "$tmpdir/d" "$tmpdir"
else
# Remove any dirs left behind by ancient mkdir implementations.
rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
fi
trap '' 0;;
esac;;
esac
if
$posix_mkdir && (
umask $mkdir_umask &&
$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
)
then :
else
# The umask is ridiculous, or mkdir does not conform to POSIX,
# or it failed possibly due to a race condition. Create the
# directory the slow way, step by step, checking for races as we go.
case $dstdir in
/*) prefix='/';;
-*) prefix='./';;
*) prefix='';;
esac
eval "$initialize_posix_glob"
oIFS=$IFS
IFS=/
$posix_glob set -f
set fnord $dstdir
shift
$posix_glob set +f
IFS=$oIFS
prefixes=
for d
do
test -z "$d" && continue
prefix=$prefix$d
if test -d "$prefix"; then
prefixes=
else
if $posix_mkdir; then
(umask=$mkdir_umask &&
$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
# Don't fail if two instances are running concurrently.
test -d "$prefix" || exit 1
else
case $prefix in
*\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
*) qprefix=$prefix;;
esac
prefixes="$prefixes '$qprefix'"
fi
fi
prefix=$prefix/
done
if test -n "$prefixes"; then
# Don't fail if two instances are running concurrently.
(umask $mkdir_umask &&
eval "\$doit_exec \$mkdirprog $prefixes") ||
test -d "$dstdir" || exit 1
obsolete_mkdir_used=true
fi
fi
fi
if test -n "$dir_arg"; then
{ test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
{ test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
{ test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
else
# Make a couple of temp file names in the proper directory.
dsttmp=$dstdir/_inst.$$_
rmtmp=$dstdir/_rm.$$_
# Trap to clean up those temp files at exit.
trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
# Copy the file name to the temp name.
(umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
# and set any options; do chmod last to preserve setuid bits.
#
# If any of these fail, we abort the whole thing. If we want to
# ignore errors from any of these, just make sure not to ignore
# errors from the above "$doit $cpprog $src $dsttmp" command.
#
{ test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
{ test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
{ test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
{ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
# If -C, don't bother to copy if it wouldn't change the file.
if $copy_on_change &&
old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
eval "$initialize_posix_glob" &&
$posix_glob set -f &&
set X $old && old=:$2:$4:$5:$6 &&
set X $new && new=:$2:$4:$5:$6 &&
$posix_glob set +f &&
test "$old" = "$new" &&
$cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
then
rm -f "$dsttmp"
else
# Rename the file to the real destination.
$doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
# The rename failed, perhaps because mv can't rename something else
# to itself, or perhaps because mv is so ancient that it does not
# support -f.
{
# Now remove or move aside any old file at destination location.
# We try this two ways since rm can't unlink itself on some
# systems and the destination file might be busy for other
# reasons. In this case, the final cleanup might fail but the new
# file should still install successfully.
{
test ! -f "$dst" ||
$doit $rmcmd -f "$dst" 2>/dev/null ||
{ $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
{ $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
} ||
{ echo "$0: cannot unlink or rename $dst" >&2
(exit 1); exit 1
}
} &&
# Now rename the file to the real destination.
$doit $mvcmd "$dsttmp" "$dst"
}
fi || exit 1
trap '' 0
fi
done
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:
firmware-tools-2.1.14/pkg/missing 0000755 0017654 0017654 00000026233 11452664727 023315 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 #! /bin/sh
# Common stub for a few missing GNU programs while installing.
scriptversion=2009-04-28.21; # UTC
# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
# 2008, 2009 Free Software Foundation, Inc.
# Originally by François Pinard, 1996.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
if test $# -eq 0; then
echo 1>&2 "Try \`$0 --help' for more information"
exit 1
fi
run=:
sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
# In the cases where this matters, `missing' is being run in the
# srcdir already.
if test -f configure.ac; then
configure_ac=configure.ac
else
configure_ac=configure.in
fi
msg="missing on your system"
case $1 in
--run)
# Try to run requested program, and just exit if it succeeds.
run=
shift
"$@" && exit 0
# Exit code 63 means version mismatch. This often happens
# when the user try to use an ancient version of a tool on
# a file that requires a minimum version. In this case we
# we should proceed has if the program had been absent, or
# if --run hadn't been passed.
if test $? = 63; then
run=:
msg="probably too old"
fi
;;
-h|--h|--he|--hel|--help)
echo "\
$0 [OPTION]... PROGRAM [ARGUMENT]...
Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
error status if there is no known handling for PROGRAM.
Options:
-h, --help display this help and exit
-v, --version output version information and exit
--run try to run the given command, and emulate it if it fails
Supported PROGRAM values:
aclocal touch file \`aclocal.m4'
autoconf touch file \`configure'
autoheader touch file \`config.h.in'
autom4te touch the output file, or create a stub one
automake touch all \`Makefile.in' files
bison create \`y.tab.[ch]', if possible, from existing .[ch]
flex create \`lex.yy.c', if possible, from existing .c
help2man touch the output file
lex create \`lex.yy.c', if possible, from existing .c
makeinfo touch the output file
tar try tar, gnutar, gtar, then tar without non-portable flags
yacc create \`y.tab.[ch]', if possible, from existing .[ch]
Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
\`g' are ignored when checking the name.
Send bug reports to ."
exit $?
;;
-v|--v|--ve|--ver|--vers|--versi|--versio|--version)
echo "missing $scriptversion (GNU Automake)"
exit $?
;;
-*)
echo 1>&2 "$0: Unknown \`$1' option"
echo 1>&2 "Try \`$0 --help' for more information"
exit 1
;;
esac
# normalize program name to check for.
program=`echo "$1" | sed '
s/^gnu-//; t
s/^gnu//; t
s/^g//; t'`
# Now exit if we have it, but it failed. Also exit now if we
# don't have it and --version was passed (most likely to detect
# the program). This is about non-GNU programs, so use $1 not
# $program.
case $1 in
lex*|yacc*)
# Not GNU programs, they don't have --version.
;;
tar*)
if test -n "$run"; then
echo 1>&2 "ERROR: \`tar' requires --run"
exit 1
elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
exit 1
fi
;;
*)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
# Could not run --version or --help. This is probably someone
# running `$TOOL --version' or `$TOOL --help' to check whether
# $TOOL exists and not knowing $TOOL uses missing.
exit 1
fi
;;
esac
# If it does not exist, or fails to run (possibly an outdated version),
# try to emulate it.
case $program in
aclocal*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified \`acinclude.m4' or \`${configure_ac}'. You might want
to install the \`Automake' and \`Perl' packages. Grab them from
any GNU archive site."
touch aclocal.m4
;;
autoconf*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified \`${configure_ac}'. You might want to install the
\`Autoconf' and \`GNU m4' packages. Grab them from any GNU
archive site."
touch configure
;;
autoheader*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified \`acconfig.h' or \`${configure_ac}'. You might want
to install the \`Autoconf' and \`GNU m4' packages. Grab them
from any GNU archive site."
files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
test -z "$files" && files="config.h"
touch_files=
for f in $files; do
case $f in
*:*) touch_files="$touch_files "`echo "$f" |
sed -e 's/^[^:]*://' -e 's/:.*//'`;;
*) touch_files="$touch_files $f.in";;
esac
done
touch $touch_files
;;
automake*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
You might want to install the \`Automake' and \`Perl' packages.
Grab them from any GNU archive site."
find . -type f -name Makefile.am -print |
sed 's/\.am$/.in/' |
while read f; do touch "$f"; done
;;
autom4te*)
echo 1>&2 "\
WARNING: \`$1' is needed, but is $msg.
You might have modified some files without having the
proper tools for further handling them.
You can get \`$1' as part of \`Autoconf' from any GNU
archive site."
file=`echo "$*" | sed -n "$sed_output"`
test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
if test -f "$file"; then
touch $file
else
test -z "$file" || exec >$file
echo "#! /bin/sh"
echo "# Created by GNU Automake missing as a replacement of"
echo "# $ $@"
echo "exit 0"
chmod +x $file
exit 1
fi
;;
bison*|yacc*)
echo 1>&2 "\
WARNING: \`$1' $msg. You should only need it if
you modified a \`.y' file. You may need the \`Bison' package
in order for those modifications to take effect. You can get
\`Bison' from any GNU archive site."
rm -f y.tab.c y.tab.h
if test $# -ne 1; then
eval LASTARG="\${$#}"
case $LASTARG in
*.y)
SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
if test -f "$SRCFILE"; then
cp "$SRCFILE" y.tab.c
fi
SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
if test -f "$SRCFILE"; then
cp "$SRCFILE" y.tab.h
fi
;;
esac
fi
if test ! -f y.tab.h; then
echo >y.tab.h
fi
if test ! -f y.tab.c; then
echo 'main() { return 0; }' >y.tab.c
fi
;;
lex*|flex*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified a \`.l' file. You may need the \`Flex' package
in order for those modifications to take effect. You can get
\`Flex' from any GNU archive site."
rm -f lex.yy.c
if test $# -ne 1; then
eval LASTARG="\${$#}"
case $LASTARG in
*.l)
SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
if test -f "$SRCFILE"; then
cp "$SRCFILE" lex.yy.c
fi
;;
esac
fi
if test ! -f lex.yy.c; then
echo 'main() { return 0; }' >lex.yy.c
fi
;;
help2man*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified a dependency of a manual page. You may need the
\`Help2man' package in order for those modifications to take
effect. You can get \`Help2man' from any GNU archive site."
file=`echo "$*" | sed -n "$sed_output"`
test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
if test -f "$file"; then
touch $file
else
test -z "$file" || exec >$file
echo ".ab help2man is required to generate this page"
exit $?
fi
;;
makeinfo*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified a \`.texi' or \`.texinfo' file, or any other file
indirectly affecting the aspect of the manual. The spurious
call might also be the consequence of using a buggy \`make' (AIX,
DU, IRIX). You might want to install the \`Texinfo' package or
the \`GNU make' package. Grab either from any GNU archive site."
# The file to touch is that specified with -o ...
file=`echo "$*" | sed -n "$sed_output"`
test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
if test -z "$file"; then
# ... or it is the one specified with @setfilename ...
infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
file=`sed -n '
/^@setfilename/{
s/.* \([^ ]*\) *$/\1/
p
q
}' $infile`
# ... or it is derived from the source name (dir/f.texi becomes f.info)
test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
fi
# If the file does not exist, the user really needs makeinfo;
# let's fail without touching anything.
test -f $file || exit 1
touch $file
;;
tar*)
shift
# We have already tried tar in the generic part.
# Look for gnutar/gtar before invocation to avoid ugly error
# messages.
if (gnutar --version > /dev/null 2>&1); then
gnutar "$@" && exit 0
fi
if (gtar --version > /dev/null 2>&1); then
gtar "$@" && exit 0
fi
firstarg="$1"
if shift; then
case $firstarg in
*o*)
firstarg=`echo "$firstarg" | sed s/o//`
tar "$firstarg" "$@" && exit 0
;;
esac
case $firstarg in
*h*)
firstarg=`echo "$firstarg" | sed s/h//`
tar "$firstarg" "$@" && exit 0
;;
esac
fi
echo 1>&2 "\
WARNING: I can't seem to be able to run \`tar' with the given arguments.
You may want to install GNU tar or Free paxutils, or check the
command line arguments."
exit 1
;;
*)
echo 1>&2 "\
WARNING: \`$1' is needed, and is $msg.
You might have modified some files without having the
proper tools for further handling them. Check the \`README' file,
it often tells you about the needed prerequisites for installing
this package. You may also peek at any GNU archive site, in case
some other package would contain this missing \`$1' program."
exit 1
;;
esac
exit 0
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:
firmware-tools-2.1.14/pkg/py-compile 0000755 0017654 0017654 00000010135 11452664727 023714 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 #!/bin/sh
# py-compile - Compile a Python program
scriptversion=2009-04-28.21; # UTC
# Copyright (C) 2000, 2001, 2003, 2004, 2005, 2008, 2009 Free Software
# Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# This file is maintained in Automake, please report
# bugs to or send patches to
# .
if [ -z "$PYTHON" ]; then
PYTHON=python
fi
basedir=
destdir=
files=
while test $# -ne 0; do
case "$1" in
--basedir)
basedir=$2
if test -z "$basedir"; then
echo "$0: Missing argument to --basedir." 1>&2
exit 1
fi
shift
;;
--destdir)
destdir=$2
if test -z "$destdir"; then
echo "$0: Missing argument to --destdir." 1>&2
exit 1
fi
shift
;;
-h|--h*)
cat <<\EOF
Usage: py-compile [--help] [--version] [--basedir DIR] [--destdir DIR] FILES..."
Byte compile some python scripts FILES. Use --destdir to specify any
leading directory path to the FILES that you don't want to include in the
byte compiled file. Specify --basedir for any additional path information you
do want to be shown in the byte compiled file.
Example:
py-compile --destdir /tmp/pkg-root --basedir /usr/share/test test.py test2.py
Report bugs to .
EOF
exit $?
;;
-v|--v*)
echo "py-compile $scriptversion"
exit $?
;;
*)
files="$files $1"
;;
esac
shift
done
if test -z "$files"; then
echo "$0: No files given. Try \`$0 --help' for more information." 1>&2
exit 1
fi
# if basedir was given, then it should be prepended to filenames before
# byte compilation.
if [ -z "$basedir" ]; then
pathtrans="path = file"
else
pathtrans="path = os.path.join('$basedir', file)"
fi
# if destdir was given, then it needs to be prepended to the filename to
# byte compile but not go into the compiled file.
if [ -z "$destdir" ]; then
filetrans="filepath = path"
else
filetrans="filepath = os.path.normpath('$destdir' + os.sep + path)"
fi
$PYTHON -c "
import sys, os, py_compile
files = '''$files'''
sys.stdout.write('Byte-compiling python modules...\n')
for file in files.split():
$pathtrans
$filetrans
if not os.path.exists(filepath) or not (len(filepath) >= 3
and filepath[-3:] == '.py'):
continue
sys.stdout.write(file)
sys.stdout.flush()
py_compile.compile(filepath, filepath + 'c', path)
sys.stdout.write('\n')" || exit $?
# this will fail for python < 1.5, but that doesn't matter ...
$PYTHON -O -c "
import sys, os, py_compile
files = '''$files'''
sys.stdout.write('Byte-compiling python modules (optimized versions) ...\n')
for file in files.split():
$pathtrans
$filetrans
if not os.path.exists(filepath) or not (len(filepath) >= 3
and filepath[-3:] == '.py'):
continue
sys.stdout.write(file)
sys.stdout.flush()
py_compile.compile(filepath, filepath + 'o', path)
sys.stdout.write('\n')" 2>/dev/null || :
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:
firmware-tools-2.1.14/README 0000664 0017654 0017654 00000001256 11162002521 021765 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 This software is dual-licensed under GPL/OSL.
* Copyright (C) 2005 Dell Inc.
* by Michael Brown
* Licensed under the Open Software License version 3.0 or later.
*
* Alternatively, you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
firmware-tools-2.1.14/configure.ac 0000664 0017654 0017654 00000003432 11452664711 023412 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # -*- Autoconf -*-
# vim:tw=0:et:ts=4:sw=4
# Process this file with autoconf to produce a configure script.
##############################################################################
# RELEASE VARIABLES
##############################################################################
#
# The following variables define the libsmbios release version.
# This is the "marketing" version, or overall version of the project.
# This doesnt have anything in relation to the ABI versions of individual
# libraries, which are defined further below.
#
m4_define([release_major_version], [2])
m4_define([release_minor_version], [1])
m4_define([release_micro_version], [14])
# if you define any "extra" version info, include a leading dot (".")
m4_define([release_extra_version], [])
AC_INIT([firmware-tools],
[release_major_version().release_minor_version().release_micro_version()release_extra_version()])
####################################
AC_PREREQ(2.61)
AC_CONFIG_AUX_DIR([pkg])
AM_INIT_AUTOMAKE([1.10 subdir-objects tar-ustar dist-bzip2 dist-lzma no-define foreign])
# Checks for programs.
AC_PROG_INSTALL
# automake macros
AM_PATH_PYTHON
# versioning
AC_SUBST([RELEASE_MAJOR], [release_major_version()])
AC_SUBST([RELEASE_MINOR], [release_minor_version()])
AC_SUBST([RELEASE_MICRO], [release_micro_version()])
AC_SUBST([RELEASE_EXTRA], [release_extra_version()])
AC_SUBST([RELEASE_RPM_EXTRA], [%{nil}])
if test -n "$RELEASE_EXTRA"; then
RELEASE_RPM_EXTRA=$RELEASE_EXTRA
fi
# firmware-tools oddity: package name cannot contain '-', so we have to fix it
pkgpythondir=\${pythondir}/firmwaretools
pkgpyexecdir=\${pyexecdir}/firmwaretools
# generate files and exit
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES([pkg/${PACKAGE_NAME}.spec])
AC_OUTPUT
firmware-tools-2.1.14/aclocal.m4 0000664 0017654 0017654 00000073545 11452664725 023005 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # generated automatically by aclocal 1.11 -*- Autoconf -*-
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
# 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.63],,
[m4_warning([this file was generated for autoconf 2.63.
You have another version of autoconf. It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
To do so, use the procedure documented by the package, typically `autoreconf'.])])
# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# AM_AUTOMAKE_VERSION(VERSION)
# ----------------------------
# Automake X.Y traces this macro to ensure aclocal.m4 has been
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
[am__api_version='1.11'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
m4_if([$1], [1.11], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
# _AM_AUTOCONF_VERSION(VERSION)
# -----------------------------
# aclocal traces this macro to find the Autoconf version.
# This is a private macro too. Using m4_define simplifies
# the logic in aclocal, which can simply ignore this definition.
m4_define([_AM_AUTOCONF_VERSION], [])
# AM_SET_CURRENT_AUTOMAKE_VERSION
# -------------------------------
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
[AM_AUTOMAKE_VERSION([1.11])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to
# `$srcdir', `$srcdir/..', or `$srcdir/../..'.
#
# Of course, Automake must honor this variable whenever it calls a
# tool from the auxiliary directory. The problem is that $srcdir (and
# therefore $ac_aux_dir as well) can be either absolute or relative,
# depending on how configure is run. This is pretty annoying, since
# it makes $ac_aux_dir quite unusable in subdirectories: in the top
# source directory, any form will work fine, but in subdirectories a
# relative path needs to be adjusted first.
#
# $ac_aux_dir/missing
# fails when called from a subdirectory if $ac_aux_dir is relative
# $top_srcdir/$ac_aux_dir/missing
# fails if $ac_aux_dir is absolute,
# fails when called from a subdirectory in a VPATH build with
# a relative $ac_aux_dir
#
# The reason of the latter failure is that $top_srcdir and $ac_aux_dir
# are both prefixed by $srcdir. In an in-source build this is usually
# harmless because $srcdir is `.', but things will broke when you
# start a VPATH build or use an absolute $srcdir.
#
# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
# iff we strip the leading $srcdir from $ac_aux_dir. That would be:
# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
# and then we would define $MISSING as
# MISSING="\${SHELL} $am_aux_dir/missing"
# This will work as long as MISSING is not called from configure, because
# unfortunately $(top_srcdir) has no meaning in configure.
# However there are other variables, like CC, which are often used in
# configure, and could therefore not use this "fixed" $ac_aux_dir.
#
# Another solution, used here, is to always expand $ac_aux_dir to an
# absolute PATH. The drawback is that using absolute paths prevent a
# configured tree to be moved without reconfiguration.
AC_DEFUN([AM_AUX_DIR_EXPAND],
[dnl Rely on autoconf to set up CDPATH properly.
AC_PREREQ([2.50])dnl
# expand $ac_aux_dir to an absolute path
am_aux_dir=`cd $ac_aux_dir && pwd`
])
# Do all the work for Automake. -*- Autoconf -*-
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
# 2005, 2006, 2008, 2009 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 16
# This macro actually does too much. Some checks are only needed if
# your package does certain things. But this isn't really a big deal.
# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
# AM_INIT_AUTOMAKE([OPTIONS])
# -----------------------------------------------
# The call with PACKAGE and VERSION arguments is the old style
# call (pre autoconf-2.50), which is being phased out. PACKAGE
# and VERSION should now be passed to AC_INIT and removed from
# the call to AM_INIT_AUTOMAKE.
# We support both call styles for the transition. After
# the next Automake release, Autoconf can make the AC_INIT
# arguments mandatory, and then we can depend on a new Autoconf
# release and drop the old call support.
AC_DEFUN([AM_INIT_AUTOMAKE],
[AC_PREREQ([2.62])dnl
dnl Autoconf wants to disallow AM_ names. We explicitly allow
dnl the ones we care about.
m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
AC_REQUIRE([AC_PROG_INSTALL])dnl
if test "`cd $srcdir && pwd`" != "`pwd`"; then
# Use -I$(srcdir) only when $(srcdir) != ., so that make's output
# is not polluted with repeated "-I."
AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
# test to see if srcdir already configured
if test -f $srcdir/config.status; then
AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
fi
fi
# test whether we have cygpath
if test -z "$CYGPATH_W"; then
if (cygpath --version) >/dev/null 2>/dev/null; then
CYGPATH_W='cygpath -w'
else
CYGPATH_W=echo
fi
fi
AC_SUBST([CYGPATH_W])
# Define the identity of the package.
dnl Distinguish between old-style and new-style calls.
m4_ifval([$2],
[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
AC_SUBST([PACKAGE], [$1])dnl
AC_SUBST([VERSION], [$2])],
[_AM_SET_OPTIONS([$1])dnl
dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,,
[m4_fatal([AC_INIT should be called with package and version arguments])])dnl
AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
_AM_IF_OPTION([no-define],,
[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl
# Some tools Automake needs.
AC_REQUIRE([AM_SANITY_CHECK])dnl
AC_REQUIRE([AC_ARG_PROGRAM])dnl
AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version})
AM_MISSING_PROG(AUTOCONF, autoconf)
AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version})
AM_MISSING_PROG(AUTOHEADER, autoheader)
AM_MISSING_PROG(MAKEINFO, makeinfo)
AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
AC_REQUIRE([AM_PROG_MKDIR_P])dnl
# We need awk for the "check" target. The system "awk" is bad on
# some platforms.
AC_REQUIRE([AC_PROG_AWK])dnl
AC_REQUIRE([AC_PROG_MAKE_SET])dnl
AC_REQUIRE([AM_SET_LEADING_DOT])dnl
_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
[_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
[_AM_PROG_TAR([v7])])])
_AM_IF_OPTION([no-dependencies],,
[AC_PROVIDE_IFELSE([AC_PROG_CC],
[_AM_DEPENDENCIES(CC)],
[define([AC_PROG_CC],
defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl
AC_PROVIDE_IFELSE([AC_PROG_CXX],
[_AM_DEPENDENCIES(CXX)],
[define([AC_PROG_CXX],
defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl
AC_PROVIDE_IFELSE([AC_PROG_OBJC],
[_AM_DEPENDENCIES(OBJC)],
[define([AC_PROG_OBJC],
defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl
])
_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
dnl The `parallel-tests' driver may need to know about EXEEXT, so add the
dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
AC_CONFIG_COMMANDS_PRE(dnl
[m4_provide_if([_AM_COMPILER_EXEEXT],
[AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
])
dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
dnl mangled by Autoconf and run in a shell conditional statement.
m4_define([_AC_COMPILER_EXEEXT],
m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
# When config.status generates a header, we must update the stamp-h file.
# This file resides in the same directory as the config header
# that is generated. The stamp files are numbered to have different names.
# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
# loop where config.status creates the headers, so we can generate
# our stamp files there.
AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
[# Compute $1's index in $config_headers.
_am_arg=$1
_am_stamp_count=1
for _am_header in $config_headers :; do
case $_am_header in
$_am_arg | $_am_arg:* )
break ;;
* )
_am_stamp_count=`expr $_am_stamp_count + 1` ;;
esac
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
# Copyright (C) 2001, 2003, 2005, 2008 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# AM_PROG_INSTALL_SH
# ------------------
# Define $install_sh.
AC_DEFUN([AM_PROG_INSTALL_SH],
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
if test x"${install_sh}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
*)
install_sh="\${SHELL} $am_aux_dir/install-sh"
esac
fi
AC_SUBST(install_sh)])
# Copyright (C) 2003, 2005 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 2
# Check whether the underlying file-system supports filenames
# with a leading dot. For instance MS-DOS doesn't.
AC_DEFUN([AM_SET_LEADING_DOT],
[rm -rf .tst 2>/dev/null
mkdir .tst 2>/dev/null
if test -d .tst; then
am__leading_dot=.
else
am__leading_dot=_
fi
rmdir .tst 2>/dev/null
AC_SUBST([am__leading_dot])])
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008
# Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 6
# AM_MISSING_PROG(NAME, PROGRAM)
# ------------------------------
AC_DEFUN([AM_MISSING_PROG],
[AC_REQUIRE([AM_MISSING_HAS_RUN])
$1=${$1-"${am_missing_run}$2"}
AC_SUBST($1)])
# AM_MISSING_HAS_RUN
# ------------------
# Define MISSING if not defined so far and test if it supports --run.
# If it does, set am_missing_run to use it, otherwise, to nothing.
AC_DEFUN([AM_MISSING_HAS_RUN],
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
AC_REQUIRE_AUX_FILE([missing])dnl
if test x"${MISSING+set}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
*)
MISSING="\${SHELL} $am_aux_dir/missing" ;;
esac
fi
# Use eval to expand $SHELL
if eval "$MISSING --run true"; then
am_missing_run="$MISSING --run "
else
am_missing_run=
AC_MSG_WARN([`missing' script is too old or missing])
fi
])
# Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# AM_PROG_MKDIR_P
# ---------------
# Check for `mkdir -p'.
AC_DEFUN([AM_PROG_MKDIR_P],
[AC_PREREQ([2.60])dnl
AC_REQUIRE([AC_PROG_MKDIR_P])dnl
dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P,
dnl while keeping a definition of mkdir_p for backward compatibility.
dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile.
dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of
dnl Makefile.ins that do not define MKDIR_P, so we do our own
dnl adjustment using top_builddir (which is defined more often than
dnl MKDIR_P).
AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl
case $mkdir_p in
[[\\/$]]* | ?:[[\\/]]*) ;;
*/*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
esac
])
# Helper functions for option handling. -*- Autoconf -*-
# Copyright (C) 2001, 2002, 2003, 2005, 2008 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 4
# _AM_MANGLE_OPTION(NAME)
# -----------------------
AC_DEFUN([_AM_MANGLE_OPTION],
[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
# _AM_SET_OPTION(NAME)
# ------------------------------
# Set option NAME. Presently that only means defining a flag for this option.
AC_DEFUN([_AM_SET_OPTION],
[m4_define(_AM_MANGLE_OPTION([$1]), 1)])
# _AM_SET_OPTIONS(OPTIONS)
# ----------------------------------
# OPTIONS is a space-separated list of Automake options.
AC_DEFUN([_AM_SET_OPTIONS],
[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
# -------------------------------------------
# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
# Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# AM_PATH_PYTHON([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
# ---------------------------------------------------------------------------
# Adds support for distributing Python modules and packages. To
# install modules, copy them to $(pythondir), using the python_PYTHON
# automake variable. To install a package with the same name as the
# automake package, install to $(pkgpythondir), or use the
# pkgpython_PYTHON automake variable.
#
# The variables $(pyexecdir) and $(pkgpyexecdir) are provided as
# locations to install python extension modules (shared libraries).
# Another macro is required to find the appropriate flags to compile
# extension modules.
#
# If your package is configured with a different prefix to python,
# users will have to add the install directory to the PYTHONPATH
# environment variable, or create a .pth file (see the python
# documentation for details).
#
# If the MINIMUM-VERSION argument is passed, AM_PATH_PYTHON will
# cause an error if the version of python installed on the system
# doesn't meet the requirement. MINIMUM-VERSION should consist of
# numbers and dots only.
AC_DEFUN([AM_PATH_PYTHON],
[
dnl Find a Python interpreter. Python versions prior to 2.0 are not
dnl supported. (2.0 was released on October 16, 2000).
m4_define_default([_AM_PYTHON_INTERPRETER_LIST],
[python python2 python3 python3.0 python2.5 python2.4 python2.3 python2.2 dnl
python2.1 python2.0])
m4_if([$1],[],[
dnl No version check is needed.
# Find any Python interpreter.
if test -z "$PYTHON"; then
AC_PATH_PROGS([PYTHON], _AM_PYTHON_INTERPRETER_LIST, :)
fi
am_display_PYTHON=python
], [
dnl A version check is needed.
if test -n "$PYTHON"; then
# If the user set $PYTHON, use it and don't search something else.
AC_MSG_CHECKING([whether $PYTHON version >= $1])
AM_PYTHON_CHECK_VERSION([$PYTHON], [$1],
[AC_MSG_RESULT(yes)],
[AC_MSG_ERROR(too old)])
am_display_PYTHON=$PYTHON
else
# Otherwise, try each interpreter until we find one that satisfies
# VERSION.
AC_CACHE_CHECK([for a Python interpreter with version >= $1],
[am_cv_pathless_PYTHON],[
for am_cv_pathless_PYTHON in _AM_PYTHON_INTERPRETER_LIST none; do
test "$am_cv_pathless_PYTHON" = none && break
AM_PYTHON_CHECK_VERSION([$am_cv_pathless_PYTHON], [$1], [break])
done])
# Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON.
if test "$am_cv_pathless_PYTHON" = none; then
PYTHON=:
else
AC_PATH_PROG([PYTHON], [$am_cv_pathless_PYTHON])
fi
am_display_PYTHON=$am_cv_pathless_PYTHON
fi
])
if test "$PYTHON" = :; then
dnl Run any user-specified action, or abort.
m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])])
else
dnl Query Python for its version number. Getting [:3] seems to be
dnl the best way to do this; it's what "site.py" does in the standard
dnl library.
AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version],
[am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`])
AC_SUBST([PYTHON_VERSION], [$am_cv_python_version])
dnl Use the values of $prefix and $exec_prefix for the corresponding
dnl values of PYTHON_PREFIX and PYTHON_EXEC_PREFIX. These are made
dnl distinct variables so they can be overridden if need be. However,
dnl general consensus is that you shouldn't need this ability.
AC_SUBST([PYTHON_PREFIX], ['${prefix}'])
AC_SUBST([PYTHON_EXEC_PREFIX], ['${exec_prefix}'])
dnl At times (like when building shared libraries) you may want
dnl to know which OS platform Python thinks this is.
AC_CACHE_CHECK([for $am_display_PYTHON platform], [am_cv_python_platform],
[am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`])
AC_SUBST([PYTHON_PLATFORM], [$am_cv_python_platform])
dnl Set up 4 directories:
dnl pythondir -- where to install python scripts. This is the
dnl site-packages directory, not the python standard library
dnl directory like in previous automake betas. This behavior
dnl is more consistent with lispdir.m4 for example.
dnl Query distutils for this directory. distutils does not exist in
dnl Python 1.5, so we fall back to the hardcoded directory if it
dnl doesn't work.
AC_CACHE_CHECK([for $am_display_PYTHON script directory],
[am_cv_python_pythondir],
[if test "x$prefix" = xNONE
then
am_py_prefix=$ac_default_prefix
else
am_py_prefix=$prefix
fi
am_cv_python_pythondir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(0,0,prefix='$am_py_prefix'))" 2>/dev/null ||
echo "$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages"`
case $am_cv_python_pythondir in
$am_py_prefix*)
am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'`
am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"`
;;
esac
])
AC_SUBST([pythondir], [$am_cv_python_pythondir])
dnl pkgpythondir -- $PACKAGE directory under pythondir. Was
dnl PYTHON_SITE_PACKAGE in previous betas, but this naming is
dnl more consistent with the rest of automake.
AC_SUBST([pkgpythondir], [\${pythondir}/$PACKAGE])
dnl pyexecdir -- directory for installing python extension modules
dnl (shared libraries)
dnl Query distutils for this directory. distutils does not exist in
dnl Python 1.5, so we fall back to the hardcoded directory if it
dnl doesn't work.
AC_CACHE_CHECK([for $am_display_PYTHON extension module directory],
[am_cv_python_pyexecdir],
[if test "x$exec_prefix" = xNONE
then
am_py_exec_prefix=$am_py_prefix
else
am_py_exec_prefix=$exec_prefix
fi
am_cv_python_pyexecdir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(1,0,prefix='$am_py_exec_prefix'))" 2>/dev/null ||
echo "$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages"`
case $am_cv_python_pyexecdir in
$am_py_exec_prefix*)
am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'`
am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"`
;;
esac
])
AC_SUBST([pyexecdir], [$am_cv_python_pyexecdir])
dnl pkgpyexecdir -- $(pyexecdir)/$(PACKAGE)
AC_SUBST([pkgpyexecdir], [\${pyexecdir}/$PACKAGE])
dnl Run any user-specified action.
$2
fi
])
# AM_PYTHON_CHECK_VERSION(PROG, VERSION, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
# ---------------------------------------------------------------------------
# Run ACTION-IF-TRUE if the Python interpreter PROG has version >= VERSION.
# Run ACTION-IF-FALSE otherwise.
# This test uses sys.hexversion instead of the string equivalent (first
# word of sys.version), in order to cope with versions such as 2.2c1.
# This supports Python 2.0 or higher. (2.0 was released on October 16, 2000).
AC_DEFUN([AM_PYTHON_CHECK_VERSION],
[prog="import sys
# split strings by '.' and convert to numeric. Append some zeros
# because we need at least 4 digits for the hex conversion.
# map returns an iterator in Python 3.0 and a list in 2.x
minver = list(map(int, '$2'.split('.'))) + [[0, 0, 0]]
minverhex = 0
# xrange is not present in Python 3.0 and range returns an iterator
for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]]
sys.exit(sys.hexversion < minverhex)"
AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])])
# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# AM_RUN_LOG(COMMAND)
# -------------------
# Run COMMAND, save the exit status in ac_status, and log it.
# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
AC_DEFUN([AM_RUN_LOG],
[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
(exit $ac_status); }])
# Check to make sure that the build environment is sane. -*- Autoconf -*-
# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008
# Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 5
# AM_SANITY_CHECK
# ---------------
AC_DEFUN([AM_SANITY_CHECK],
[AC_MSG_CHECKING([whether build environment is sane])
# Just in case
sleep 1
echo timestamp > conftest.file
# Reject unsafe characters in $srcdir or the absolute working directory
# name. Accept space and tab only in the latter.
am_lf='
'
case `pwd` in
*[[\\\"\#\$\&\'\`$am_lf]]*)
AC_MSG_ERROR([unsafe absolute working directory name]);;
esac
case $srcdir in
*[[\\\"\#\$\&\'\`$am_lf\ \ ]]*)
AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);;
esac
# Do `set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
if test "$[*]" = "X"; then
# -L didn't work.
set X `ls -t "$srcdir/configure" conftest.file`
fi
rm -f conftest.file
if test "$[*]" != "X $srcdir/configure conftest.file" \
&& test "$[*]" != "X conftest.file $srcdir/configure"; then
# If neither matched, then we have a broken ls. This can happen
# if, for instance, CONFIG_SHELL is bash and it inherits a
# broken ls alias from the environment. This has actually
# happened. Such a system could not be considered "sane".
AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
alias in your environment])
fi
test "$[2]" = conftest.file
)
then
# Ok.
:
else
AC_MSG_ERROR([newly created file is older than distributed files!
Check your system clock])
fi
AC_MSG_RESULT(yes)])
# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# AM_PROG_INSTALL_STRIP
# ---------------------
# One issue with vendor `install' (even GNU) is that you can't
# specify the program used to strip binaries. This is especially
# annoying in cross-compiling environments, where the build's strip
# is unlikely to handle the host's binaries.
# Fortunately install-sh will honor a STRIPPROG variable, so we
# always use install-sh in `make install-strip', and initialize
# STRIPPROG with the value of the STRIP variable (set by the user).
AC_DEFUN([AM_PROG_INSTALL_STRIP],
[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
# Installed binaries are usually stripped using `strip' when the user
# run `make install-strip'. However `strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
# will honor the `STRIP' environment variable to overrule this program.
dnl Don't test for $cross_compiling = yes, because it might be `maybe'.
if test "$cross_compiling" != no; then
AC_CHECK_TOOL([STRIP], [strip], :)
fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
# Copyright (C) 2006, 2008 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 2
# _AM_SUBST_NOTMAKE(VARIABLE)
# ---------------------------
# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
# This macro is traced by Automake.
AC_DEFUN([_AM_SUBST_NOTMAKE])
# AM_SUBST_NOTMAKE(VARIABLE)
# ---------------------------
# Public sister of _AM_SUBST_NOTMAKE.
AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
# Copyright (C) 2004, 2005 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# serial 2
# _AM_PROG_TAR(FORMAT)
# --------------------
# Check how to create a tarball in format FORMAT.
# FORMAT should be one of `v7', `ustar', or `pax'.
#
# Substitute a variable $(am__tar) that is a command
# writing to stdout a FORMAT-tarball containing the directory
# $tardir.
# tardir=directory && $(am__tar) > result.tar
#
# Substitute a variable $(am__untar) that extract such
# a tarball read from stdin.
# $(am__untar) < result.tar
AC_DEFUN([_AM_PROG_TAR],
[# Always define AMTAR for backward compatibility.
AM_MISSING_PROG([AMTAR], [tar])
m4_if([$1], [v7],
[am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'],
[m4_case([$1], [ustar],, [pax],,
[m4_fatal([Unknown tar format])])
AC_MSG_CHECKING([how to create a $1 tar archive])
# Loop over all known methods to create a tar archive until one works.
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
_am_tools=${am_cv_prog_tar_$1-$_am_tools}
# Do not fold the above two line into one, because Tru64 sh and
# Solaris sh will not grok spaces in the rhs of `-'.
for _am_tool in $_am_tools
do
case $_am_tool in
gnutar)
for _am_tar in tar gnutar gtar;
do
AM_RUN_LOG([$_am_tar --version]) && break
done
am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
am__untar="$_am_tar -xf -"
;;
plaintar)
# Must skip GNU tar: if it does not support --format= it doesn't create
# ustar tarball either.
(tar --version) >/dev/null 2>&1 && continue
am__tar='tar chf - "$$tardir"'
am__tar_='tar chf - "$tardir"'
am__untar='tar xf -'
;;
pax)
am__tar='pax -L -x $1 -w "$$tardir"'
am__tar_='pax -L -x $1 -w "$tardir"'
am__untar='pax -r'
;;
cpio)
am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
am__untar='cpio -i -H $1 -d'
;;
none)
am__tar=false
am__tar_=false
am__untar=false
;;
esac
# If the value was cached, stop now. We just wanted to have am__tar
# and am__untar set.
test -n "${am_cv_prog_tar_$1}" && break
# tar/untar a dummy directory, and stop if the command works
rm -rf conftest.dir
mkdir conftest.dir
echo GrepMe > conftest.dir/file
AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
rm -rf conftest.dir
if test -s conftest.tar; then
AM_RUN_LOG([$am__untar /dev/null 2>&1 && break
fi
done
rm -rf conftest.dir
AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
AC_MSG_RESULT([$am_cv_prog_tar_$1])])
AC_SUBST([am__tar])
AC_SUBST([am__untar])
]) # _AM_PROG_TAR
firmware-tools-2.1.14/Makefile-std 0000664 0017654 0017654 00000005065 11376562051 023357 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:noexpandtab:autoindent:tabstop=8:shiftwidth=8:filetype=make:nocindent:tw=0:
# This is a template of all of the 'Standard' stuff that we use in all our
# projects.
CLEANFILES=$(PACKAGE_NAME)-*.tar.gz $(PACKAGE_NAME)-*.tar.bz2 $(PACKAGE_NAME)-*.rpm _buildtemp version
DISTCLEANFILES=*~
EXTRA_DIST =
EXTRA_PROGRAMS=
CLEANFILES += $(EXTRA_PROGRAMS)
CLEANFILES += *.pyc */*.pyc */*/*.pyc */*/*/*.pyc
DISTCLEANFILES += pkg/$(PACKAGE).spec
clean-local:
-test -z "$(CLEANFILES)" || rm -rf $(CLEANFILES)
distclean-local:
-test -z "$(DISTCLEANFILES)" || rm -rf $(DISTCLEANFILES)
.PHONY: git-tag
git-tag:
git tag -u libsmbios -m "tag for official release: $(PACKAGE_STRING)" v$(PACKAGE_VERSION)
.PHONY: get-version
get-version:
@echo 'PACKAGE_STRING="$(PACKAGE_STRING)"'
@echo 'PACKAGE_VERSION="$(PACKAGE_VERSION)"'
@echo 'PACKAGE="$(PACKAGE)"'
ChangeLog:
(GIT_DIR=$(top_srcdir)/.git git log > .changelog.tmp && mv .changelog.tmp ChangeLog; rm -f .changelog.tmp) || (touch ChangeLog; echo 'git directory not found: installing possibly empty changelog.' >&2)
AUTHORS:
(GIT_DIR=$(top_srcdir)/.git git log | grep ^Author | sort |uniq > .authors.tmp && mv .authors.tmp AUTHORS; rm -f .authors.tmp) || (touch AUTHORS; echo 'git directory not found: installing possibly empty AUTHORS.' >&2)
REPLACE_VARS=GETTEXT_PACKAGE PACKAGE_VERSION PACKAGE localedir libdir libexecdir datadir sysconfdir pythondir pkgpythondir pkgdatadir pkgconfdir pkggladedir pkglibexecdir
define replace_vars_in_file
$(foreach VAR,$(REPLACE_VARS),perl -p -i -e "s|^$(VAR)\s*=.*|$(VAR)=\"$($(VAR))\"|" $(1);)
endef
DATA_HOOK_REPLACE=
install-data-hook:
$(foreach FILE,$(DATA_HOOK_REPLACE),$(call replace_vars_in_file,$(addprefix $(DESTDIR)/,$(FILE))))
EXEC_HOOK_REPLACE=
install-exec-hook:
$(foreach FILE,$(EXEC_HOOK_REPLACE),$(call replace_vars_in_file,$(addprefix $(DESTDIR)/,$(FILE))))
TOPDIR := $(shell cd $(top_builddir);pwd)
BUILDDIR = $(TOPDIR)/_rpmbuild
RPMDIR = $(TOPDIR)
SOURCEDIR = $(TOPDIR)
SPECFILE= $(TOPDIR)/pkg/$(PACKAGE_NAME).spec
SPECDIR = $(TOPDIR)/pkg
SRCRPMDIR = $(TOPDIR)
AM_RPM_DEFINES = --define "_topdir $(TOPDIR)" \
--define "_builddir $(BUILDDIR)" \
--define "_rpmdir $(RPMDIR)" \
--define "_sourcedir $(SOURCEDIR)" \
--define "_specdir $(SPECDIR)" \
--define "_srcrpmdir $(SRCRPMDIR)" \
$(RPM_DEFINES)
.PHONY: rpm srpm
rpm: pkg/$(PACKAGE_NAME).spec dist
mkdir -p $(BUILDDIR)
rpmbuild $(AM_RPM_DEFINES) -ba --nodeps $(SPECFILE)
rm -rf $(BUILDDIR)
srpm: pkg/$(PACKAGE_NAME).spec dist
mkdir -p $(BUILDDIR)
rpmbuild $(AM_RPM_DEFINES) -bs --nodeps $(SPECFILE)
rm -rf $(BUILDDIR)
firmware-tools-2.1.14/Makefile.am 0000664 0017654 0017654 00000003731 11376561576 023174 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # vim:noexpandtab:autoindent:tabstop=8:shiftwidth=8:filetype=make:nocindent:tw=0:
include Makefile-std
pkgconfdir = $(sysconfdir)/firmware/
nodist_pkgconf_DATA = etc/firmware/firmware.conf
EXTRA_DIST += etc doc glade test yum-plugin COPYING-GPL COPYING-OSL COPYING.LIB
TESTS = test/testAll.py
nodist_check_SCRIPTS = test/testAll.py
dist_sbin_SCRIPTS = \
bin/firmwaretool \
bin/inventory_firmware_gui
nodist_pkgdata_DATA = glade/inventory_firmware_gui.glade
pkgpython_PYTHON = \
firmwaretools/bootstrap_pci.py \
firmwaretools/dep_parser.py \
firmwaretools/errors.py \
firmwaretools/i18n.py \
firmwaretools/mockpackage.py \
firmwaretools/mockrepository.py \
firmwaretools/package.py \
firmwaretools/plugins.py \
firmwaretools/ply_lex.py \
firmwaretools/ply_yacc.py \
firmwaretools/pycompat.py \
firmwaretools/repository.py \
firmwaretools/trace_decorator.py \
firmwaretools/peak_util_decorators.py \
firmwaretools/compat_subprocess.py \
firmwaretools/generated/__init__.py
clidir = $(datadir)/$(PACKAGE)
cli_PYTHON = \
ft-cli/cli.py \
ft-cli/ftcommands.py \
ft-cli/ftmain.py \
ft-cli/guihelpers.py
plugindir = $(clidir)/plugins
plugin_PYTHON = \
ft-cli/plugins/bootstrap_cmd.py \
ft-cli/plugins/inventory_cmd.py \
ft-cli/plugins/listplugins_cmd.py \
ft-cli/plugins/update_cmd.py
__VERSION__=$(VERSION)
PYTHONDIR=$(pythondir)
PKGDATADIR=$(pkgdatadir)
PKGGLADEDIR=$(pkgdatadir)
SYSCONFDIR=$(sysconfdir)
PKGPYTHONDIR=$(pkgpythondir)
DATADIR=$(datadir)
PKGCONFDIR=$(pkgconfdir)
REPLACE_VARS+= __VERSION__ PYTHONDIR PKGDATADIR PKGGLADEDIR SYSCONFDIR PKGPYTHONDIR DATADIR PKGCONFDIR
DATA_HOOK_REPLACE += \
$(sbindir)/firmwaretool \
$(sbindir)/inventory_firmware_gui
EXTRA_DIST += firmwaretools/__init__.py
DISTCLEANFILES += firmwaretools/generated/__init__.py
REPL_FILE=
firmwaretools/generated/__init__.py: firmwaretools/__init__.py configure Makefile config.status
mkdir -p $$(dirname $@) ||:
cp $< $@
$(call replace_vars_in_file,$@)
firmware-tools-2.1.14/Makefile.in 0000664 0017654 0017654 00000076335 11452664727 023214 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 # Makefile.in generated by automake 1.11 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
# Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
# vim:noexpandtab:autoindent:tabstop=8:shiftwidth=8:filetype=make:nocindent:tw=0:
# vim:noexpandtab:autoindent:tabstop=8:shiftwidth=8:filetype=make:nocindent:tw=0:
# This is a template of all of the 'Standard' stuff that we use in all our
# projects.
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
DIST_COMMON = README $(am__configure_deps) $(cli_PYTHON) \
$(dist_sbin_SCRIPTS) $(pkgpython_PYTHON) $(plugin_PYTHON) \
$(srcdir)/Makefile-std $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(top_srcdir)/configure \
$(top_srcdir)/pkg/${PACKAGE_NAME}.spec.in AUTHORS COPYING.LIB \
ChangeLog pkg/install-sh pkg/missing pkg/py-compile
EXTRA_PROGRAMS =
subdir = .
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
configure.lineno config.status.lineno
mkinstalldirs = $(install_sh) -d
CONFIG_CLEAN_FILES = pkg/${PACKAGE_NAME}.spec
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__installdirs = "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(clidir)" \
"$(DESTDIR)$(pkgpythondir)" "$(DESTDIR)$(plugindir)" \
"$(DESTDIR)$(pkgconfdir)" "$(DESTDIR)$(pkgdatadir)"
SCRIPTS = $(dist_sbin_SCRIPTS)
SOURCES =
DIST_SOURCES =
py_compile = $(top_srcdir)/pkg/py-compile
DATA = $(nodist_pkgconf_DATA) $(nodist_pkgdata_DATA)
am__tty_colors = \
red=; grn=; lgn=; blu=; std=
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
distdir = $(PACKAGE)-$(VERSION)
top_distdir = $(distdir)
am__remove_distdir = \
{ test ! -d "$(distdir)" \
|| { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
&& rm -fr "$(distdir)"; }; }
DIST_ARCHIVES = $(distdir).tar.gz $(distdir).tar.bz2 \
$(distdir).tar.lzma
GZIP_ENV = --best
distuninstallcheck_listfiles = find . -type f -print
distcleancheck_listfiles = find . -type f -print
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MKDIR_P = @MKDIR_P@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_VERSION = @PYTHON_VERSION@
RELEASE_EXTRA = @RELEASE_EXTRA@
RELEASE_MAJOR = @RELEASE_MAJOR@
RELEASE_MICRO = @RELEASE_MICRO@
RELEASE_MINOR = @RELEASE_MINOR@
RELEASE_RPM_EXTRA = @RELEASE_RPM_EXTRA@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
am__leading_dot = @am__leading_dot@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build_alias = @build_alias@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host_alias = @host_alias@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
CLEANFILES = $(PACKAGE_NAME)-*.tar.gz $(PACKAGE_NAME)-*.tar.bz2 \
$(PACKAGE_NAME)-*.rpm _buildtemp version $(EXTRA_PROGRAMS) \
*.pyc */*.pyc */*/*.pyc */*/*/*.pyc
DISTCLEANFILES = *~ pkg/$(PACKAGE).spec \
firmwaretools/generated/__init__.py
EXTRA_DIST = etc doc glade test yum-plugin COPYING-GPL COPYING-OSL \
COPYING.LIB firmwaretools/__init__.py
REPLACE_VARS = GETTEXT_PACKAGE PACKAGE_VERSION PACKAGE localedir \
libdir libexecdir datadir sysconfdir pythondir pkgpythondir \
pkgdatadir pkgconfdir pkggladedir pkglibexecdir __VERSION__ \
PYTHONDIR PKGDATADIR PKGGLADEDIR SYSCONFDIR PKGPYTHONDIR \
DATADIR PKGCONFDIR
DATA_HOOK_REPLACE = $(sbindir)/firmwaretool \
$(sbindir)/inventory_firmware_gui
EXEC_HOOK_REPLACE =
TOPDIR := $(shell cd $(top_builddir);pwd)
BUILDDIR = $(TOPDIR)/_rpmbuild
RPMDIR = $(TOPDIR)
SOURCEDIR = $(TOPDIR)
SPECFILE = $(TOPDIR)/pkg/$(PACKAGE_NAME).spec
SPECDIR = $(TOPDIR)/pkg
SRCRPMDIR = $(TOPDIR)
AM_RPM_DEFINES = --define "_topdir $(TOPDIR)" \
--define "_builddir $(BUILDDIR)" \
--define "_rpmdir $(RPMDIR)" \
--define "_sourcedir $(SOURCEDIR)" \
--define "_specdir $(SPECDIR)" \
--define "_srcrpmdir $(SRCRPMDIR)" \
$(RPM_DEFINES)
pkgconfdir = $(sysconfdir)/firmware/
nodist_pkgconf_DATA = etc/firmware/firmware.conf
TESTS = test/testAll.py
nodist_check_SCRIPTS = test/testAll.py
dist_sbin_SCRIPTS = \
bin/firmwaretool \
bin/inventory_firmware_gui
nodist_pkgdata_DATA = glade/inventory_firmware_gui.glade
pkgpython_PYTHON = \
firmwaretools/bootstrap_pci.py \
firmwaretools/dep_parser.py \
firmwaretools/errors.py \
firmwaretools/i18n.py \
firmwaretools/mockpackage.py \
firmwaretools/mockrepository.py \
firmwaretools/package.py \
firmwaretools/plugins.py \
firmwaretools/ply_lex.py \
firmwaretools/ply_yacc.py \
firmwaretools/pycompat.py \
firmwaretools/repository.py \
firmwaretools/trace_decorator.py \
firmwaretools/peak_util_decorators.py \
firmwaretools/compat_subprocess.py \
firmwaretools/generated/__init__.py
clidir = $(datadir)/$(PACKAGE)
cli_PYTHON = \
ft-cli/cli.py \
ft-cli/ftcommands.py \
ft-cli/ftmain.py \
ft-cli/guihelpers.py
plugindir = $(clidir)/plugins
plugin_PYTHON = \
ft-cli/plugins/bootstrap_cmd.py \
ft-cli/plugins/inventory_cmd.py \
ft-cli/plugins/listplugins_cmd.py \
ft-cli/plugins/update_cmd.py
__VERSION__ = $(VERSION)
PYTHONDIR = $(pythondir)
PKGDATADIR = $(pkgdatadir)
PKGGLADEDIR = $(pkgdatadir)
SYSCONFDIR = $(sysconfdir)
PKGPYTHONDIR = $(pkgpythondir)
DATADIR = $(datadir)
PKGCONFDIR = $(pkgconfdir)
REPL_FILE =
all: all-am
.SUFFIXES:
am--refresh:
@:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/Makefile-std $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
$(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
echo ' $(SHELL) ./config.status'; \
$(SHELL) ./config.status;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
$(SHELL) ./config.status --recheck
$(top_srcdir)/configure: $(am__configure_deps)
$(am__cd) $(srcdir) && $(AUTOCONF)
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
$(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
$(am__aclocal_m4_deps):
pkg/${PACKAGE_NAME}.spec: $(top_builddir)/config.status $(top_srcdir)/pkg/${PACKAGE_NAME}.spec.in
cd $(top_builddir) && $(SHELL) ./config.status $@
install-dist_sbinSCRIPTS: $(dist_sbin_SCRIPTS)
@$(NORMAL_INSTALL)
test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
@list='$(dist_sbin_SCRIPTS)'; test -n "$(sbindir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
done | \
sed -e 'p;s,.*/,,;n' \
-e 'h;s|.*|.|' \
-e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
{ d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
if ($$2 == $$4) { files[d] = files[d] " " $$1; \
if (++n[d] == $(am__install_max)) { \
print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
else { print "f", d "/" $$4, $$1 } } \
END { for (d in files) print "f", d, files[d] }' | \
while read type dir files; do \
if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
test -z "$$files" || { \
echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(sbindir)$$dir'"; \
$(INSTALL_SCRIPT) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \
} \
; done
uninstall-dist_sbinSCRIPTS:
@$(NORMAL_UNINSTALL)
@list='$(dist_sbin_SCRIPTS)'; test -n "$(sbindir)" || exit 0; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 's,.*/,,;$(transform)'`; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(sbindir)" && rm -f $$files
install-cliPYTHON: $(cli_PYTHON)
@$(NORMAL_INSTALL)
test -z "$(clidir)" || $(MKDIR_P) "$(DESTDIR)$(clidir)"
@list='$(cli_PYTHON)'; dlist=; list2=; test -n "$(clidir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \
if test -f $$b$$p; then \
$(am__strip_dir) \
dlist="$$dlist $$f"; \
list2="$$list2 $$b$$p"; \
else :; fi; \
done; \
for file in $$list2; do echo $$file; done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(clidir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(clidir)" || exit $$?; \
done || exit $$?; \
if test -n "$$dlist"; then \
if test -z "$(DESTDIR)"; then \
PYTHON=$(PYTHON) $(py_compile) --basedir "$(clidir)" $$dlist; \
else \
PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(clidir)" $$dlist; \
fi; \
else :; fi
uninstall-cliPYTHON:
@$(NORMAL_UNINSTALL)
@list='$(cli_PYTHON)'; test -n "$(clidir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
filesc=`echo "$$files" | sed 's|$$|c|'`; \
fileso=`echo "$$files" | sed 's|$$|o|'`; \
echo " ( cd '$(DESTDIR)$(clidir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(clidir)" && rm -f $$files || exit $$?; \
echo " ( cd '$(DESTDIR)$(clidir)' && rm -f" $$filesc ")"; \
cd "$(DESTDIR)$(clidir)" && rm -f $$filesc || exit $$?; \
echo " ( cd '$(DESTDIR)$(clidir)' && rm -f" $$fileso ")"; \
cd "$(DESTDIR)$(clidir)" && rm -f $$fileso
install-pkgpythonPYTHON: $(pkgpython_PYTHON)
@$(NORMAL_INSTALL)
test -z "$(pkgpythondir)" || $(MKDIR_P) "$(DESTDIR)$(pkgpythondir)"
@list='$(pkgpython_PYTHON)'; dlist=; list2=; test -n "$(pkgpythondir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \
if test -f $$b$$p; then \
$(am__strip_dir) \
dlist="$$dlist $$f"; \
list2="$$list2 $$b$$p"; \
else :; fi; \
done; \
for file in $$list2; do echo $$file; done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgpythondir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(pkgpythondir)" || exit $$?; \
done || exit $$?; \
if test -n "$$dlist"; then \
if test -z "$(DESTDIR)"; then \
PYTHON=$(PYTHON) $(py_compile) --basedir "$(pkgpythondir)" $$dlist; \
else \
PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(pkgpythondir)" $$dlist; \
fi; \
else :; fi
uninstall-pkgpythonPYTHON:
@$(NORMAL_UNINSTALL)
@list='$(pkgpython_PYTHON)'; test -n "$(pkgpythondir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
filesc=`echo "$$files" | sed 's|$$|c|'`; \
fileso=`echo "$$files" | sed 's|$$|o|'`; \
echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$files || exit $$?; \
echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$filesc ")"; \
cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$filesc || exit $$?; \
echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$fileso ")"; \
cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$fileso
install-pluginPYTHON: $(plugin_PYTHON)
@$(NORMAL_INSTALL)
test -z "$(plugindir)" || $(MKDIR_P) "$(DESTDIR)$(plugindir)"
@list='$(plugin_PYTHON)'; dlist=; list2=; test -n "$(plugindir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \
if test -f $$b$$p; then \
$(am__strip_dir) \
dlist="$$dlist $$f"; \
list2="$$list2 $$b$$p"; \
else :; fi; \
done; \
for file in $$list2; do echo $$file; done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(plugindir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(plugindir)" || exit $$?; \
done || exit $$?; \
if test -n "$$dlist"; then \
if test -z "$(DESTDIR)"; then \
PYTHON=$(PYTHON) $(py_compile) --basedir "$(plugindir)" $$dlist; \
else \
PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(plugindir)" $$dlist; \
fi; \
else :; fi
uninstall-pluginPYTHON:
@$(NORMAL_UNINSTALL)
@list='$(plugin_PYTHON)'; test -n "$(plugindir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
filesc=`echo "$$files" | sed 's|$$|c|'`; \
fileso=`echo "$$files" | sed 's|$$|o|'`; \
echo " ( cd '$(DESTDIR)$(plugindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(plugindir)" && rm -f $$files || exit $$?; \
echo " ( cd '$(DESTDIR)$(plugindir)' && rm -f" $$filesc ")"; \
cd "$(DESTDIR)$(plugindir)" && rm -f $$filesc || exit $$?; \
echo " ( cd '$(DESTDIR)$(plugindir)' && rm -f" $$fileso ")"; \
cd "$(DESTDIR)$(plugindir)" && rm -f $$fileso
install-nodist_pkgconfDATA: $(nodist_pkgconf_DATA)
@$(NORMAL_INSTALL)
test -z "$(pkgconfdir)" || $(MKDIR_P) "$(DESTDIR)$(pkgconfdir)"
@list='$(nodist_pkgconf_DATA)'; test -n "$(pkgconfdir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfdir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfdir)" || exit $$?; \
done
uninstall-nodist_pkgconfDATA:
@$(NORMAL_UNINSTALL)
@list='$(nodist_pkgconf_DATA)'; test -n "$(pkgconfdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(pkgconfdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(pkgconfdir)" && rm -f $$files
install-nodist_pkgdataDATA: $(nodist_pkgdata_DATA)
@$(NORMAL_INSTALL)
test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)"
@list='$(nodist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \
done
uninstall-nodist_pkgdataDATA:
@$(NORMAL_UNINSTALL)
@list='$(nodist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
check-TESTS: $(TESTS)
@failed=0; all=0; xfail=0; xpass=0; skip=0; \
srcdir=$(srcdir); export srcdir; \
list=' $(TESTS) '; \
$(am__tty_colors); \
if test -n "$$list"; then \
for tst in $$list; do \
if test -f ./$$tst; then dir=./; \
elif test -f $$tst; then dir=; \
else dir="$(srcdir)/"; fi; \
if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \
all=`expr $$all + 1`; \
case " $(XFAIL_TESTS) " in \
*[\ \ ]$$tst[\ \ ]*) \
xpass=`expr $$xpass + 1`; \
failed=`expr $$failed + 1`; \
col=$$red; res=XPASS; \
;; \
*) \
col=$$grn; res=PASS; \
;; \
esac; \
elif test $$? -ne 77; then \
all=`expr $$all + 1`; \
case " $(XFAIL_TESTS) " in \
*[\ \ ]$$tst[\ \ ]*) \
xfail=`expr $$xfail + 1`; \
col=$$lgn; res=XFAIL; \
;; \
*) \
failed=`expr $$failed + 1`; \
col=$$red; res=FAIL; \
;; \
esac; \
else \
skip=`expr $$skip + 1`; \
col=$$blu; res=SKIP; \
fi; \
echo "$${col}$$res$${std}: $$tst"; \
done; \
if test "$$all" -eq 1; then \
tests="test"; \
All=""; \
else \
tests="tests"; \
All="All "; \
fi; \
if test "$$failed" -eq 0; then \
if test "$$xfail" -eq 0; then \
banner="$$All$$all $$tests passed"; \
else \
if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \
banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \
fi; \
else \
if test "$$xpass" -eq 0; then \
banner="$$failed of $$all $$tests failed"; \
else \
if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \
banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \
fi; \
fi; \
dashes="$$banner"; \
skipped=""; \
if test "$$skip" -ne 0; then \
if test "$$skip" -eq 1; then \
skipped="($$skip test was not run)"; \
else \
skipped="($$skip tests were not run)"; \
fi; \
test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \
dashes="$$skipped"; \
fi; \
report=""; \
if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \
report="Please report to $(PACKAGE_BUGREPORT)"; \
test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \
dashes="$$report"; \
fi; \
dashes=`echo "$$dashes" | sed s/./=/g`; \
if test "$$failed" -eq 0; then \
echo "$$grn$$dashes"; \
else \
echo "$$red$$dashes"; \
fi; \
echo "$$banner"; \
test -z "$$skipped" || echo "$$skipped"; \
test -z "$$report" || echo "$$report"; \
echo "$$dashes$$std"; \
test "$$failed" -eq 0; \
else :; fi
distdir: $(DISTFILES)
$(am__remove_distdir)
test -d "$(distdir)" || mkdir "$(distdir)"
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
-test -n "$(am__skip_mode_fix)" \
|| find "$(distdir)" -type d ! -perm -777 -exec chmod a+rwx {} \; -o \
! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
! -type d ! -perm -400 -exec chmod a+r {} \; -o \
! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
|| chmod -R a+r "$(distdir)"
dist-gzip: distdir
tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
$(am__remove_distdir)
dist-bzip2: distdir
tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
$(am__remove_distdir)
dist-lzma: distdir
tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
$(am__remove_distdir)
dist-xz: distdir
tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz
$(am__remove_distdir)
dist-tarZ: distdir
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
$(am__remove_distdir)
dist-shar: distdir
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
$(am__remove_distdir)
dist-zip: distdir
-rm -f $(distdir).zip
zip -rq $(distdir).zip $(distdir)
$(am__remove_distdir)
dist dist-all: distdir
tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
$(am__remove_distdir)
# This target untars the dist file and tries a VPATH configuration. Then
# it guarantees that the distribution is self-contained by making another
# tarfile.
distcheck: dist
case '$(DIST_ARCHIVES)' in \
*.tar.gz*) \
GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\
*.tar.bz2*) \
bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\
*.tar.lzma*) \
unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\
*.tar.xz*) \
xz -dc $(distdir).tar.xz | $(am__untar) ;;\
*.tar.Z*) \
uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
*.shar.gz*) \
GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\
*.zip*) \
unzip $(distdir).zip ;;\
esac
chmod -R a-w $(distdir); chmod a+w $(distdir)
mkdir $(distdir)/_build
mkdir $(distdir)/_inst
chmod a-w $(distdir)
test -d $(distdir)/_build || exit 0; \
dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
&& dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
&& am__cwd=`pwd` \
&& $(am__cd) $(distdir)/_build \
&& ../configure --srcdir=.. --prefix="$$dc_install_base" \
$(DISTCHECK_CONFIGURE_FLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) dvi \
&& $(MAKE) $(AM_MAKEFLAGS) check \
&& $(MAKE) $(AM_MAKEFLAGS) install \
&& $(MAKE) $(AM_MAKEFLAGS) installcheck \
&& $(MAKE) $(AM_MAKEFLAGS) uninstall \
&& $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
distuninstallcheck \
&& chmod -R a-w "$$dc_install_base" \
&& ({ \
(cd ../.. && umask 077 && mkdir "$$dc_destdir") \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
} || { rm -rf "$$dc_destdir"; exit 1; }) \
&& rm -rf "$$dc_destdir" \
&& $(MAKE) $(AM_MAKEFLAGS) dist \
&& rm -rf $(DIST_ARCHIVES) \
&& $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
&& cd "$$am__cwd" \
|| exit 1
$(am__remove_distdir)
@(echo "$(distdir) archives ready for distribution: "; \
list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
distuninstallcheck:
@$(am__cd) '$(distuninstallcheck_dir)' \
&& test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
|| { echo "ERROR: files left after uninstall:" ; \
if test -n "$(DESTDIR)"; then \
echo " (check DESTDIR support)"; \
fi ; \
$(distuninstallcheck_listfiles) ; \
exit 1; } >&2
distcleancheck: distclean
@if test '$(srcdir)' = . ; then \
echo "ERROR: distcleancheck can only run from a VPATH build" ; \
exit 1 ; \
fi
@test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
|| { echo "ERROR: files left in build directory after distclean:" ; \
$(distcleancheck_listfiles) ; \
exit 1; } >&2
check-am: all-am
$(MAKE) $(AM_MAKEFLAGS) $(nodist_check_SCRIPTS)
$(MAKE) $(AM_MAKEFLAGS) check-TESTS
check: check-am
all-am: Makefile $(SCRIPTS) $(DATA)
installdirs:
for dir in "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(clidir)" "$(DESTDIR)$(pkgpythondir)" "$(DESTDIR)$(plugindir)" "$(DESTDIR)$(pkgconfdir)" "$(DESTDIR)$(pkgdatadir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic clean-local mostlyclean-am
distclean: distclean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -f Makefile
distclean-am: clean-am distclean-generic distclean-local
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am: install-cliPYTHON install-nodist_pkgconfDATA \
install-nodist_pkgdataDATA install-pkgpythonPYTHON \
install-pluginPYTHON
@$(NORMAL_INSTALL)
$(MAKE) $(AM_MAKEFLAGS) install-data-hook
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am: install-dist_sbinSCRIPTS
@$(NORMAL_INSTALL)
$(MAKE) $(AM_MAKEFLAGS) install-exec-hook
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -rf $(top_srcdir)/autom4te.cache
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-cliPYTHON uninstall-dist_sbinSCRIPTS \
uninstall-nodist_pkgconfDATA uninstall-nodist_pkgdataDATA \
uninstall-pkgpythonPYTHON uninstall-pluginPYTHON
.MAKE: check-am install-am install-data-am install-exec-am \
install-strip
.PHONY: all all-am am--refresh check check-TESTS check-am clean \
clean-generic clean-local dist dist-all dist-bzip2 dist-gzip \
dist-lzma dist-shar dist-tarZ dist-xz dist-zip distcheck \
distclean distclean-generic distclean-local distcleancheck \
distdir distuninstallcheck dvi dvi-am html html-am info \
info-am install install-am install-cliPYTHON install-data \
install-data-am install-data-hook install-dist_sbinSCRIPTS \
install-dvi install-dvi-am install-exec install-exec-am \
install-exec-hook install-html install-html-am install-info \
install-info-am install-man install-nodist_pkgconfDATA \
install-nodist_pkgdataDATA install-pdf install-pdf-am \
install-pkgpythonPYTHON install-pluginPYTHON install-ps \
install-ps-am install-strip installcheck installcheck-am \
installdirs maintainer-clean maintainer-clean-generic \
mostlyclean mostlyclean-generic pdf pdf-am ps ps-am uninstall \
uninstall-am uninstall-cliPYTHON uninstall-dist_sbinSCRIPTS \
uninstall-nodist_pkgconfDATA uninstall-nodist_pkgdataDATA \
uninstall-pkgpythonPYTHON uninstall-pluginPYTHON
clean-local:
-test -z "$(CLEANFILES)" || rm -rf $(CLEANFILES)
distclean-local:
-test -z "$(DISTCLEANFILES)" || rm -rf $(DISTCLEANFILES)
.PHONY: git-tag
git-tag:
git tag -u libsmbios -m "tag for official release: $(PACKAGE_STRING)" v$(PACKAGE_VERSION)
.PHONY: get-version
get-version:
@echo 'PACKAGE_STRING="$(PACKAGE_STRING)"'
@echo 'PACKAGE_VERSION="$(PACKAGE_VERSION)"'
@echo 'PACKAGE="$(PACKAGE)"'
ChangeLog:
(GIT_DIR=$(top_srcdir)/.git git log > .changelog.tmp && mv .changelog.tmp ChangeLog; rm -f .changelog.tmp) || (touch ChangeLog; echo 'git directory not found: installing possibly empty changelog.' >&2)
AUTHORS:
(GIT_DIR=$(top_srcdir)/.git git log | grep ^Author | sort |uniq > .authors.tmp && mv .authors.tmp AUTHORS; rm -f .authors.tmp) || (touch AUTHORS; echo 'git directory not found: installing possibly empty AUTHORS.' >&2)
define replace_vars_in_file
$(foreach VAR,$(REPLACE_VARS),perl -p -i -e "s|^$(VAR)\s*=.*|$(VAR)=\"$($(VAR))\"|" $(1);)
endef
install-data-hook:
$(foreach FILE,$(DATA_HOOK_REPLACE),$(call replace_vars_in_file,$(addprefix $(DESTDIR)/,$(FILE))))
install-exec-hook:
$(foreach FILE,$(EXEC_HOOK_REPLACE),$(call replace_vars_in_file,$(addprefix $(DESTDIR)/,$(FILE))))
.PHONY: rpm srpm
rpm: pkg/$(PACKAGE_NAME).spec dist
mkdir -p $(BUILDDIR)
rpmbuild $(AM_RPM_DEFINES) -ba --nodeps $(SPECFILE)
rm -rf $(BUILDDIR)
srpm: pkg/$(PACKAGE_NAME).spec dist
mkdir -p $(BUILDDIR)
rpmbuild $(AM_RPM_DEFINES) -bs --nodeps $(SPECFILE)
rm -rf $(BUILDDIR)
firmwaretools/generated/__init__.py: firmwaretools/__init__.py configure Makefile config.status
mkdir -p $$(dirname $@) ||:
cp $< $@
$(call replace_vars_in_file,$@)
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
firmware-tools-2.1.14/configure 0000775 0017654 0017654 00000322037 11452664726 023046 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 #! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.63 for firmware-tools 2.1.14.
#
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
## --------------------- ##
## M4sh Initialization. ##
## --------------------- ##
# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
emulate sh
NULLCMD=:
# Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
case `(set -o) 2>/dev/null` in
*posix*) set -o posix ;;
esac
fi
# PATH needs CR
# Avoid depending upon Character Ranges.
as_cr_letters='abcdefghijklmnopqrstuvwxyz'
as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
as_cr_Letters=$as_cr_letters$as_cr_LETTERS
as_cr_digits='0123456789'
as_cr_alnum=$as_cr_Letters$as_cr_digits
as_nl='
'
export as_nl
# Printing a long string crashes Solaris 7 /usr/bin/printf.
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
as_echo='printf %s\n'
as_echo_n='printf %s'
else
if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
as_echo_n='/usr/ucb/echo -n'
else
as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
as_echo_n_body='eval
arg=$1;
case $arg in
*"$as_nl"*)
expr "X$arg" : "X\\(.*\\)$as_nl";
arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
esac;
expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
'
export as_echo_n_body
as_echo_n='sh -c $as_echo_n_body as_echo'
fi
export as_echo_body
as_echo='sh -c $as_echo_body as_echo'
fi
# The user is always right.
if test "${PATH_SEPARATOR+set}" != set; then
PATH_SEPARATOR=:
(PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
(PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
PATH_SEPARATOR=';'
}
fi
# Support unset when possible.
if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
as_unset=unset
else
as_unset=false
fi
# IFS
# We need space, tab and new line, in precisely that order. Quoting is
# there to prevent editors from complaining about space-tab.
# (If _AS_PATH_WALK were called with IFS unset, it would disable word
# splitting by setting IFS to empty value.)
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
case $0 in
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
done
IFS=$as_save_IFS
;;
esac
# We did not find ourselves, most probably we were run as `sh COMMAND'
# in which case we are not to be found in the path.
if test "x$as_myself" = x; then
as_myself=$0
fi
if test ! -f "$as_myself"; then
$as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
{ (exit 1); exit 1; }
fi
# Work around bugs in pre-3.0 UWIN ksh.
for as_var in ENV MAIL MAILPATH
do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
done
PS1='$ '
PS2='> '
PS4='+ '
# NLS nuisances.
LC_ALL=C
export LC_ALL
LANGUAGE=C
export LANGUAGE
# Required to use basename.
if expr a : '\(a\)' >/dev/null 2>&1 &&
test "X`expr 00001 : '.*\(...\)'`" = X001; then
as_expr=expr
else
as_expr=false
fi
if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
as_basename=basename
else
as_basename=false
fi
# Name of the executable.
as_me=`$as_basename -- "$0" ||
$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
X"$0" : 'X\(//\)$' \| \
X"$0" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X/"$0" |
sed '/^.*\/\([^/][^/]*\)\/*$/{
s//\1/
q
}
/^X\/\(\/\/\)$/{
s//\1/
q
}
/^X\/\(\/\).*/{
s//\1/
q
}
s/.*/./; q'`
# CDPATH.
$as_unset CDPATH
if test "x$CONFIG_SHELL" = x; then
if (eval ":") 2>/dev/null; then
as_have_required=yes
else
as_have_required=no
fi
if test $as_have_required = yes && (eval ":
(as_func_return () {
(exit \$1)
}
as_func_success () {
as_func_return 0
}
as_func_failure () {
as_func_return 1
}
as_func_ret_success () {
return 0
}
as_func_ret_failure () {
return 1
}
exitcode=0
if as_func_success; then
:
else
exitcode=1
echo as_func_success failed.
fi
if as_func_failure; then
exitcode=1
echo as_func_failure succeeded.
fi
if as_func_ret_success; then
:
else
exitcode=1
echo as_func_ret_success failed.
fi
if as_func_ret_failure; then
exitcode=1
echo as_func_ret_failure succeeded.
fi
if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
:
else
exitcode=1
echo positional parameters were not saved.
fi
test \$exitcode = 0) || { (exit 1); exit 1; }
(
as_lineno_1=\$LINENO
as_lineno_2=\$LINENO
test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
") 2> /dev/null; then
:
else
as_candidate_shells=
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
case $as_dir in
/*)
for as_base in sh bash ksh sh5; do
as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
done;;
esac
done
IFS=$as_save_IFS
for as_shell in $as_candidate_shells $SHELL; do
# Try only shells that exist, to save several forks.
if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
{ ("$as_shell") 2> /dev/null <<\_ASEOF
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
emulate sh
NULLCMD=:
# Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
case `(set -o) 2>/dev/null` in
*posix*) set -o posix ;;
esac
fi
:
_ASEOF
}; then
CONFIG_SHELL=$as_shell
as_have_required=yes
if { "$as_shell" 2> /dev/null <<\_ASEOF
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
emulate sh
NULLCMD=:
# Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
case `(set -o) 2>/dev/null` in
*posix*) set -o posix ;;
esac
fi
:
(as_func_return () {
(exit $1)
}
as_func_success () {
as_func_return 0
}
as_func_failure () {
as_func_return 1
}
as_func_ret_success () {
return 0
}
as_func_ret_failure () {
return 1
}
exitcode=0
if as_func_success; then
:
else
exitcode=1
echo as_func_success failed.
fi
if as_func_failure; then
exitcode=1
echo as_func_failure succeeded.
fi
if as_func_ret_success; then
:
else
exitcode=1
echo as_func_ret_success failed.
fi
if as_func_ret_failure; then
exitcode=1
echo as_func_ret_failure succeeded.
fi
if ( set x; as_func_ret_success y && test x = "$1" ); then
:
else
exitcode=1
echo positional parameters were not saved.
fi
test $exitcode = 0) || { (exit 1); exit 1; }
(
as_lineno_1=$LINENO
as_lineno_2=$LINENO
test "x$as_lineno_1" != "x$as_lineno_2" &&
test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
_ASEOF
}; then
break
fi
fi
done
if test "x$CONFIG_SHELL" != x; then
for as_var in BASH_ENV ENV
do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
done
export CONFIG_SHELL
exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
fi
if test $as_have_required = no; then
echo This script requires a shell more modern than all the
echo shells that I found on your system. Please install a
echo modern shell, or manually run the script under such a
echo shell if you do have one.
{ (exit 1); exit 1; }
fi
fi
fi
(eval "as_func_return () {
(exit \$1)
}
as_func_success () {
as_func_return 0
}
as_func_failure () {
as_func_return 1
}
as_func_ret_success () {
return 0
}
as_func_ret_failure () {
return 1
}
exitcode=0
if as_func_success; then
:
else
exitcode=1
echo as_func_success failed.
fi
if as_func_failure; then
exitcode=1
echo as_func_failure succeeded.
fi
if as_func_ret_success; then
:
else
exitcode=1
echo as_func_ret_success failed.
fi
if as_func_ret_failure; then
exitcode=1
echo as_func_ret_failure succeeded.
fi
if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
:
else
exitcode=1
echo positional parameters were not saved.
fi
test \$exitcode = 0") || {
echo No shell found that supports shell functions.
echo Please tell bug-autoconf@gnu.org about your system,
echo including any error possibly output before this message.
echo This can help us improve future autoconf versions.
echo Configuration will now proceed without shell functions.
}
as_lineno_1=$LINENO
as_lineno_2=$LINENO
test "x$as_lineno_1" != "x$as_lineno_2" &&
test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
# Create $as_me.lineno as a copy of $as_myself, but with $LINENO
# uniformly replaced by the line number. The first 'sed' inserts a
# line-number line after each line using $LINENO; the second 'sed'
# does the real work. The second script uses 'N' to pair each
# line-number line with the line containing $LINENO, and appends
# trailing '-' during substitution so that $LINENO is not a special
# case at line end.
# (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
# scripts with optimization help from Paolo Bonzini. Blame Lee
# E. McMahon (1931-1989) for sed's syntax. :-)
sed -n '
p
/[$]LINENO/=
' <$as_myself |
sed '
s/[$]LINENO.*/&-/
t lineno
b
:lineno
N
:loop
s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
t loop
s/-\n.*//
' >$as_me.lineno &&
chmod +x "$as_me.lineno" ||
{ $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
{ (exit 1); exit 1; }; }
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
. "./$as_me.lineno"
# Exit status is that of the last command.
exit
}
if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
as_dirname=dirname
else
as_dirname=false
fi
ECHO_C= ECHO_N= ECHO_T=
case `echo -n x` in
-n*)
case `echo 'x\c'` in
*c*) ECHO_T=' ';; # ECHO_T is single tab character.
*) ECHO_C='\c';;
esac;;
*)
ECHO_N='-n';;
esac
if expr a : '\(a\)' >/dev/null 2>&1 &&
test "X`expr 00001 : '.*\(...\)'`" = X001; then
as_expr=expr
else
as_expr=false
fi
rm -f conf$$ conf$$.exe conf$$.file
if test -d conf$$.dir; then
rm -f conf$$.dir/conf$$.file
else
rm -f conf$$.dir
mkdir conf$$.dir 2>/dev/null
fi
if (echo >conf$$.file) 2>/dev/null; then
if ln -s conf$$.file conf$$ 2>/dev/null; then
as_ln_s='ln -s'
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
# In both cases, we have to default to `cp -p'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
as_ln_s='cp -p'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
as_ln_s='cp -p'
fi
else
as_ln_s='cp -p'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
if mkdir -p . 2>/dev/null; then
as_mkdir_p=:
else
test -d ./-p && rmdir ./-p
as_mkdir_p=false
fi
if test -x / >/dev/null 2>&1; then
as_test_x='test -x'
else
if ls -dL / >/dev/null 2>&1; then
as_ls_L_option=L
else
as_ls_L_option=
fi
as_test_x='
eval sh -c '\''
if test -d "$1"; then
test -d "$1/.";
else
case $1 in
-*)set "./$1";;
esac;
case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
???[sx]*):;;*)false;;esac;fi
'\'' sh
'
fi
as_executable_p=$as_test_x
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
# Sed expression to map a string onto a valid variable name.
as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
exec 7<&0 &1
# Name of the host.
# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
# so uname gets run too.
ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
#
# Initializations.
#
ac_default_prefix=/usr/local
ac_clean_files=
ac_config_libobj_dir=.
LIBOBJS=
cross_compiling=no
subdirs=
MFLAGS=
MAKEFLAGS=
SHELL=${CONFIG_SHELL-/bin/sh}
# Identity of this package.
PACKAGE_NAME='firmware-tools'
PACKAGE_TARNAME='firmware-tools'
PACKAGE_VERSION='2.1.14'
PACKAGE_STRING='firmware-tools 2.1.14'
PACKAGE_BUGREPORT=''
ac_subst_vars='LTLIBOBJS
LIBOBJS
RELEASE_RPM_EXTRA
RELEASE_EXTRA
RELEASE_MICRO
RELEASE_MINOR
RELEASE_MAJOR
pkgpyexecdir
pyexecdir
pkgpythondir
pythondir
PYTHON_PLATFORM
PYTHON_EXEC_PREFIX
PYTHON_PREFIX
PYTHON_VERSION
PYTHON
am__untar
am__tar
AMTAR
am__leading_dot
SET_MAKE
AWK
mkdir_p
MKDIR_P
INSTALL_STRIP_PROGRAM
STRIP
install_sh
MAKEINFO
AUTOHEADER
AUTOMAKE
AUTOCONF
ACLOCAL
VERSION
PACKAGE
CYGPATH_W
am__isrc
INSTALL_DATA
INSTALL_SCRIPT
INSTALL_PROGRAM
target_alias
host_alias
build_alias
LIBS
ECHO_T
ECHO_N
ECHO_C
DEFS
mandir
localedir
libdir
psdir
pdfdir
dvidir
htmldir
infodir
docdir
oldincludedir
includedir
localstatedir
sharedstatedir
sysconfdir
datadir
datarootdir
libexecdir
sbindir
bindir
program_transform_name
prefix
exec_prefix
PACKAGE_BUGREPORT
PACKAGE_STRING
PACKAGE_VERSION
PACKAGE_TARNAME
PACKAGE_NAME
PATH_SEPARATOR
SHELL'
ac_subst_files=''
ac_user_opts='
enable_option_checking
'
ac_precious_vars='build_alias
host_alias
target_alias'
# Initialize some variables set by options.
ac_init_help=
ac_init_version=false
ac_unrecognized_opts=
ac_unrecognized_sep=
# The variables have the same names as the options, with
# dashes changed to underlines.
cache_file=/dev/null
exec_prefix=NONE
no_create=
no_recursion=
prefix=NONE
program_prefix=NONE
program_suffix=NONE
program_transform_name=s,x,x,
silent=
site=
srcdir=
verbose=
x_includes=NONE
x_libraries=NONE
# Installation directory options.
# These are left unexpanded so users can "make install exec_prefix=/foo"
# and all the variables that are supposed to be based on exec_prefix
# by default will actually change.
# Use braces instead of parens because sh, perl, etc. also accept them.
# (The list follows the same order as the GNU Coding Standards.)
bindir='${exec_prefix}/bin'
sbindir='${exec_prefix}/sbin'
libexecdir='${exec_prefix}/libexec'
datarootdir='${prefix}/share'
datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
infodir='${datarootdir}/info'
htmldir='${docdir}'
dvidir='${docdir}'
pdfdir='${docdir}'
psdir='${docdir}'
libdir='${exec_prefix}/lib'
localedir='${datarootdir}/locale'
mandir='${datarootdir}/man'
ac_prev=
ac_dashdash=
for ac_option
do
# If the previous option needs an argument, assign it.
if test -n "$ac_prev"; then
eval $ac_prev=\$ac_option
ac_prev=
continue
fi
case $ac_option in
*=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
*) ac_optarg=yes ;;
esac
# Accept the important Cygnus configure options, so we can diagnose typos.
case $ac_dashdash$ac_option in
--)
ac_dashdash=yes ;;
-bindir | --bindir | --bindi | --bind | --bin | --bi)
ac_prev=bindir ;;
-bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
bindir=$ac_optarg ;;
-build | --build | --buil | --bui | --bu)
ac_prev=build_alias ;;
-build=* | --build=* | --buil=* | --bui=* | --bu=*)
build_alias=$ac_optarg ;;
-cache-file | --cache-file | --cache-fil | --cache-fi \
| --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
ac_prev=cache_file ;;
-cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
| --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
cache_file=$ac_optarg ;;
--config-cache | -C)
cache_file=config.cache ;;
-datadir | --datadir | --datadi | --datad)
ac_prev=datadir ;;
-datadir=* | --datadir=* | --datadi=* | --datad=*)
datadir=$ac_optarg ;;
-datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
| --dataroo | --dataro | --datar)
ac_prev=datarootdir ;;
-datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
| --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
datarootdir=$ac_optarg ;;
-disable-* | --disable-*)
ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
{ $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2
{ (exit 1); exit 1; }; }
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
*"
"enable_$ac_useropt"
"*) ;;
*) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
ac_unrecognized_sep=', ';;
esac
eval enable_$ac_useropt=no ;;
-docdir | --docdir | --docdi | --doc | --do)
ac_prev=docdir ;;
-docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
docdir=$ac_optarg ;;
-dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
ac_prev=dvidir ;;
-dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
dvidir=$ac_optarg ;;
-enable-* | --enable-*)
ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
{ $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2
{ (exit 1); exit 1; }; }
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
*"
"enable_$ac_useropt"
"*) ;;
*) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
ac_unrecognized_sep=', ';;
esac
eval enable_$ac_useropt=\$ac_optarg ;;
-exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
| --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
| --exec | --exe | --ex)
ac_prev=exec_prefix ;;
-exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
| --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
| --exec=* | --exe=* | --ex=*)
exec_prefix=$ac_optarg ;;
-gas | --gas | --ga | --g)
# Obsolete; use --with-gas.
with_gas=yes ;;
-help | --help | --hel | --he | -h)
ac_init_help=long ;;
-help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
ac_init_help=recursive ;;
-help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
ac_init_help=short ;;
-host | --host | --hos | --ho)
ac_prev=host_alias ;;
-host=* | --host=* | --hos=* | --ho=*)
host_alias=$ac_optarg ;;
-htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
ac_prev=htmldir ;;
-htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
| --ht=*)
htmldir=$ac_optarg ;;
-includedir | --includedir | --includedi | --included | --include \
| --includ | --inclu | --incl | --inc)
ac_prev=includedir ;;
-includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
| --includ=* | --inclu=* | --incl=* | --inc=*)
includedir=$ac_optarg ;;
-infodir | --infodir | --infodi | --infod | --info | --inf)
ac_prev=infodir ;;
-infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
infodir=$ac_optarg ;;
-libdir | --libdir | --libdi | --libd)
ac_prev=libdir ;;
-libdir=* | --libdir=* | --libdi=* | --libd=*)
libdir=$ac_optarg ;;
-libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
| --libexe | --libex | --libe)
ac_prev=libexecdir ;;
-libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
| --libexe=* | --libex=* | --libe=*)
libexecdir=$ac_optarg ;;
-localedir | --localedir | --localedi | --localed | --locale)
ac_prev=localedir ;;
-localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
localedir=$ac_optarg ;;
-localstatedir | --localstatedir | --localstatedi | --localstated \
| --localstate | --localstat | --localsta | --localst | --locals)
ac_prev=localstatedir ;;
-localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
| --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
localstatedir=$ac_optarg ;;
-mandir | --mandir | --mandi | --mand | --man | --ma | --m)
ac_prev=mandir ;;
-mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
mandir=$ac_optarg ;;
-nfp | --nfp | --nf)
# Obsolete; use --without-fp.
with_fp=no ;;
-no-create | --no-create | --no-creat | --no-crea | --no-cre \
| --no-cr | --no-c | -n)
no_create=yes ;;
-no-recursion | --no-recursion | --no-recursio | --no-recursi \
| --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
no_recursion=yes ;;
-oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
| --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
| --oldin | --oldi | --old | --ol | --o)
ac_prev=oldincludedir ;;
-oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
| --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
| --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
oldincludedir=$ac_optarg ;;
-prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
ac_prev=prefix ;;
-prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
prefix=$ac_optarg ;;
-program-prefix | --program-prefix | --program-prefi | --program-pref \
| --program-pre | --program-pr | --program-p)
ac_prev=program_prefix ;;
-program-prefix=* | --program-prefix=* | --program-prefi=* \
| --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
program_prefix=$ac_optarg ;;
-program-suffix | --program-suffix | --program-suffi | --program-suff \
| --program-suf | --program-su | --program-s)
ac_prev=program_suffix ;;
-program-suffix=* | --program-suffix=* | --program-suffi=* \
| --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
program_suffix=$ac_optarg ;;
-program-transform-name | --program-transform-name \
| --program-transform-nam | --program-transform-na \
| --program-transform-n | --program-transform- \
| --program-transform | --program-transfor \
| --program-transfo | --program-transf \
| --program-trans | --program-tran \
| --progr-tra | --program-tr | --program-t)
ac_prev=program_transform_name ;;
-program-transform-name=* | --program-transform-name=* \
| --program-transform-nam=* | --program-transform-na=* \
| --program-transform-n=* | --program-transform-=* \
| --program-transform=* | --program-transfor=* \
| --program-transfo=* | --program-transf=* \
| --program-trans=* | --program-tran=* \
| --progr-tra=* | --program-tr=* | --program-t=*)
program_transform_name=$ac_optarg ;;
-pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
ac_prev=pdfdir ;;
-pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
pdfdir=$ac_optarg ;;
-psdir | --psdir | --psdi | --psd | --ps)
ac_prev=psdir ;;
-psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
psdir=$ac_optarg ;;
-q | -quiet | --quiet | --quie | --qui | --qu | --q \
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
| --sbi=* | --sb=*)
sbindir=$ac_optarg ;;
-sharedstatedir | --sharedstatedir | --sharedstatedi \
| --sharedstated | --sharedstate | --sharedstat | --sharedsta \
| --sharedst | --shareds | --shared | --share | --shar \
| --sha | --sh)
ac_prev=sharedstatedir ;;
-sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
| --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
| --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
| --sha=* | --sh=*)
sharedstatedir=$ac_optarg ;;
-site | --site | --sit)
ac_prev=site ;;
-site=* | --site=* | --sit=*)
site=$ac_optarg ;;
-srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
ac_prev=srcdir ;;
-srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
srcdir=$ac_optarg ;;
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
| --syscon | --sysco | --sysc | --sys | --sy)
ac_prev=sysconfdir ;;
-sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
| --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
sysconfdir=$ac_optarg ;;
-target | --target | --targe | --targ | --tar | --ta | --t)
ac_prev=target_alias ;;
-target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
target_alias=$ac_optarg ;;
-v | -verbose | --verbose | --verbos | --verbo | --verb)
verbose=yes ;;
-version | --version | --versio | --versi | --vers | -V)
ac_init_version=: ;;
-with-* | --with-*)
ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
{ $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2
{ (exit 1); exit 1; }; }
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
*"
"with_$ac_useropt"
"*) ;;
*) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
ac_unrecognized_sep=', ';;
esac
eval with_$ac_useropt=\$ac_optarg ;;
-without-* | --without-*)
ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
{ $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2
{ (exit 1); exit 1; }; }
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
*"
"with_$ac_useropt"
"*) ;;
*) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
ac_unrecognized_sep=', ';;
esac
eval with_$ac_useropt=no ;;
--x)
# Obsolete; use --with-x.
with_x=yes ;;
-x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
| --x-incl | --x-inc | --x-in | --x-i)
ac_prev=x_includes ;;
-x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
| --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
x_includes=$ac_optarg ;;
-x-libraries | --x-libraries | --x-librarie | --x-librari \
| --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
ac_prev=x_libraries ;;
-x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
| --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
x_libraries=$ac_optarg ;;
-*) { $as_echo "$as_me: error: unrecognized option: $ac_option
Try \`$0 --help' for more information." >&2
{ (exit 1); exit 1; }; }
;;
*=*)
ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
# Reject names that are not valid shell variable names.
expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
{ $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2
{ (exit 1); exit 1; }; }
eval $ac_envvar=\$ac_optarg
export $ac_envvar ;;
*)
# FIXME: should be removed in autoconf 3.0.
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
$as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
: ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
;;
esac
done
if test -n "$ac_prev"; then
ac_option=--`echo $ac_prev | sed 's/_/-/g'`
{ $as_echo "$as_me: error: missing argument to $ac_option" >&2
{ (exit 1); exit 1; }; }
fi
if test -n "$ac_unrecognized_opts"; then
case $enable_option_checking in
no) ;;
fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2
{ (exit 1); exit 1; }; } ;;
*) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
esac
fi
# Check all directory arguments for consistency.
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
case $ac_val in
*/ )
ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
eval $ac_var=\$ac_val;;
esac
# Be sure to have absolute directory names.
case $ac_val in
[\\/$]* | ?:[\\/]* ) continue;;
NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
esac
{ $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
{ (exit 1); exit 1; }; }
done
# There might be people who depend on the old broken behavior: `$host'
# used to hold the argument of --host etc.
# FIXME: To remove some day.
build=$build_alias
host=$host_alias
target=$target_alias
# FIXME: To remove some day.
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
$as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
If a cross compiler is detected then cross compile mode will be used." >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
fi
ac_tool_prefix=
test -n "$host_alias" && ac_tool_prefix=$host_alias-
test "$silent" = yes && exec 6>/dev/null
ac_pwd=`pwd` && test -n "$ac_pwd" &&
ac_ls_di=`ls -di .` &&
ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
{ $as_echo "$as_me: error: working directory cannot be determined" >&2
{ (exit 1); exit 1; }; }
test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
{ $as_echo "$as_me: error: pwd does not report name of working directory" >&2
{ (exit 1); exit 1; }; }
# Find the source files, if location was not specified.
if test -z "$srcdir"; then
ac_srcdir_defaulted=yes
# Try the directory containing this script, then the parent directory.
ac_confdir=`$as_dirname -- "$as_myself" ||
$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
X"$as_myself" : 'X\(//\)[^/]' \| \
X"$as_myself" : 'X\(//\)$' \| \
X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X"$as_myself" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
}
/^X\(\/\/\)[^/].*/{
s//\1/
q
}
/^X\(\/\/\)$/{
s//\1/
q
}
/^X\(\/\).*/{
s//\1/
q
}
s/.*/./; q'`
srcdir=$ac_confdir
if test ! -r "$srcdir/$ac_unique_file"; then
srcdir=..
fi
else
ac_srcdir_defaulted=no
fi
if test ! -r "$srcdir/$ac_unique_file"; then
test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
{ $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
{ (exit 1); exit 1; }; }
fi
ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
ac_abs_confdir=`(
cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2
{ (exit 1); exit 1; }; }
pwd)`
# When building in place, set srcdir=.
if test "$ac_abs_confdir" = "$ac_pwd"; then
srcdir=.
fi
# Remove unnecessary trailing slashes from srcdir.
# Double slashes in file names in object file debugging info
# mess up M-x gdb in Emacs.
case $srcdir in
*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
esac
for ac_var in $ac_precious_vars; do
eval ac_env_${ac_var}_set=\${${ac_var}+set}
eval ac_env_${ac_var}_value=\$${ac_var}
eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
eval ac_cv_env_${ac_var}_value=\$${ac_var}
done
#
# Report the --help message.
#
if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures firmware-tools 2.1.14 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
To assign environment variables (e.g., CC, CFLAGS...), specify them as
VAR=VALUE. See below for descriptions of some of the useful variables.
Defaults for the options are specified in brackets.
Configuration:
-h, --help display this help and exit
--help=short display options specific to this package
--help=recursive display the short help of all the included packages
-V, --version display version information and exit
-q, --quiet, --silent do not print \`checking...' messages
--cache-file=FILE cache test results in FILE [disabled]
-C, --config-cache alias for \`--cache-file=config.cache'
-n, --no-create do not create output files
--srcdir=DIR find the sources in DIR [configure dir or \`..']
Installation directories:
--prefix=PREFIX install architecture-independent files in PREFIX
[$ac_default_prefix]
--exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
[PREFIX]
By default, \`make install' will install all the files in
\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
an installation prefix other than \`$ac_default_prefix' using \`--prefix',
for instance \`--prefix=\$HOME'.
For better control, use the options below.
Fine tuning of the installation directories:
--bindir=DIR user executables [EPREFIX/bin]
--sbindir=DIR system admin executables [EPREFIX/sbin]
--libexecdir=DIR program executables [EPREFIX/libexec]
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
--datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
--datadir=DIR read-only architecture-independent data [DATAROOTDIR]
--infodir=DIR info documentation [DATAROOTDIR/info]
--localedir=DIR locale-dependent data [DATAROOTDIR/locale]
--mandir=DIR man documentation [DATAROOTDIR/man]
--docdir=DIR documentation root [DATAROOTDIR/doc/firmware-tools]
--htmldir=DIR html documentation [DOCDIR]
--dvidir=DIR dvi documentation [DOCDIR]
--pdfdir=DIR pdf documentation [DOCDIR]
--psdir=DIR ps documentation [DOCDIR]
_ACEOF
cat <<\_ACEOF
Program names:
--program-prefix=PREFIX prepend PREFIX to installed program names
--program-suffix=SUFFIX append SUFFIX to installed program names
--program-transform-name=PROGRAM run sed PROGRAM on installed program names
_ACEOF
fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of firmware-tools 2.1.14:";;
esac
cat <<\_ACEOF
_ACEOF
ac_status=$?
fi
if test "$ac_init_help" = "recursive"; then
# If there are subdirs, report their specific --help.
for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
test -d "$ac_dir" ||
{ cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
continue
ac_builddir=.
case "$ac_dir" in
.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
*)
ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
# A ".." for each directory in $ac_dir_suffix.
ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
case $ac_top_builddir_sub in
"") ac_top_builddir_sub=. ac_top_build_prefix= ;;
*) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
esac ;;
esac
ac_abs_top_builddir=$ac_pwd
ac_abs_builddir=$ac_pwd$ac_dir_suffix
# for backward compatibility:
ac_top_builddir=$ac_top_build_prefix
case $srcdir in
.) # We are building in place.
ac_srcdir=.
ac_top_srcdir=$ac_top_builddir_sub
ac_abs_top_srcdir=$ac_pwd ;;
[\\/]* | ?:[\\/]* ) # Absolute name.
ac_srcdir=$srcdir$ac_dir_suffix;
ac_top_srcdir=$srcdir
ac_abs_top_srcdir=$srcdir ;;
*) # Relative name.
ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
ac_top_srcdir=$ac_top_build_prefix$srcdir
ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
esac
ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
cd "$ac_dir" || { ac_status=$?; continue; }
# Check for guested configure.
if test -f "$ac_srcdir/configure.gnu"; then
echo &&
$SHELL "$ac_srcdir/configure.gnu" --help=recursive
elif test -f "$ac_srcdir/configure"; then
echo &&
$SHELL "$ac_srcdir/configure" --help=recursive
else
$as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
fi || ac_status=$?
cd "$ac_pwd" || { ac_status=$?; break; }
done
fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
firmware-tools configure 2.1.14
generated by GNU Autoconf 2.63
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
exit
fi
cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by firmware-tools $as_me 2.1.14, which was
generated by GNU Autoconf 2.63. Invocation command line was
$ $0 $@
_ACEOF
exec 5>>config.log
{
cat <<_ASUNAME
## --------- ##
## Platform. ##
## --------- ##
hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
uname -m = `(uname -m) 2>/dev/null || echo unknown`
uname -r = `(uname -r) 2>/dev/null || echo unknown`
uname -s = `(uname -s) 2>/dev/null || echo unknown`
uname -v = `(uname -v) 2>/dev/null || echo unknown`
/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
_ASUNAME
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
$as_echo "PATH: $as_dir"
done
IFS=$as_save_IFS
} >&5
cat >&5 <<_ACEOF
## ----------- ##
## Core tests. ##
## ----------- ##
_ACEOF
# Keep a trace of the command line.
# Strip out --no-create and --no-recursion so they do not pile up.
# Strip out --silent because we don't want to record it for future runs.
# Also quote any args containing shell meta-characters.
# Make two passes to allow for proper duplicate-argument suppression.
ac_configure_args=
ac_configure_args0=
ac_configure_args1=
ac_must_keep_next=false
for ac_pass in 1 2
do
for ac_arg
do
case $ac_arg in
-no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
-q | -quiet | --quiet | --quie | --qui | --qu | --q \
| -silent | --silent | --silen | --sile | --sil)
continue ;;
*\'*)
ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
esac
case $ac_pass in
1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
2)
ac_configure_args1="$ac_configure_args1 '$ac_arg'"
if test $ac_must_keep_next = true; then
ac_must_keep_next=false # Got value, back to normal.
else
case $ac_arg in
*=* | --config-cache | -C | -disable-* | --disable-* \
| -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
| -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
| -with-* | --with-* | -without-* | --without-* | --x)
case "$ac_configure_args0 " in
"$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
esac
;;
-* ) ac_must_keep_next=true ;;
esac
fi
ac_configure_args="$ac_configure_args '$ac_arg'"
;;
esac
done
done
$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
# When interrupted or exit'd, cleanup temporary files, and complete
# config.log. We remove comments because anyway the quotes in there
# would cause problems or look ugly.
# WARNING: Use '\'' to represent an apostrophe within the trap.
# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
trap 'exit_status=$?
# Save into config.log some information that might help in debugging.
{
echo
cat <<\_ASBOX
## ---------------- ##
## Cache variables. ##
## ---------------- ##
_ASBOX
echo
# The following way of writing the cache mishandles newlines in values,
(
for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
eval ac_val=\$$ac_var
case $ac_val in #(
*${as_nl}*)
case $ac_var in #(
*_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5
$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
esac
case $ac_var in #(
_ | IFS | as_nl) ;; #(
BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
*) $as_unset $ac_var ;;
esac ;;
esac
done
(set) 2>&1 |
case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
*${as_nl}ac_space=\ *)
sed -n \
"s/'\''/'\''\\\\'\'''\''/g;
s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
;; #(
*)
sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
;;
esac |
sort
)
echo
cat <<\_ASBOX
## ----------------- ##
## Output variables. ##
## ----------------- ##
_ASBOX
echo
for ac_var in $ac_subst_vars
do
eval ac_val=\$$ac_var
case $ac_val in
*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
esac
$as_echo "$ac_var='\''$ac_val'\''"
done | sort
echo
if test -n "$ac_subst_files"; then
cat <<\_ASBOX
## ------------------- ##
## File substitutions. ##
## ------------------- ##
_ASBOX
echo
for ac_var in $ac_subst_files
do
eval ac_val=\$$ac_var
case $ac_val in
*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
esac
$as_echo "$ac_var='\''$ac_val'\''"
done | sort
echo
fi
if test -s confdefs.h; then
cat <<\_ASBOX
## ----------- ##
## confdefs.h. ##
## ----------- ##
_ASBOX
echo
cat confdefs.h
echo
fi
test "$ac_signal" != 0 &&
$as_echo "$as_me: caught signal $ac_signal"
$as_echo "$as_me: exit $exit_status"
} >&5
rm -f core *.core core.conftest.* &&
rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
exit $exit_status
' 0
for ac_signal in 1 2 13 15; do
trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
done
ac_signal=0
# confdefs.h avoids OS command line length limits that DEFS can exceed.
rm -f -r conftest* confdefs.h
# Predefined preprocessor variables.
cat >>confdefs.h <<_ACEOF
#define PACKAGE_NAME "$PACKAGE_NAME"
_ACEOF
cat >>confdefs.h <<_ACEOF
#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
_ACEOF
cat >>confdefs.h <<_ACEOF
#define PACKAGE_VERSION "$PACKAGE_VERSION"
_ACEOF
cat >>confdefs.h <<_ACEOF
#define PACKAGE_STRING "$PACKAGE_STRING"
_ACEOF
cat >>confdefs.h <<_ACEOF
#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
_ACEOF
# Let the site file select an alternate cache file if it wants to.
# Prefer an explicitly selected file to automatically selected ones.
ac_site_file1=NONE
ac_site_file2=NONE
if test -n "$CONFIG_SITE"; then
ac_site_file1=$CONFIG_SITE
elif test "x$prefix" != xNONE; then
ac_site_file1=$prefix/share/config.site
ac_site_file2=$prefix/etc/config.site
else
ac_site_file1=$ac_default_prefix/share/config.site
ac_site_file2=$ac_default_prefix/etc/config.site
fi
for ac_site_file in "$ac_site_file1" "$ac_site_file2"
do
test "x$ac_site_file" = xNONE && continue
if test -r "$ac_site_file"; then
{ $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
$as_echo "$as_me: loading site script $ac_site_file" >&6;}
sed 's/^/| /' "$ac_site_file" >&5
. "$ac_site_file"
fi
done
if test -r "$cache_file"; then
# Some versions of bash will fail to source /dev/null (special
# files actually), so we avoid doing that.
if test -f "$cache_file"; then
{ $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5
$as_echo "$as_me: loading cache $cache_file" >&6;}
case $cache_file in
[\\/]* | ?:[\\/]* ) . "$cache_file";;
*) . "./$cache_file";;
esac
fi
else
{ $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5
$as_echo "$as_me: creating cache $cache_file" >&6;}
>$cache_file
fi
# Check that the precious variables saved in the cache have kept the same
# value.
ac_cache_corrupted=false
for ac_var in $ac_precious_vars; do
eval ac_old_set=\$ac_cv_env_${ac_var}_set
eval ac_new_set=\$ac_env_${ac_var}_set
eval ac_old_val=\$ac_cv_env_${ac_var}_value
eval ac_new_val=\$ac_env_${ac_var}_value
case $ac_old_set,$ac_new_set in
set,)
{ $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
ac_cache_corrupted=: ;;
,set)
{ $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
ac_cache_corrupted=: ;;
,);;
*)
if test "x$ac_old_val" != "x$ac_new_val"; then
# differences in whitespace do not lead to failure.
ac_old_val_w=`echo x $ac_old_val`
ac_new_val_w=`echo x $ac_new_val`
if test "$ac_old_val_w" != "$ac_new_val_w"; then
{ $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
ac_cache_corrupted=:
else
{ $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
eval $ac_var=\$ac_old_val
fi
{ $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5
$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
{ $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5
$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
fi;;
esac
# Pass precious variables to config.status.
if test "$ac_new_set" = set; then
case $ac_new_val in
*\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
*) ac_arg=$ac_var=$ac_new_val ;;
esac
case " $ac_configure_args " in
*" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
*) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
esac
fi
done
if $ac_cache_corrupted; then
{ $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
{ $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
{ { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
$as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
{ (exit 1); exit 1; }; }
fi
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
####################################
ac_aux_dir=
for ac_dir in pkg "$srcdir"/pkg; do
if test -f "$ac_dir/install-sh"; then
ac_aux_dir=$ac_dir
ac_install_sh="$ac_aux_dir/install-sh -c"
break
elif test -f "$ac_dir/install.sh"; then
ac_aux_dir=$ac_dir
ac_install_sh="$ac_aux_dir/install.sh -c"
break
elif test -f "$ac_dir/shtool"; then
ac_aux_dir=$ac_dir
ac_install_sh="$ac_aux_dir/shtool install -c"
break
fi
done
if test -z "$ac_aux_dir"; then
{ { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in pkg \"$srcdir\"/pkg" >&5
$as_echo "$as_me: error: cannot find install-sh or install.sh in pkg \"$srcdir\"/pkg" >&2;}
{ (exit 1); exit 1; }; }
fi
# These three variables are undocumented and unsupported,
# and are intended to be withdrawn in a future Autoconf release.
# They can cause serious problems if a builder's source tree is in a directory
# whose full name contains unusual characters.
ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
am__api_version='1.11'
# Find a good install program. We prefer a C program (faster),
# so one script is as good as another. But avoid the broken or
# incompatible versions:
# SysV /etc/install, /usr/sbin/install
# SunOS /usr/etc/install
# IRIX /sbin/install
# AIX /bin/install
# AmigaOS /C/install, which installs bootblocks on floppy discs
# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
# AFS /usr/afsws/bin/install, which mishandles nonexistent args
# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
# OS/2's system install, which has a completely different semantic
# ./install, which can be erroneously created by make from ./install.sh.
# Reject install programs that cannot install multiple files.
{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
$as_echo_n "checking for a BSD-compatible install... " >&6; }
if test -z "$INSTALL"; then
if test "${ac_cv_path_install+set}" = set; then
$as_echo_n "(cached) " >&6
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
# Account for people who put trailing slashes in PATH elements.
case $as_dir/ in
./ | .// | /cC/* | \
/etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \
/usr/ucb/* ) ;;
*)
# OSF1 and SCO ODT 3.0 have their own names for install.
# Don't use installbsd from OSF since it installs stuff as root
# by default.
for ac_prog in ginstall scoinst install; do
for ac_exec_ext in '' $ac_executable_extensions; do
if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
if test $ac_prog = install &&
grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# AIX install. It has an incompatible calling convention.
:
elif test $ac_prog = install &&
grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# program-specific install script used by HP pwplus--don't use.
:
else
rm -rf conftest.one conftest.two conftest.dir
echo one > conftest.one
echo two > conftest.two
mkdir conftest.dir
if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
test -s conftest.one && test -s conftest.two &&
test -s conftest.dir/conftest.one &&
test -s conftest.dir/conftest.two
then
ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
break 3
fi
fi
fi
done
done
;;
esac
done
IFS=$as_save_IFS
rm -rf conftest.one conftest.two conftest.dir
fi
if test "${ac_cv_path_install+set}" = set; then
INSTALL=$ac_cv_path_install
else
# As a last resort, use the slow shell script. Don't cache a
# value for INSTALL within a source directory, because that will
# break other packages using the cache if that directory is
# removed, or if the value is a relative name.
INSTALL=$ac_install_sh
fi
fi
{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5
$as_echo "$INSTALL" >&6; }
# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
# It thinks the first close brace ends the variable substitution.
test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
{ $as_echo "$as_me:$LINENO: checking whether build environment is sane" >&5
$as_echo_n "checking whether build environment is sane... " >&6; }
# Just in case
sleep 1
echo timestamp > conftest.file
# Reject unsafe characters in $srcdir or the absolute working directory
# name. Accept space and tab only in the latter.
am_lf='
'
case `pwd` in
*[\\\"\#\$\&\'\`$am_lf]*)
{ { $as_echo "$as_me:$LINENO: error: unsafe absolute working directory name" >&5
$as_echo "$as_me: error: unsafe absolute working directory name" >&2;}
{ (exit 1); exit 1; }; };;
esac
case $srcdir in
*[\\\"\#\$\&\'\`$am_lf\ \ ]*)
{ { $as_echo "$as_me:$LINENO: error: unsafe srcdir value: \`$srcdir'" >&5
$as_echo "$as_me: error: unsafe srcdir value: \`$srcdir'" >&2;}
{ (exit 1); exit 1; }; };;
esac
# Do `set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
if test "$*" = "X"; then
# -L didn't work.
set X `ls -t "$srcdir/configure" conftest.file`
fi
rm -f conftest.file
if test "$*" != "X $srcdir/configure conftest.file" \
&& test "$*" != "X conftest.file $srcdir/configure"; then
# If neither matched, then we have a broken ls. This can happen
# if, for instance, CONFIG_SHELL is bash and it inherits a
# broken ls alias from the environment. This has actually
# happened. Such a system could not be considered "sane".
{ { $as_echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken
alias in your environment" >&5
$as_echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken
alias in your environment" >&2;}
{ (exit 1); exit 1; }; }
fi
test "$2" = conftest.file
)
then
# Ok.
:
else
{ { $as_echo "$as_me:$LINENO: error: newly created file is older than distributed files!
Check your system clock" >&5
$as_echo "$as_me: error: newly created file is older than distributed files!
Check your system clock" >&2;}
{ (exit 1); exit 1; }; }
fi
{ $as_echo "$as_me:$LINENO: result: yes" >&5
$as_echo "yes" >&6; }
test "$program_prefix" != NONE &&
program_transform_name="s&^&$program_prefix&;$program_transform_name"
# Use a double $ so make ignores it.
test "$program_suffix" != NONE &&
program_transform_name="s&\$&$program_suffix&;$program_transform_name"
# Double any \ or $.
# By default was `s,x,x', remove it if useless.
ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
# expand $ac_aux_dir to an absolute path
am_aux_dir=`cd $ac_aux_dir && pwd`
if test x"${MISSING+set}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
*)
MISSING="\${SHELL} $am_aux_dir/missing" ;;
esac
fi
# Use eval to expand $SHELL
if eval "$MISSING --run true"; then
am_missing_run="$MISSING --run "
else
am_missing_run=
{ $as_echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5
$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
fi
if test x"${install_sh}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
*)
install_sh="\${SHELL} $am_aux_dir/install-sh"
esac
fi
# Installed binaries are usually stripped using `strip' when the user
# run `make install-strip'. However `strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
# will honor the `STRIP' environment variable to overrule this program.
if test "$cross_compiling" != no; then
if test -n "$ac_tool_prefix"; then
# Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
set dummy ${ac_tool_prefix}strip; ac_word=$2
{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if test "${ac_cv_prog_STRIP+set}" = set; then
$as_echo_n "(cached) " >&6
else
if test -n "$STRIP"; then
ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
ac_cv_prog_STRIP="${ac_tool_prefix}strip"
$as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
fi
fi
STRIP=$ac_cv_prog_STRIP
if test -n "$STRIP"; then
{ $as_echo "$as_me:$LINENO: result: $STRIP" >&5
$as_echo "$STRIP" >&6; }
else
{ $as_echo "$as_me:$LINENO: result: no" >&5
$as_echo "no" >&6; }
fi
fi
if test -z "$ac_cv_prog_STRIP"; then
ac_ct_STRIP=$STRIP
# Extract the first word of "strip", so it can be a program name with args.
set dummy strip; ac_word=$2
{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_STRIP"; then
ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
ac_cv_prog_ac_ct_STRIP="strip"
$as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
fi
fi
ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
if test -n "$ac_ct_STRIP"; then
{ $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5
$as_echo "$ac_ct_STRIP" >&6; }
else
{ $as_echo "$as_me:$LINENO: result: no" >&5
$as_echo "no" >&6; }
fi
if test "x$ac_ct_STRIP" = x; then
STRIP=":"
else
case $cross_compiling:$ac_tool_warned in
yes:)
{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
ac_tool_warned=yes ;;
esac
STRIP=$ac_ct_STRIP
fi
else
STRIP="$ac_cv_prog_STRIP"
fi
fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
{ $as_echo "$as_me:$LINENO: checking for a thread-safe mkdir -p" >&5
$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
if test -z "$MKDIR_P"; then
if test "${ac_cv_path_mkdir+set}" = set; then
$as_echo_n "(cached) " >&6
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_prog in mkdir gmkdir; do
for ac_exec_ext in '' $ac_executable_extensions; do
{ test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue
case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
'mkdir (GNU coreutils) '* | \
'mkdir (coreutils) '* | \
'mkdir (fileutils) '4.1*)
ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
break 3;;
esac
done
done
done
IFS=$as_save_IFS
fi
if test "${ac_cv_path_mkdir+set}" = set; then
MKDIR_P="$ac_cv_path_mkdir -p"
else
# As a last resort, use the slow shell script. Don't cache a
# value for MKDIR_P within a source directory, because that will
# break other packages using the cache if that directory is
# removed, or if the value is a relative name.
test -d ./--version && rmdir ./--version
MKDIR_P="$ac_install_sh -d"
fi
fi
{ $as_echo "$as_me:$LINENO: result: $MKDIR_P" >&5
$as_echo "$MKDIR_P" >&6; }
mkdir_p="$MKDIR_P"
case $mkdir_p in
[\\/$]* | ?:[\\/]*) ;;
*/*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
esac
for ac_prog in gawk mawk nawk awk
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if test "${ac_cv_prog_AWK+set}" = set; then
$as_echo_n "(cached) " >&6
else
if test -n "$AWK"; then
ac_cv_prog_AWK="$AWK" # Let the user override the test.
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
ac_cv_prog_AWK="$ac_prog"
$as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
fi
fi
AWK=$ac_cv_prog_AWK
if test -n "$AWK"; then
{ $as_echo "$as_me:$LINENO: result: $AWK" >&5
$as_echo "$AWK" >&6; }
else
{ $as_echo "$as_me:$LINENO: result: no" >&5
$as_echo "no" >&6; }
fi
test -n "$AWK" && break
done
{ $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5
$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
set x ${MAKE-make}
ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then
$as_echo_n "(cached) " >&6
else
cat >conftest.make <<\_ACEOF
SHELL = /bin/sh
all:
@echo '@@@%%%=$(MAKE)=@@@%%%'
_ACEOF
# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
case `${MAKE-make} -f conftest.make 2>/dev/null` in
*@@@%%%=?*=@@@%%%*)
eval ac_cv_prog_make_${ac_make}_set=yes;;
*)
eval ac_cv_prog_make_${ac_make}_set=no;;
esac
rm -f conftest.make
fi
if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
{ $as_echo "$as_me:$LINENO: result: yes" >&5
$as_echo "yes" >&6; }
SET_MAKE=
else
{ $as_echo "$as_me:$LINENO: result: no" >&5
$as_echo "no" >&6; }
SET_MAKE="MAKE=${MAKE-make}"
fi
rm -rf .tst 2>/dev/null
mkdir .tst 2>/dev/null
if test -d .tst; then
am__leading_dot=.
else
am__leading_dot=_
fi
rmdir .tst 2>/dev/null
if test "`cd $srcdir && pwd`" != "`pwd`"; then
# Use -I$(srcdir) only when $(srcdir) != ., so that make's output
# is not polluted with repeated "-I."
am__isrc=' -I$(srcdir)'
# test to see if srcdir already configured
if test -f $srcdir/config.status; then
{ { $as_echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5
$as_echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;}
{ (exit 1); exit 1; }; }
fi
fi
# test whether we have cygpath
if test -z "$CYGPATH_W"; then
if (cygpath --version) >/dev/null 2>/dev/null; then
CYGPATH_W='cygpath -w'
else
CYGPATH_W=echo
fi
fi
# Define the identity of the package.
PACKAGE='firmware-tools'
VERSION='2.1.14'
# Some tools Automake needs.
ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
# We need awk for the "check" target. The system "awk" is bad on
# some platforms.
# Always define AMTAR for backward compatibility.
AMTAR=${AMTAR-"${am_missing_run}tar"}
{ $as_echo "$as_me:$LINENO: checking how to create a ustar tar archive" >&5
$as_echo_n "checking how to create a ustar tar archive... " >&6; }
# Loop over all known methods to create a tar archive until one works.
_am_tools='gnutar plaintar pax cpio none'
_am_tools=${am_cv_prog_tar_ustar-$_am_tools}
# Do not fold the above two line into one, because Tru64 sh and
# Solaris sh will not grok spaces in the rhs of `-'.
for _am_tool in $_am_tools
do
case $_am_tool in
gnutar)
for _am_tar in tar gnutar gtar;
do
{ echo "$as_me:$LINENO: $_am_tar --version" >&5
($_am_tar --version) >&5 2>&5
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } && break
done
am__tar="$_am_tar --format=ustar -chf - "'"$$tardir"'
am__tar_="$_am_tar --format=ustar -chf - "'"$tardir"'
am__untar="$_am_tar -xf -"
;;
plaintar)
# Must skip GNU tar: if it does not support --format= it doesn't create
# ustar tarball either.
(tar --version) >/dev/null 2>&1 && continue
am__tar='tar chf - "$$tardir"'
am__tar_='tar chf - "$tardir"'
am__untar='tar xf -'
;;
pax)
am__tar='pax -L -x ustar -w "$$tardir"'
am__tar_='pax -L -x ustar -w "$tardir"'
am__untar='pax -r'
;;
cpio)
am__tar='find "$$tardir" -print | cpio -o -H ustar -L'
am__tar_='find "$tardir" -print | cpio -o -H ustar -L'
am__untar='cpio -i -H ustar -d'
;;
none)
am__tar=false
am__tar_=false
am__untar=false
;;
esac
# If the value was cached, stop now. We just wanted to have am__tar
# and am__untar set.
test -n "${am_cv_prog_tar_ustar}" && break
# tar/untar a dummy directory, and stop if the command works
rm -rf conftest.dir
mkdir conftest.dir
echo GrepMe > conftest.dir/file
{ echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5
(tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); }
rm -rf conftest.dir
if test -s conftest.tar; then
  # Untar the dummy archive and verify its payload round-tripped.
  # NOTE(review): the input redirections `<conftest.tar` had been stripped
  # by markup-eating extraction ("$am__untar &5"); restored per the
  # canonical Automake _AM_PROG_TAR macro expansion.
  { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5
($am__untar <conftest.tar) >&5 2>&5
  ac_status=$?
  echo "$as_me:$LINENO: \$? = $ac_status" >&5
  (exit $ac_status); }
  # Success for this tar tool iff the extracted file still contains GrepMe.
  grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
fi
done
rm -rf conftest.dir
if test "${am_cv_prog_tar_ustar+set}" = set; then
$as_echo_n "(cached) " >&6
else
am_cv_prog_tar_ustar=$_am_tool
fi
{ $as_echo "$as_me:$LINENO: result: $am_cv_prog_tar_ustar" >&5
$as_echo "$am_cv_prog_tar_ustar" >&6; }
# Checks for programs.
# Find a good install program. We prefer a C program (faster),
# so one script is as good as another. But avoid the broken or
# incompatible versions:
# SysV /etc/install, /usr/sbin/install
# SunOS /usr/etc/install
# IRIX /sbin/install
# AIX /bin/install
# AmigaOS /C/install, which installs bootblocks on floppy discs
# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
# AFS /usr/afsws/bin/install, which mishandles nonexistent args
# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
# OS/2's system install, which has a completely different semantic
# ./install, which can be erroneously created by make from ./install.sh.
# Reject install programs that cannot install multiple files.
{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
$as_echo_n "checking for a BSD-compatible install... " >&6; }
if test -z "$INSTALL"; then
if test "${ac_cv_path_install+set}" = set; then
$as_echo_n "(cached) " >&6
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
# Account for people who put trailing slashes in PATH elements.
case $as_dir/ in
./ | .// | /cC/* | \
/etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \
/usr/ucb/* ) ;;
*)
# OSF1 and SCO ODT 3.0 have their own names for install.
# Don't use installbsd from OSF since it installs stuff as root
# by default.
for ac_prog in ginstall scoinst install; do
for ac_exec_ext in '' $ac_executable_extensions; do
if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
if test $ac_prog = install &&
grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# AIX install. It has an incompatible calling convention.
:
elif test $ac_prog = install &&
grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# program-specific install script used by HP pwplus--don't use.
:
else
rm -rf conftest.one conftest.two conftest.dir
echo one > conftest.one
echo two > conftest.two
mkdir conftest.dir
if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
test -s conftest.one && test -s conftest.two &&
test -s conftest.dir/conftest.one &&
test -s conftest.dir/conftest.two
then
ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
break 3
fi
fi
fi
done
done
;;
esac
done
IFS=$as_save_IFS
rm -rf conftest.one conftest.two conftest.dir
fi
if test "${ac_cv_path_install+set}" = set; then
INSTALL=$ac_cv_path_install
else
# As a last resort, use the slow shell script. Don't cache a
# value for INSTALL within a source directory, because that will
# break other packages using the cache if that directory is
# removed, or if the value is a relative name.
INSTALL=$ac_install_sh
fi
fi
{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5
$as_echo "$INSTALL" >&6; }
# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
# It thinks the first close brace ends the variable substitution.
test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
# automake macros
# Find any Python interpreter.
if test -z "$PYTHON"; then
for ac_prog in python python2 python3 python3.0 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if test "${ac_cv_path_PYTHON+set}" = set; then
$as_echo_n "(cached) " >&6
else
case $PYTHON in
[\\/]* | ?:[\\/]*)
ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
;;
esac
fi
PYTHON=$ac_cv_path_PYTHON
if test -n "$PYTHON"; then
{ $as_echo "$as_me:$LINENO: result: $PYTHON" >&5
$as_echo "$PYTHON" >&6; }
else
{ $as_echo "$as_me:$LINENO: result: no" >&5
$as_echo "no" >&6; }
fi
test -n "$PYTHON" && break
done
test -n "$PYTHON" || PYTHON=":"
fi
am_display_PYTHON=python
if test "$PYTHON" = :; then
{ { $as_echo "$as_me:$LINENO: error: no suitable Python interpreter found" >&5
$as_echo "$as_me: error: no suitable Python interpreter found" >&2;}
{ (exit 1); exit 1; }; }
else
{ $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON version" >&5
$as_echo_n "checking for $am_display_PYTHON version... " >&6; }
if test "${am_cv_python_version+set}" = set; then
$as_echo_n "(cached) " >&6
else
am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[:3])"`
fi
{ $as_echo "$as_me:$LINENO: result: $am_cv_python_version" >&5
$as_echo "$am_cv_python_version" >&6; }
PYTHON_VERSION=$am_cv_python_version
PYTHON_PREFIX='${prefix}'
PYTHON_EXEC_PREFIX='${exec_prefix}'
{ $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON platform" >&5
$as_echo_n "checking for $am_display_PYTHON platform... " >&6; }
if test "${am_cv_python_platform+set}" = set; then
$as_echo_n "(cached) " >&6
else
am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`
fi
{ $as_echo "$as_me:$LINENO: result: $am_cv_python_platform" >&5
$as_echo "$am_cv_python_platform" >&6; }
PYTHON_PLATFORM=$am_cv_python_platform
{ $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON script directory" >&5
$as_echo_n "checking for $am_display_PYTHON script directory... " >&6; }
if test "${am_cv_python_pythondir+set}" = set; then
$as_echo_n "(cached) " >&6
else
if test "x$prefix" = xNONE
then
am_py_prefix=$ac_default_prefix
else
am_py_prefix=$prefix
fi
am_cv_python_pythondir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(0,0,prefix='$am_py_prefix'))" 2>/dev/null ||
echo "$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages"`
case $am_cv_python_pythondir in
$am_py_prefix*)
am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'`
am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"`
;;
esac
fi
{ $as_echo "$as_me:$LINENO: result: $am_cv_python_pythondir" >&5
$as_echo "$am_cv_python_pythondir" >&6; }
pythondir=$am_cv_python_pythondir
pkgpythondir=\${pythondir}/$PACKAGE
{ $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON extension module directory" >&5
$as_echo_n "checking for $am_display_PYTHON extension module directory... " >&6; }
if test "${am_cv_python_pyexecdir+set}" = set; then
$as_echo_n "(cached) " >&6
else
if test "x$exec_prefix" = xNONE
then
am_py_exec_prefix=$am_py_prefix
else
am_py_exec_prefix=$exec_prefix
fi
am_cv_python_pyexecdir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(1,0,prefix='$am_py_exec_prefix'))" 2>/dev/null ||
echo "$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages"`
case $am_cv_python_pyexecdir in
$am_py_exec_prefix*)
am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'`
am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"`
;;
esac
fi
{ $as_echo "$as_me:$LINENO: result: $am_cv_python_pyexecdir" >&5
$as_echo "$am_cv_python_pyexecdir" >&6; }
pyexecdir=$am_cv_python_pyexecdir
pkgpyexecdir=\${pyexecdir}/$PACKAGE
fi
# versioning
RELEASE_MAJOR=2
RELEASE_MINOR=1
RELEASE_MICRO=14
RELEASE_EXTRA=
RELEASE_RPM_EXTRA=%{nil}
if test -n "$RELEASE_EXTRA"; then
RELEASE_RPM_EXTRA=$RELEASE_EXTRA
fi
# firmware-tools oddity: package name cannot contain '-', so we have to fix it
pkgpythondir=\${pythondir}/firmwaretools
pkgpyexecdir=\${pyexecdir}/firmwaretools
# generate files and exit
ac_config_files="$ac_config_files Makefile"
ac_config_files="$ac_config_files pkg/${PACKAGE_NAME}.spec"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
# scripts and configure runs, see configure's option --config-cache.
# It is not useful on other systems. If it contains results you don't
# want to keep, you may remove or edit it.
#
# config.status only pays attention to the cache file if you give it
# the --recheck option to rerun configure.
#
# `ac_cv_env_foo' variables (set or unset) will be overridden when
# loading this file, other *unset* `ac_cv_foo' will be assigned the
# following values.
_ACEOF
# The following way of writing the cache mishandles newlines in values,
# but we know of no workaround that is simple, portable, and efficient.
# So, we kill variables containing newlines.
# Ultrix sh set writes to stderr and can't be redirected directly,
# and sets the high bit in the cache file unless we assign to the vars.
(
for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
eval ac_val=\$$ac_var
case $ac_val in #(
*${as_nl}*)
case $ac_var in #(
*_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5
$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
esac
case $ac_var in #(
_ | IFS | as_nl) ;; #(
BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
*) $as_unset $ac_var ;;
esac ;;
esac
done
(set) 2>&1 |
case $as_nl`(ac_space=' '; set) 2>&1` in #(
*${as_nl}ac_space=\ *)
# `set' does not quote correctly, so add quotes (double-quote
# substitution turns \\\\ into \\, and sed turns \\ into \).
sed -n \
"s/'/'\\\\''/g;
s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
;; #(
*)
# `set' quotes correctly as required by POSIX, so do not add quotes.
sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
;;
esac |
sort
) |
sed '
/^ac_cv_env_/b end
t clear
:clear
s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
t end
s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
:end' >>confcache
if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
if test -w "$cache_file"; then
test "x$cache_file" != "x/dev/null" &&
{ $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5
$as_echo "$as_me: updating cache $cache_file" >&6;}
cat confcache >$cache_file
else
{ $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
fi
fi
rm -f confcache
test "x$prefix" = xNONE && prefix=$ac_default_prefix
# Let make expand exec_prefix.
test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
# Transform confdefs.h into DEFS.
# Protect against shell expansion while executing Makefile rules.
# Protect against Makefile macro expansion.
#
# If the first sed substitution is executed (which looks for macros that
# take arguments), then branch to the quote section. Otherwise,
# look for a macro that doesn't take arguments.
ac_script='
:mline
/\\$/{
N
s,\\\n,,
b mline
}
t clear
:clear
s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g
t quote
s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g
t quote
b any
:quote
s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g
s/\[/\\&/g
s/\]/\\&/g
s/\$/$$/g
H
:any
${
g
s/^\n//
s/\n/ /g
p
}
'
DEFS=`sed -n "$ac_script" confdefs.h`
ac_libobjs=
ac_ltlibobjs=
for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
# 1. Remove the extension, and $U if already installed.
ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
# 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
# will be set to the directory where LIBOBJS objects are built.
ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
done
LIBOBJS=$ac_libobjs
LTLIBOBJS=$ac_ltlibobjs
: ${CONFIG_STATUS=./config.status}
ac_write_fail=0
ac_clean_files_save=$ac_clean_files
ac_clean_files="$ac_clean_files $CONFIG_STATUS"
{ $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
#! $SHELL
# Generated by $as_me.
# Run this file to recreate the current configuration.
# Compiler output produced by configure, useful for debugging
# configure, is in config.log if it exists.
debug=false
ac_cs_recheck=false
ac_cs_silent=false
SHELL=\${CONFIG_SHELL-$SHELL}
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
## --------------------- ##
## M4sh Initialization. ##
## --------------------- ##
# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
emulate sh
NULLCMD=:
# Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
case `(set -o) 2>/dev/null` in
*posix*) set -o posix ;;
esac
fi
# PATH needs CR
# Avoid depending upon Character Ranges.
as_cr_letters='abcdefghijklmnopqrstuvwxyz'
as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
as_cr_Letters=$as_cr_letters$as_cr_LETTERS
as_cr_digits='0123456789'
as_cr_alnum=$as_cr_Letters$as_cr_digits
as_nl='
'
export as_nl
# Printing a long string crashes Solaris 7 /usr/bin/printf.
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
as_echo='printf %s\n'
as_echo_n='printf %s'
else
if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
as_echo_n='/usr/ucb/echo -n'
else
as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
as_echo_n_body='eval
arg=$1;
case $arg in
*"$as_nl"*)
expr "X$arg" : "X\\(.*\\)$as_nl";
arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
esac;
expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
'
export as_echo_n_body
as_echo_n='sh -c $as_echo_n_body as_echo'
fi
export as_echo_body
as_echo='sh -c $as_echo_body as_echo'
fi
# The user is always right.
if test "${PATH_SEPARATOR+set}" != set; then
PATH_SEPARATOR=:
(PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
(PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
PATH_SEPARATOR=';'
}
fi
# Support unset when possible.
if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
as_unset=unset
else
as_unset=false
fi
# IFS
# We need space, tab and new line, in precisely that order. Quoting is
# there to prevent editors from complaining about space-tab.
# (If _AS_PATH_WALK were called with IFS unset, it would disable word
# splitting by setting IFS to empty value.)
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
case $0 in
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
done
IFS=$as_save_IFS
;;
esac
# We did not find ourselves, most probably we were run as `sh COMMAND'
# in which case we are not to be found in the path.
if test "x$as_myself" = x; then
as_myself=$0
fi
if test ! -f "$as_myself"; then
$as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
{ (exit 1); exit 1; }
fi
# Work around bugs in pre-3.0 UWIN ksh.
for as_var in ENV MAIL MAILPATH
do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
done
PS1='$ '
PS2='> '
PS4='+ '
# NLS nuisances.
LC_ALL=C
export LC_ALL
LANGUAGE=C
export LANGUAGE
# Required to use basename.
if expr a : '\(a\)' >/dev/null 2>&1 &&
test "X`expr 00001 : '.*\(...\)'`" = X001; then
as_expr=expr
else
as_expr=false
fi
if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
as_basename=basename
else
as_basename=false
fi
# Name of the executable.
as_me=`$as_basename -- "$0" ||
$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
X"$0" : 'X\(//\)$' \| \
X"$0" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X/"$0" |
sed '/^.*\/\([^/][^/]*\)\/*$/{
s//\1/
q
}
/^X\/\(\/\/\)$/{
s//\1/
q
}
/^X\/\(\/\).*/{
s//\1/
q
}
s/.*/./; q'`
# CDPATH.
$as_unset CDPATH
as_lineno_1=$LINENO
as_lineno_2=$LINENO
test "x$as_lineno_1" != "x$as_lineno_2" &&
test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
# Create $as_me.lineno as a copy of $as_myself, but with $LINENO
# uniformly replaced by the line number. The first 'sed' inserts a
# line-number line after each line using $LINENO; the second 'sed'
# does the real work. The second script uses 'N' to pair each
# line-number line with the line containing $LINENO, and appends
# trailing '-' during substitution so that $LINENO is not a special
# case at line end.
# (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
# scripts with optimization help from Paolo Bonzini. Blame Lee
# E. McMahon (1931-1989) for sed's syntax. :-)
sed -n '
p
/[$]LINENO/=
' <$as_myself |
sed '
s/[$]LINENO.*/&-/
t lineno
b
:lineno
N
:loop
s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
t loop
s/-\n.*//
' >$as_me.lineno &&
chmod +x "$as_me.lineno" ||
{ $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
{ (exit 1); exit 1; }; }
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
. "./$as_me.lineno"
# Exit status is that of the last command.
exit
}
if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
as_dirname=dirname
else
as_dirname=false
fi
ECHO_C= ECHO_N= ECHO_T=
case `echo -n x` in
-n*)
case `echo 'x\c'` in
*c*) ECHO_T=' ';; # ECHO_T is single tab character.
*) ECHO_C='\c';;
esac;;
*)
ECHO_N='-n';;
esac
if expr a : '\(a\)' >/dev/null 2>&1 &&
test "X`expr 00001 : '.*\(...\)'`" = X001; then
as_expr=expr
else
as_expr=false
fi
rm -f conf$$ conf$$.exe conf$$.file
if test -d conf$$.dir; then
rm -f conf$$.dir/conf$$.file
else
rm -f conf$$.dir
mkdir conf$$.dir 2>/dev/null
fi
if (echo >conf$$.file) 2>/dev/null; then
if ln -s conf$$.file conf$$ 2>/dev/null; then
as_ln_s='ln -s'
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
# In both cases, we have to default to `cp -p'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
as_ln_s='cp -p'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
as_ln_s='cp -p'
fi
else
as_ln_s='cp -p'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
if mkdir -p . 2>/dev/null; then
as_mkdir_p=:
else
test -d ./-p && rmdir ./-p
as_mkdir_p=false
fi
if test -x / >/dev/null 2>&1; then
as_test_x='test -x'
else
if ls -dL / >/dev/null 2>&1; then
as_ls_L_option=L
else
as_ls_L_option=
fi
as_test_x='
eval sh -c '\''
if test -d "$1"; then
test -d "$1/.";
else
case $1 in
-*)set "./$1";;
esac;
case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
???[sx]*):;;*)false;;esac;fi
'\'' sh
'
fi
as_executable_p=$as_test_x
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
# Sed expression to map a string onto a valid variable name.
as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
exec 6>&1
# Save the log message, to keep $[0] and so on meaningful, and to
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by firmware-tools $as_me 2.1.14, which was
generated by GNU Autoconf 2.63. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
CONFIG_LINKS = $CONFIG_LINKS
CONFIG_COMMANDS = $CONFIG_COMMANDS
$ $0 $@
on `(hostname || uname -n) 2>/dev/null | sed 1q`
"
_ACEOF
case $ac_config_files in *"
"*) set x $ac_config_files; shift; ac_config_files=$*;;
esac
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
# Files that config.status was made for.
config_files="$ac_config_files"
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
ac_cs_usage="\
\`$as_me' instantiates files from templates according to the
current configuration.
Usage: $0 [OPTION]... [FILE]...
-h, --help print this help, then exit
-V, --version print version number and configuration settings, then exit
-q, --quiet, --silent
do not print progress messages
-d, --debug don't remove temporary files
--recheck update $as_me by reconfiguring in the same conditions
--file=FILE[:TEMPLATE]
instantiate the configuration file FILE
Configuration files:
$config_files
Report bugs to ."
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_version="\\
firmware-tools config.status 2.1.14
configured by $0, generated by GNU Autoconf 2.63,
with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
Copyright (C) 2008 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
ac_pwd='$ac_pwd'
srcdir='$srcdir'
INSTALL='$INSTALL'
MKDIR_P='$MKDIR_P'
AWK='$AWK'
test -n "\$AWK" || AWK=awk
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# The default lists apply if the user does not specify any file.
ac_need_defaults=:
while test $# != 0
do
case $1 in
--*=*)
ac_option=`expr "X$1" : 'X\([^=]*\)='`
ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
ac_shift=:
;;
*)
ac_option=$1
ac_optarg=$2
ac_shift=shift
;;
esac
case $ac_option in
# Handling of the options.
-recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
ac_cs_recheck=: ;;
--version | --versio | --versi | --vers | --ver | --ve | --v | -V )
$as_echo "$ac_cs_version"; exit ;;
--debug | --debu | --deb | --de | --d | -d )
debug=: ;;
--file | --fil | --fi | --f )
$ac_shift
case $ac_optarg in
*\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
esac
CONFIG_FILES="$CONFIG_FILES '$ac_optarg'"
ac_need_defaults=false;;
--he | --h | --help | --hel | -h )
$as_echo "$ac_cs_usage"; exit ;;
-q | -quiet | --quiet | --quie | --qui | --qu | --q \
| -silent | --silent | --silen | --sile | --sil | --si | --s)
ac_cs_silent=: ;;
# This is an error.
-*) { $as_echo "$as_me: error: unrecognized option: $1
Try \`$0 --help' for more information." >&2
{ (exit 1); exit 1; }; } ;;
*) ac_config_targets="$ac_config_targets $1"
ac_need_defaults=false ;;
esac
shift
done
ac_configure_extra_args=
if $ac_cs_silent; then
exec 6>/dev/null
ac_configure_extra_args="$ac_configure_extra_args --silent"
fi
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
shift
\$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
CONFIG_SHELL='$SHELL'
export CONFIG_SHELL
exec "\$@"
fi
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
exec 5>>config.log
{
echo
sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
## Running $as_me. ##
_ASBOX
$as_echo "$ac_log"
} >&5
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# Handling of arguments.
for ac_config_target in $ac_config_targets
do
case $ac_config_target in
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"pkg/${PACKAGE_NAME}.spec") CONFIG_FILES="$CONFIG_FILES pkg/${PACKAGE_NAME}.spec" ;;
*) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
{ (exit 1); exit 1; }; };;
esac
done
# If the user did not use the arguments to specify the items to instantiate,
# then the envvar interface is used. Set only those that are not.
# We use the long form for the default assignment because of an extremely
# bizarre bug on SunOS 4.1.3.
if $ac_need_defaults; then
test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
fi
# Have a temporary directory for convenience. Make it in the build tree
# simply because there is no reason against having it here, and in addition,
# creating and moving files from /tmp can sometimes cause problems.
# Hook for its removal unless debugging.
# Note that there is a small window in which the directory will not be cleaned:
# after its creation but before its name has been assigned to `$tmp'.
$debug ||
{
tmp=
trap 'exit_status=$?
{ test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
' 0
trap '{ (exit 1); exit 1; }' 1 2 13 15
}
# Create a (secure) tmp directory for tmp files.
{
tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
test -n "$tmp" && test -d "$tmp"
} ||
{
tmp=./conf$$-$RANDOM
(umask 077 && mkdir "$tmp")
} ||
{
$as_echo "$as_me: cannot create a temporary directory in ." >&2
{ (exit 1); exit 1; }
}
# Set up the scripts for CONFIG_FILES section.
# No need to generate them if there are no CONFIG_FILES.
# This happens for instance with `./config.status config.h'.
if test -n "$CONFIG_FILES"; then
ac_cr='
'
ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null`
if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
ac_cs_awk_cr='\\r'
else
ac_cs_awk_cr=$ac_cr
fi
echo 'BEGIN {' >"$tmp/subs1.awk" &&
_ACEOF
{
echo "cat >conf$$subs.awk <<_ACEOF" &&
echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
echo "_ACEOF"
} >conf$$subs.sh ||
{ { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
{ (exit 1); exit 1; }; }
ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
ac_delim='%!_!# '
for ac_last_try in false false false false false :; do
. ./conf$$subs.sh ||
{ { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
{ (exit 1); exit 1; }; }
ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
if test $ac_delim_n = $ac_delim_num; then
break
elif $ac_last_try; then
{ { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
{ (exit 1); exit 1; }; }
else
ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
fi
done
rm -f conf$$subs.sh
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
_ACEOF
sed -n '
h
s/^/S["/; s/!.*/"]=/
p
g
s/^[^!]*!//
:repl
t repl
s/'"$ac_delim"'$//
t delim
:nl
h
s/\(.\{148\}\).*/\1/
t more1
s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
p
n
b repl
:more1
s/["\\]/\\&/g; s/^/"/; s/$/"\\/
p
g
s/.\{148\}//
t nl
:delim
h
s/\(.\{148\}\).*/\1/
t more2
s/["\\]/\\&/g; s/^/"/; s/$/"/
p
b
:more2
s/["\\]/\\&/g; s/^/"/; s/$/"\\/
p
g
s/.\{148\}//
t delim
' >$CONFIG_STATUS || ac_write_fail=1
rm -f conf$$subs.awk
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACAWK
cat >>"\$tmp/subs1.awk" <<_ACAWK &&
for (key in S) S_is_set[key] = 1
FS = ""
}
{
line = $ 0
nfields = split(line, field, "@")
substed = 0
len = length(field[1])
for (i = 2; i < nfields; i++) {
key = field[i]
keylen = length(key)
if (S_is_set[key]) {
value = S[key]
line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
len += length(value) + length(field[++i])
substed = 1
} else
len += 1 + keylen
}
print line
}
_ACAWK
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
else
cat
fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
|| { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5
$as_echo "$as_me: error: could not setup config files machinery" >&2;}
{ (exit 1); exit 1; }; }
_ACEOF
# VPATH may cause trouble with some makes, so we remove $(srcdir),
# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
# trailing colons and then remove the whole line if VPATH becomes empty
# (actually we leave an empty line to preserve line numbers).
if test "x$srcdir" = x.; then
ac_vpsub='/^[ ]*VPATH[ ]*=/{
s/:*\$(srcdir):*/:/
s/:*\${srcdir}:*/:/
s/:*@srcdir@:*/:/
s/^\([^=]*=[ ]*\):*/\1/
s/:*$//
s/^[^=]*=[ ]*$//
}'
fi
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
fi # test -n "$CONFIG_FILES"
eval set X " :F $CONFIG_FILES "
shift
for ac_tag
do
case $ac_tag in
:[FHLC]) ac_mode=$ac_tag; continue;;
esac
case $ac_mode$ac_tag in
:[FHL]*:*);;
:L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5
$as_echo "$as_me: error: invalid tag $ac_tag" >&2;}
{ (exit 1); exit 1; }; };;
:[FH]-) ac_tag=-:-;;
:[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
esac
ac_save_IFS=$IFS
IFS=:
set x $ac_tag
IFS=$ac_save_IFS
shift
ac_file=$1
shift
case $ac_mode in
:L) ac_source=$1;;
:[FH])
ac_file_inputs=
for ac_f
do
case $ac_f in
-) ac_f="$tmp/stdin";;
*) # Look for the file first in the build tree, then in the source tree
# (if the path is not absolute). The absolute path cannot be DOS-style,
# because $ac_f cannot contain `:'.
test -f "$ac_f" ||
case $ac_f in
[\\/$]*) false;;
*) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
esac ||
{ { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;}
{ (exit 1); exit 1; }; };;
esac
case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
ac_file_inputs="$ac_file_inputs '$ac_f'"
done
# Let's still pretend it is `configure' which instantiates (i.e., don't
# use $as_me), people would be surprised to read:
# /* config.h. Generated by config.status. */
configure_input='Generated from '`
$as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
`' by configure.'
if test x"$ac_file" != x-; then
configure_input="$ac_file. $configure_input"
{ $as_echo "$as_me:$LINENO: creating $ac_file" >&5
$as_echo "$as_me: creating $ac_file" >&6;}
fi
# Neutralize special characters interpreted by sed in replacement strings.
case $configure_input in #(
*\&* | *\|* | *\\* )
ac_sed_conf_input=`$as_echo "$configure_input" |
sed 's/[\\\\&|]/\\\\&/g'`;; #(
*) ac_sed_conf_input=$configure_input;;
esac
case $ac_tag in
*:-:* | *:-) cat >"$tmp/stdin" \
|| { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
$as_echo "$as_me: error: could not create $ac_file" >&2;}
{ (exit 1); exit 1; }; } ;;
esac
;;
esac
ac_dir=`$as_dirname -- "$ac_file" ||
$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
X"$ac_file" : 'X\(//\)[^/]' \| \
X"$ac_file" : 'X\(//\)$' \| \
X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X"$ac_file" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
}
/^X\(\/\/\)[^/].*/{
s//\1/
q
}
/^X\(\/\/\)$/{
s//\1/
q
}
/^X\(\/\).*/{
s//\1/
q
}
s/.*/./; q'`
{ as_dir="$ac_dir"
case $as_dir in #(
-*) as_dir=./$as_dir;;
esac
test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
as_dirs=
while :; do
case $as_dir in #(
*\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
*) as_qdir=$as_dir;;
esac
as_dirs="'$as_qdir' $as_dirs"
as_dir=`$as_dirname -- "$as_dir" ||
$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
X"$as_dir" : 'X\(//\)[^/]' \| \
X"$as_dir" : 'X\(//\)$' \| \
X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X"$as_dir" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
}
/^X\(\/\/\)[^/].*/{
s//\1/
q
}
/^X\(\/\/\)$/{
s//\1/
q
}
/^X\(\/\).*/{
s//\1/
q
}
s/.*/./; q'`
test -d "$as_dir" && break
done
test -z "$as_dirs" || eval "mkdir $as_dirs"
} || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
{ (exit 1); exit 1; }; }; }
ac_builddir=.
case "$ac_dir" in
.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
*)
ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
# A ".." for each directory in $ac_dir_suffix.
ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
case $ac_top_builddir_sub in
"") ac_top_builddir_sub=. ac_top_build_prefix= ;;
*) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
esac ;;
esac
ac_abs_top_builddir=$ac_pwd
ac_abs_builddir=$ac_pwd$ac_dir_suffix
# for backward compatibility:
ac_top_builddir=$ac_top_build_prefix
case $srcdir in
.) # We are building in place.
ac_srcdir=.
ac_top_srcdir=$ac_top_builddir_sub
ac_abs_top_srcdir=$ac_pwd ;;
[\\/]* | ?:[\\/]* ) # Absolute name.
ac_srcdir=$srcdir$ac_dir_suffix;
ac_top_srcdir=$srcdir
ac_abs_top_srcdir=$srcdir ;;
*) # Relative name.
ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
ac_top_srcdir=$ac_top_build_prefix$srcdir
ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
esac
ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
case $ac_mode in
:F)
#
# CONFIG_FILE
#
case $INSTALL in
[\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
*) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
esac
ac_MKDIR_P=$MKDIR_P
case $MKDIR_P in
[\\/$]* | ?:[\\/]* ) ;;
*/*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
esac
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# If the template does not know about datarootdir, expand it.
# FIXME: This hack should be removed a few years after 2.60.
ac_datarootdir_hack=; ac_datarootdir_seen=
ac_sed_dataroot='
/datarootdir/ {
p
q
}
/@datadir@/p
/@docdir@/p
/@infodir@/p
/@localedir@/p
/@mandir@/p
'
case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
*datarootdir*) ac_datarootdir_seen=yes;;
*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
{ $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_datarootdir_hack='
s&@datadir@&$datadir&g
s&@docdir@&$docdir&g
s&@infodir@&$infodir&g
s&@localedir@&$localedir&g
s&@mandir@&$mandir&g
s&\\\${datarootdir}&$datarootdir&g' ;;
esac
_ACEOF
# Neutralize VPATH when `$srcdir' = `.'.
# Shell code in configure.ac might set extrasub.
# FIXME: do we really want to maintain this feature?
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_sed_extra="$ac_vpsub
$extrasub
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
:t
/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
s|@configure_input@|$ac_sed_conf_input|;t t
s&@top_builddir@&$ac_top_builddir_sub&;t t
s&@top_build_prefix@&$ac_top_build_prefix&;t t
s&@srcdir@&$ac_srcdir&;t t
s&@abs_srcdir@&$ac_abs_srcdir&;t t
s&@top_srcdir@&$ac_top_srcdir&;t t
s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
s&@builddir@&$ac_builddir&;t t
s&@abs_builddir@&$ac_abs_builddir&;t t
s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
s&@INSTALL@&$ac_INSTALL&;t t
s&@MKDIR_P@&$ac_MKDIR_P&;t t
$ac_datarootdir_hack
"
eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
|| { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
$as_echo "$as_me: error: could not create $ac_file" >&2;}
{ (exit 1); exit 1; }; }
test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
{ ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
{ ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
{ $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
which seems to be undefined. Please make sure it is defined." >&5
$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
which seems to be undefined. Please make sure it is defined." >&2;}
rm -f "$tmp/stdin"
case $ac_file in
-) cat "$tmp/out" && rm -f "$tmp/out";;
*) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
esac \
|| { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
$as_echo "$as_me: error: could not create $ac_file" >&2;}
{ (exit 1); exit 1; }; }
;;
esac
done # for ac_tag
{ (exit 0); exit 0; }
_ACEOF
chmod +x $CONFIG_STATUS
ac_clean_files=$ac_clean_files_save
test $ac_write_fail = 0 ||
{ { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5
$as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;}
{ (exit 1); exit 1; }; }
# configure is writing to config.log, and then calls config.status.
# config.status does its own redirection, appending to config.log.
# Unfortunately, on DOS this fails, as config.log is still kept open
# by configure, so config.status won't be able to write to it; its
# output is simply discarded. So we exec the FD to /dev/null,
# effectively closing config.log, so it can be properly (re)opened and
# appended to by config.status. When coming back to configure, we
# need to make the FD available again.
if test "$no_create" != yes; then
ac_cs_success=:
ac_config_status_args=
test "$silent" = yes &&
ac_config_status_args="$ac_config_status_args --quiet"
exec 5>/dev/null
$SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
exec 5>>config.log
# Use ||, not &&, to avoid exiting from the if with $? = 1, which
# would make configure fail if this is the last instruction.
$ac_cs_success || { (exit 1); exit 1; }
fi
if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
{ $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
fi
firmware-tools-2.1.14/AUTHORS 0000664 0017654 0017654 00000000435 11452664733 022200 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 Author: Jeffrey L Mendoza
Author: Matt Domsch
Author: Michael E Brown
Author: Michael E Brown
Author: Sadhana B
Author: Ville Skyttä
firmware-tools-2.1.14/COPYING.LIB 0000664 0017654 0017654 00000063644 10756403330 022571 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations
below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
^L
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it
becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
^L
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control
compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
^L
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
^L
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
^L
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
^L
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply, and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License
may add an explicit geographical distribution limitation excluding those
countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
^L
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
^L
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms
of the ordinary General Public License).
To apply these terms, attach the following notices to the library.
It is safest to attach them to the start of each source file to most
effectively convey the exclusion of warranty; and each file should
have at least the "copyright" line and a pointer to where the full
notice is found.
Copyright (C)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Also add information on how to contact you by electronic and paper
mail.
You should also get your employer (if you work as a programmer) or
your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James
Random Hacker.
, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!
firmware-tools-2.1.14/ChangeLog 0000664 0017654 0017654 00000342103 11452664733 022703 0 ustar 00michael_e_brown michael_e_brown 0000000 0000000 commit a32b82f8b18f53569f94b2bcb3644a962572094a
Merge: 86a134f... 0451b50...
Author: Michael E Brown
Date: Tue Oct 5 13:10:08 2010 -0500
Merge branch 'master' of /var/ftp/pub/Applications/git/firmware-tools
* 'master' of /var/ftp/pub/Applications/git/firmware-tools:
version bump
dont open root-only firmware-updates.log until we actually need to write to it
update to latest version (3.3) of PLY lex/yacc.
commit 0451b507f6639da0e58f0896c8a70c6176b27d1d
Author: Michael E Brown
Date: Tue Oct 5 13:07:04 2010 -0500
version bump
commit 4b629ead7dcbcc9fe130ca71a431917687896421
Author: Michael E Brown
Date: Tue Oct 5 13:06:46 2010 -0500
dont open root-only firmware-updates.log until we actually need to write to it
commit 614b94d1bfa6b8d8d79e3f4625140f0b9a1913f7
Author: Michael E Brown
Date: Mon Sep 27 14:18:16 2010 -0500
update to latest version (3.3) of PLY lex/yacc.
commit 86a134fb508fff3881eb69fc3a3a7b5d4993fa93
Author: Michael E Brown
Date: Tue Jun 15 15:40:50 2010 -0500
fixup release script env var path construction
commit 7bc34895ccdcb2d83a0f55e7b4de9cab90b3d338
Author: Michael E Brown
Date: Mon May 24 15:40:54 2010 -0500
version bump
commit a6393f3e70086f9850460d37da9a0d603cc09f14
Author: Michael E Brown
Date: Mon May 24 15:40:47 2010 -0500
more sane way of replacing vars
commit fd068978be5476270956a585e0b7015fd09694a1
Author: Michael E Brown
Date: Mon May 24 14:05:41 2010 -0500
version bump
commit e4f34d6d242e2fb2e1479ba630243fe83a65547b
Author: Michael E Brown
Date: Mon May 24 14:05:27 2010 -0500
more build system updates
commit f7752e126d0ef7a016babcfdadb1bf9fe08864ac
Author: Michael E Brown
Date: Mon May 24 13:59:53 2010 -0500
version bump + update required version of autotools
commit 160f27f16f42c1086cf3bb99ee9c5444b18a87be
Author: Michael E Brown
Date: Mon May 24 13:48:24 2010 -0500
remove unused makefile targets
commit b07396b62b5c024461d940cdf3711fce1f5be28a
Author: Michael E Brown
Date: Mon May 24 13:37:28 2010 -0500
sync release script with libsmbios script
commit 418e686b426957153b91d4f6fc0d7db58725bca5
Author: Michael E Brown
Date: Mon May 24 13:31:24 2010 -0500
build system updates
commit 0f7f2f1ace0a9e62b22f8093e5487beeee6a097d
Author: Michael E Brown
Date: Mon May 24 12:51:06 2010 -0500
version bump
commit 2863d777a848a6c2e0cba6c02623537bc3e21933
Author: Jeffrey L Mendoza
Date: Fri May 14 11:22:55 2010 -0500
Updated compare logic so packages will match to shortname on devices.
commit 12f9784258a8bc60eabece959b0ebc4fd127583d
Author: Michael E Brown
Date: Wed Mar 31 12:01:22 2010 -0500
add localstatedir
commit 128fb7052db35abf8299d00ff6199d32f3e2f6ba
Author: Michael E Brown
Date: Thu Mar 4 15:05:06 2010 -0600
reclass DisablePlugin as a child of ImportError so it is easier to catch both at the same time.
commit e990845feaf2767192b4c7a15a38ca6f1944700c
Author: Michael E Brown
Date: Thu Mar 4 15:02:49 2010 -0600
rebuild spec if needed
commit 6156134e4a60fd6b2cc871a8886dc8fd5d047058
Author: Michael E Brown
Date: Thu Mar 4 15:02:36 2010 -0600
Dont use maintainer mode
commit 17f5e17abd3f7db2e551d8af896358d5d3037770
Author: Jeffrey L Mendoza
Date: Thu Nov 5 16:07:05 2009 -0600
Moved helloworld print to verbose.
Signed-off-by: Michael E Brown
commit e5b8382f539289ed52d4560b93f30b1ef8ab1095
Author: Jeffrey L Mendoza
Date: Thu Sep 24 13:46:25 2009 -0500
Changed bootstrap_firmware to output extra strings for pci devices with and without subven/subdev.
Signed-off-by: Michael E Brown
commit 9e666701e35e668f9a9d2620471bcdf376786288
Author: Michael E Brown
Date: Fri Jul 10 11:00:31 2009 -0500
version bump
commit bac6de0c91f09bb6c82a3f1f5824cf0970409a5a
Author: Jeffrey L Mendoza
Date: Wed Jul 1 10:42:02 2009 -0500
Changed update log to log time
Signed-off-by: Michael E Brown
commit 2b550f46e9a49fccd6c9502a17a228a984939bc0
Author: Jeffrey L Mendoza
Date: Tue Jun 30 19:50:28 2009 -0500
Fixed testcase that broke on SLES11 with last commit.
Signed-off-by: Michael E Brown
commit 4851c35478b4eacf578c1843d7c14d0b9a8349c0
Author: Jeffrey L Mendoza
Date: Tue Jun 30 15:30:36 2009 -0500
Added logger for updates.
Signed-off-by: Michael E Brown
commit 67fdb593a728a7eb42337910358252361d702717
Author: Michael E Brown
Date: Fri Jul 10 10:55:22 2009 -0500
make configure.ac work with rhel5 autotools
commit fc0ca490337a423172b14ab58205fa08e19b843c
Author: Michael E Brown
Date: Wed Jul 8 17:11:14 2009 -0500
simplify configure.ac
commit 477f4cffbd40e737599c05ea3195f834866d967a
Author: Jeffrey L Mendoza
Date: Thu Mar 26 15:34:23 2009 -0500
add override for storage-topdir to the cli --update command
Signed-off-by: Michael E Brown
commit 93dd58ad1b56eeffa2c8d6cea58a6d271ce3907e
Author: Michael E Brown
Date: Mon Mar 23 17:24:51 2009 -0500
version bump
commit 5680049c244fe68205c21848617d3b6315079c1d
Author: Michael E Brown
Date: Mon Mar 23 17:23:43 2009 -0500
pkg now produces .bz2 dist, change spec accordingly
commit 27f55514f748de28d7e915952c176fe46c783439
Author: Michael E Brown
Date: Mon Mar 23 16:53:17 2009 -0500
version bump
commit eba4d545a74bcc81b32ac1d7b8b794ef9e947cc9
Author: Michael E Brown
Date: Mon Mar 23 16:51:59 2009 -0500
sync autoconf with libsmbios
commit d99d5a07af459a8a2fe04b87ea93e9ba3500192c
Author: Michael E Brown
Date: Fri Dec 12 16:47:13 2008 -0600
update build infrastructure to match libsmbios release.
commit d76f9e2a23282e785ade2b10ee9bd2122656b5b6
Author: Michael E Brown
Date: Thu Sep 11 17:18:30 2008 -0500
add direct build support for osb upload
commit 78d924238e84ae1a851ac20de451f8592423261b
Author: Michael E Brown
Date: Thu Sep 11 17:18:03 2008 -0500
fixup suse build timestamp problem and suse rpmlint problem.
commit 91aec81b65dc4d5707a3a2c10391ab0623cbe7aa
Author: Michael E Brown
Date: Thu Sep 11 16:30:34 2008 -0500
fix rpmlint complaints about non-executable scripts. These files dont need #!/usr/bin/python, since they are not executed
commit be8a3c3a83a72eebf15caaa3678958181164b469
Author: Michael E Brown
Date: Thu Sep 11 13:47:51 2008 -0500
no longer use mock infrastructure to push rpms.
commit 014e791c92c8ce2828eb59e04c82f1d9c652ef23
Author: Michael E Brown
Date: Thu Sep 11 13:29:07 2008 -0500
version bump
commit fa612b12a0a25697cf1eb8fb55a2f2481f7f4eab
Author: Michael E Brown
Date: Thu Sep 11 13:28:59 2008 -0500
SUSE does not have noarch python pkgs, dont build noarch on suse.
commit 76c91f383653eaf25dcf141ec4affe4d9f58d776
Author: Michael E Brown
Date: Wed Aug 27 09:34:03 2008 -0500
non-interactive force remove of old files.
commit b3690fb76ff7d54e67b05fee57be721cae5c3472
Author: Michael E Brown
Date: Wed Aug 27 09:29:13 2008 -0500
update meta data from updated spec file as part of upload.
commit 3341c1c37cc2fa7ce5492f91108075ac82759d7c
Author: Michael E Brown
Date: Tue Aug 26 18:03:19 2008 -0500
bugfix updates for Makefile-std
commit 96ef7523dd75c234f0537943654b2f8f044c47d4
Author: Michael E Brown
Date: Tue Aug 19 17:33:47 2008 -0500
add script to upload to opensuse build service
commit 17c47ebe3fb2a5c1837d0f7284dc4086ee0b1112
Author: Michael E Brown
Date: Tue Aug 19 17:33:33 2008 -0500
add SPECFILE variable to shorted a couple of statements
commit 55d9db0eda9afbba53cc8cfbf03af2f95d4bfabd
Author: Michael E Brown
Date: Mon Aug 18 17:35:31 2008 -0500
allow rpm build from git-generated archive.
commit 92c7ab7b8b6c2a628fcb47d749a53667c00c89c1
Author: Michael E Brown
Date: Mon Aug 18 16:58:03 2008 -0500
more Makefile-std standardizations.
commit bee206f50c0b8c22448b0420ff03c3b8e8a207ee
Author: Michael E Brown
Date: Mon Aug 18 16:54:37 2008 -0500
start trying to standardize Makefile-std
commit 9329893d78d130f6cc47f8de2ea2d3b654f118f3
Author: Michael E Brown
Date: Wed Aug 13 16:45:39 2008 -0500
fixup mock configs for recent cleanups.
commit 6ddba88ea3622f941a9e38032a267ac8b2f16b1a
Author: Michael E Brown
Date: Wed Aug 13 13:57:37 2008 -0500
fixup opensuse config names.
commit 20c3331389e3997be58a0fb616378d979d8b0297
Author: Michael E Brown
Date: Wed Aug 13 13:44:17 2008 -0500
fix license tags
commit 013a78c573d684e053da50990872cc0387c3d29c
Merge: 941cfc9... acb3d2f...
Author: Michael E Brown
Date: Wed Aug 13 13:17:10 2008 -0500
Merge branch 'master' of ssh://michael_e_brown@mock.linuxdev.us.dell.com/var/ftp/pub/Applications/git/firmware-tools
* 'master' of ssh://michael_e_brown@mock.linuxdev.us.dell.com/var/ftp/pub/Applications/git/firmware-tools:
re-enable opensuse builds.
version bump
version bump
commit 941cfc93fe0e2f45dbce85b7af6c2cf5d4b382ab
Author: Michael E Brown
Date: Wed Aug 13 13:13:26 2008 -0500
update to build for f9
commit 4cdb2c179492c8099fcac5f949339860ae0fffcd
Author: Michael E Brown
Date: Tue Mar 18 17:42:44 2008 -0500
split button label text
commit acb3d2f150af24fdf228af7f6b6e606a8079814e
Author: Michael E Brown
Date: Mon Mar 17 01:14:47 2008 -0500
re-enable opensuse builds.
commit 487344ed36ec95f33e34d12f482f18d5082ddfae
Author: Michael E Brown
Date: Mon Mar 17 01:14:35 2008 -0500
version bump
commit 0945687d4581c14e3fbaee45b7a03b327ed96832
Merge: c97b955... cf2d246...
Author: Michael E Brown
Date: Mon Mar 17 00:17:01 2008 -0500
Merge branch 'master' of /var/ftp/pub/Applications/git/firmware-tools
* 'master' of /var/ftp/pub/Applications/git/firmware-tools:
work around super-idiotic dell inventory collector bug by making the PCI DBDF the unique identifier for pci devs.
hide unknown devices by default.
add support to hide unknown devices.
commit c97b955d77594d0c96ab799df7d2386dd2dafc7f
Author: Michael E Brown
Date: Sun Mar 16 23:17:57 2008 -0500
version bump
commit cf2d24661e281e65182d130cc56cb25fe8e02054
Author: Michael E Brown
Date: Thu Mar 13 13:17:38 2008 -0500
work around super-idiotic dell inventory collector bug by making the PCI DBDF the unique identifier for pci devs.
commit 91904aa69d0ce244005b92e14262c0d84dc618ff
Author: Michael E Brown
Date: Wed Mar 12 15:41:22 2008 -0500
hide unknown devices by default.
commit a7963c4184e660034cb8552673473bc50d85f9b1
Author: Michael E Brown
Date: Wed Mar 12 15:32:15 2008 -0500
add support to hide unknown devices.
commit 765f4da2089f96f38de60a191e69f7be5983212b
Author: Michael E Brown
Date: Wed Mar 12 12:06:58 2008 -0500
remove dell-lsiflash and dell-bmcflash conflicts: as they tickle a bug in up2date and prevent smooth updates.
commit c0c3eb7d86d4224c0293d70a474a416d5fe0d109
Author: Michael E Brown
Date: Tue Mar 11 20:10:34 2008 -0500
add compat subprocess module for use by firmware-tools and firmware-addon-dell.
commit 9b6af57ee20116a313cf5169e05a13326bafe40a
Author: Michael E Brown
Date: Tue Mar 11 20:02:18 2008 -0500
re-add system-specific packages to bootstrap inventory using new getSystemId() method. fixup GUI as well.
commit 7798f8f49ba37bc2b9bdc84233812030b0b70cb1
Author: Michael E Brown
Date: Tue Mar 11 19:32:14 2008 -0500
ensure we dont accidentally get installed with incompatible pkgs.
commit c95aa84687a84a3f673c3e42daf8de6a2d20cdf9
Author: Michael E Brown
Date: Tue Mar 11 19:31:58 2008 -0500
version bump
commit 5b9b90eaef95450bd0c9358bf010f02a6978e49e
Author: Michael E Brown
Date: Tue Mar 11 14:53:56 2008 -0500
fixup unit tests for new style.
commit 61e21b4f22c684e02a69c16fa2712ac1adf7dac7
Author: Michael E Brown
Date: Tue Mar 11 14:53:47 2008 -0500
remove TYPE_MOCK_BOOTSTRAP reference
commit e68c7b84d19efc1943c1307bf2430ca9bc5307ee
Author: Michael E Brown
Date: Tue Mar 11 14:39:34 2008 -0500
remove TYPE_BOOTSTRAP and TYPE_MOCK_BOOTSTRAP
commit 4a4304407d787fd80accb0bac0b3520fda653e94
Author: Michael E Brown
Date: Tue Mar 11 14:03:19 2008 -0500
convert inventory to use normal plugin hooks rather than separate, parallel system.
commit 0717175703d8424abcf8c06fe5e819bbb78516fe
Author: Michael E Brown
Date: Tue Mar 11 00:53:54 2008 -0500
add lspci string as displayname
commit 488a901f299f9ca7e15035b24ac2caf29f6fb372
Author: Michael E Brown
Date: Tue Mar 11 00:53:30 2008 -0500
make gui work with combined bootstrap/inventory
commit a34d6bfb3926905b3b95b969dc794c38056be453
Author: Michael E Brown
Date: Tue Mar 11 00:43:27 2008 -0500
add postinventory plugin conduit def
commit 9ed6199c1d63e98d2158e0ede70b26d66cd77e8f
Author: Michael E Brown
Date: Tue Mar 11 00:36:19 2008 -0500
add accessor to get device by unique instance.
commit bf17a4576dfb022327a76fabcc58ddb8f978fac9
Author: Michael E Brown
Date: Tue Mar 11 00:36:01 2008 -0500
make new inventory scheme compile.
commit 0cf41006012f1d76720a09ea448ac2e32b01def8
Author: Michael E Brown
Date: Mon Mar 10 10:50:16 2008 -0500
initial work to kill bootstrap separate inventoyr list. not complete/compiled
commit 782facfbdf0b62f77ad83889193afd3740939bb7
Author: Michael E Brown
Date: Tue Mar 4 08:59:01 2008 -0600
make sure spinprint doesnt scroll on default terminal sizes.
commit c75668861fc2083a7e00a0d99c64b5e8b6f08957
Author: Michael E Brown
Date: Thu Feb 28 16:37:17 2008 -0600
supress "no handlers could be found for..." message in cmdline code.
commit 7201d6fcae8a09bf58e8d25b7a189eab3bffd99e
Author: Ville Skyttä
Date: Tue Feb 5 20:38:30 2008 +0200
Improve plugin import error message.
Signed-off-by: Ville Skyttä
Signed-off-by: Michael E Brown
commit d76c4fba6d6fad6cd6642b9abaabee6fa14bc179
Author: Michael E Brown
Date: Wed Feb 20 10:56:53 2008 -0600
use upload overrides for official build too.
commit 6efda6eeb64fbf2d52040f606e2207947dd9db59
Author: Michael E Brown
Date: Wed Feb 20 10:34:49 2008 -0600
version bump
commit 6469635714344d0ca836901a158284cc8f998e44
Merge: 0f41ca3... a3fa352...
Author: Michael E Brown
Date: Wed Feb 20 10:34:02 2008 -0600
Merge branch 'master' of /var/ftp/pub/Applications/git/firmware-tools
* 'master' of /var/ftp/pub/Applications/git/firmware-tools:
fixup deprecation warning.
additional asserts to try to catch name/version/displayname incorrectly-set bugs.
fix serious displayname-set-incorrectly bug.
commit 0f41ca3bfca23c041375fd2eedc9b0f699df9d24
Author: Michael E Brown
Date: Wed Feb 20 01:27:28 2008 -0600
version bump
commit a3fa35235a0edbcbdc32f636834d5d053f03f161
Merge: 5c228c4... c03a07e...
Author: Michael E Brown
Date: Wed Feb 20 00:46:46 2008 -0600
Merge branch 'master' of ssh://mock/var/ftp/pub/Applications/git/firmware-tools
* 'master' of ssh://mock/var/ftp/pub/Applications/git/firmware-tools:
version bump
commit 5c228c4a7a2880defc1ea9809ca61b9010176abf
Author: Michael E Brown
Date: Wed Feb 20 00:46:36 2008 -0600
fixup deprecation warning.
commit 86dcf0f4765cc5d03d47a7adce41015eb9dc87d3
Author: Michael E Brown
Date: Wed Feb 20 00:46:27 2008 -0600
additional asserts to try to catch name/version/displayname incorrectly-set bugs.
commit 17270d40dc314fd7671dc16e64d9297595492857
Author: Michael E Brown
Date: Wed Feb 20 00:46:00 2008 -0600
fix serious displayname-set-incorrectly bug.
commit c03a07e94628eed5546567c22d06529d34ded88e
Author: Michael E Brown
Date: Tue Feb 19 17:24:20 2008 -0600
version bump
commit 8ac763ea48f27b57f5b4ae23cfa8bf21a60b03f7
Author: Michael E Brown
Date: Tue Feb 19 11:09:44 2008 -0600
fix typo... missing backslash at eol.
commit b571e00fe348537ff094afcc0bcc205eb04dfec7
Author: Michael E Brown
Date: Tue Feb 19 01:57:15 2008 -0600
add a method to drop into debugger via signal
commit 021b245681d1a072c9868f13630a24d07d1ff912
Author: Michael E Brown
Date: Tue Feb 19 00:55:02 2008 -0600
setup bootstrap so that it can take args as well (base/callback).
commit 9536e43995d75ba8403675c95d5fc41535be7084
Author: Michael E Brown
Date: Mon Feb 18 18:16:26 2008 -0600
version bump
commit 531d7346cb23a11f3c701cb50c58f849b30321d7
Author: Michael E Brown
Date: Mon Feb 18 17:48:35 2008 -0600
fixup small problem with rhel4 where some functions str(result) wind up with non-str output.
commit 645d924256457c791e5d9103cb1bab01a6344907
Author: Michael E Brown
Date: Fri Feb 15 16:28:50 2008 -0600
add sles compile since it now works.
commit a05373f8788f40c2cb60fdd23f01c75c422211c7
Author: Michael E Brown
Date: Fri Feb 15 16:27:13 2008 -0600
pull in python-decoratortools module to fix compile on SUSE where we dont have this module.
commit 24844def24b8a7c5d96ca05cf2fd771c344eae12
Author: Michael E Brown
Date: Fri Feb 15 16:25:30 2008 -0600
fix #! lines to point to python instead of python2. fixes suse compile.
commit fb3a03b7cc28472cf3e1c17d630d91626fb5891a
Author: Michael E Brown
Date: Fri Feb 15 14:28:01 2008 -0600
only build source for srpm, not all.
commit cd86518c1247e4ec9caeaa9e0f0086b407253ae2
Author: Michael E Brown
Date: Wed Feb 13 09:51:51 2008 -0600
version bump
commit 33bb2a572b9d6bb931ee50442dd6a7e5bcf4b5ba
Author: Michael E Brown
Date: Sun Feb 10 21:30:38 2008 -0600
fixup inventory to print spinner so user knows what is going on.
commit 93d04247d6eea277c53c401ff35d14ee6f8f98ac
Author: Michael E Brown
Date: Sun Feb 10 21:30:15 2008 -0600
fixup unknown callback to accept all args.
commit bfb0faf7e9eddfb9315eebb74fec54dcacefaf8f
Author: Michael E Brown
Date: Sun Feb 10 21:29:37 2008 -0600
allow override of which fd use to print spinner. default to print spinner on stderr.
commit 86c551ce390d5a1c393a1faabef585433a028aab
Author: Michael E Brown
Date: Sun Feb 10 21:28:25 2008 -0600
add callbacks to mock inventory so we can test printing messages.
commit 89cefb3470925c84aebffd55cb4ef5811a73a010
Author: Michael E Brown
Date: Sun Feb 10 21:27:41 2008 -0600
rename generator back so we dont break firmware_addon_dell. Fixup name to have leading zeros in all hex numbers.
commit 4a71079fcbfa263f9af89fe8632d201513898116
Author: Michael E Brown
Date: Sun Feb 10 21:27:00 2008 -0600
pass args as kargs
commit d53916a96478bae9c57d6b7dc269bc8108ddc7a7
Author: Michael E Brown
Date: Sun Feb 10 13:43:24 2008 -0600
move callback class out of line so it can be re-used by other code.
commit dd7576e0a8db68c44fb28f2dd4ef7a8dc425985e
Author: Michael E Brown
Date: Sun Feb 10 13:41:51 2008 -0600
use callback helpers from firmwaretools base.
commit 3d3f4626e296d247c32ce9c3e4efd15521db215a
Author: Michael E Brown
Date: Sun Feb 10 13:41:23 2008 -0600
redo mock mode to work with new bootstrap pci code.
commit 94d5a0c78b417bc92482bc6e6b6ecf60279092e4
Author: Michael E Brown
Date: Sun Feb 10 13:40:47 2008 -0600
rewrite bootstrap pci to use sysfs rather than hacky lspci.
commit 0c82f4727b53489fb69356f5db49564ded042947
Author: Michael E Brown
Date: Sun Feb 10 13:40:19 2008 -0600
pull callback base class and helper function into firmwaretools base.
commit 0631d9781bc69e21c90f02db91dd1c3a506c71de
Author: Michael E Brown
Date: Sat Feb 9 16:01:40 2008 -0600
be backwards compat with old python. fix typo.
commit 87d0f09eb9ab714dadd4d9a44577983d976c3dc5
Author: Michael E Brown
Date: Sat Feb 9 15:57:51 2008 -0600
small tweak to callback function.
commit 20f9849492ce2d2aff98e34b5753cec88bd8df0e
Author: Michael E Brown
Date: Sat Feb 9 15:57:28 2008 -0600
old style mock package wrapper not used.
commit 3bbcfec19c414bc6c1e01270581cd532f5eaedf6
Author: Michael E Brown
Date: Sat Feb 9 15:56:51 2008 -0600
fix typo
commit 0b1b2e528d31a850b5c577e43f69899d45199218
Author: Michael E Brown
Date: Sat Feb 9 12:42:08 2008 -0600
initial stab at reworking callbacks.
commit 57c43aa7a8e575867eb9df69b3985895a7e7fc13
Author: Michael E Brown
Date: Fri Feb 8 17:04:01 2008 -0600
build for rhel4
commit 7aed29833275276685e5a932801f9d07b034fc29
Author: Michael E Brown
Date: Fri Feb 8 16:36:38 2008 -0600
key off unique devices rather than names.
commit d7960a5a17bdae09eb0e76239fa71d5645055d50
Author: Michael E Brown
Date: Fri Feb 8 16:36:07 2008 -0600
make sure mock package has uniqueInstance.
commit 6f3610d7ca6fc92309975b854c89f487f426524e
Author: Michael E Brown
Date: Fri Feb 8 15:32:11 2008 -0600
pass FtBase self argument to inventory functions
commit bd47d2d35d866236df5616eb0d9b894792e8132a
Author: Michael E Brown
Date: Fri Feb 8 15:31:10 2008 -0600
bootstrap and inventory can take args.
commit 9d19c0d0d1ed84855fca242e4f9d630aef662adc
Author: Michael E Brown
Date: Fri Feb 8 15:30:53 2008 -0600
bootstrap can take args.
commit 23857701b9015bbbb380dd46df37e17e073af139
Author: Michael E Brown
Date: Thu Feb 7 23:47:32 2008 -0600
update unit tests to work with new api.
commit 278b482087b3f15b0fb01bfc870083a5d9e73ad2
Author: Michael E Brown
Date: Thu Feb 7 23:38:35 2008 -0600
add FtBase SystemInventory object so base handles repo and inventory. update clients to use it instead of manually calling generateUpdateSet().
commit a681dd52e022ad26d1e851ae38609c2df980c317
Merge: 86cf96e... 443f43b...
Author: Michael E Brown
Date: Thu Feb 7 15:28:32 2008 -0600
Merge branch 'master' of ssh://michael_e_brown@mock.linuxdev.us.dell.com/var/ftp/pub/Applications/git/firmware-tools
* 'master' of ssh://michael_e_brown@mock.linuxdev.us.dell.com/var/ftp/pub/Applications/git/firmware-tools:
version bump
fixup copyright headers and attributions.
add master search path for plugins. make conf items case-insensitive.
message for when plugin raises DisablePlugin exception.
fix typo in manual plugin disable codepath.
re-add -b switch to be alias for --bootstrap. was accidentally dropped in ft2
version bump
remove deprecated dprint calls.
disable futurewarnings to silence rhel4 warning in peak.decoratorutils. bump version.
remove dprints() as that function went away.
remove dead/unused function.
add log message for running inventory/bootstrap module functions.
ensure we always release from a clean git tree.
bump version
commit 86cf96e5a49379c58f9c40148032edc0ef7e36ed
Author: Michael E Brown
Date: Thu Feb 7 15:28:27 2008 -0600
remove bad copyright header
commit 443f43b9ab98ffc15ba2570045e31080cad85d4a
Merge: f73ebe9... 46e76b1...
Author: Michael E Brown
Date: Sat Feb 2 16:37:37 2008 -0600
Merge branch 'master' of /var/ftp/pub/Applications/git/firmware-tools
* 'master' of /var/ftp/pub/Applications/git/firmware-tools:
add master search path for plugins. make conf items case-insensitive.
message for when plugin raises DisablePlugin exception.
fix typo in manual plugin disable codepath.
remove dprints() as that function went away.
commit f73ebe933b91102c578fc55b8f50a2121beb923f
Author: Michael E Brown
Date: Sat Feb 2 16:25:35 2008 -0600
version bump
commit 7e22c616bfd22efe7b2deab22f6d9c0146f17ccd
Author: Michael E Brown
Date: Sat Feb 2 16:25:16 2008 -0600
fixup copyright headers and attributions.
commit 46e76b1cfba0d9d745b09e04bbbb6b0591c7816b
Author: Michael E Brown
Date: Sat Feb 2 00:02:08 2008 -0600
add master search path for plugins. make conf items case-insensitive.
commit bb56f7076c0a0b0f8fe7267f4a09af0a98858deb
Author: Michael E Brown
Date: Fri Feb 1 20:50:11 2008 -0600
message for when plugin raises DisablePlugin exception.
commit 3f78ba3761de0fd586950b93889211fa2201b377
Author: Michael E Brown
Date: Fri Feb 1 20:32:19 2008 -0600
fix typo in manual plugin disable codepath.
commit f85267448519fd739652565df459d9097ec324b8
Merge: 005d550... 41c5d25...
Author: Michael E Brown
Date: Wed Jan 30 22:04:04 2008 -0600
Merge branch 'master' of ssh://mock/var/ftp/pub/Applications/git/firmware-tools
* 'master' of ssh://mock/var/ftp/pub/Applications/git/firmware-tools:
re-add -b switch to be alias for --bootstrap. was accidentally dropped in ft2
commit 41c5d25aa4dbc3efea3217bbf1969f7c84607e8c
Author: Michael E Brown
Date: Wed Jan 30 13:15:58 2008 -0600
re-add -b switch to be alias for --bootstrap. was accidentally dropped in ft2
commit 005d5506146330d609d48377bfc3b16fb64c3af4
Merge: 83f13bc... ed9d9c1...
Author: Michael E Brown
Date: Tue Jan 29 22:50:56 2008 -0600
Merge branch 'master' of ssh://mock/var/ftp/pub/Applications/git/firmware-tools
* 'master' of ssh://mock/var/ftp/pub/Applications/git/firmware-tools:
version bump
remove deprecated dprint calls.
disable futurewarnings to silence rhel4 warning in peak.decoratorutils. bump version.
remove dead/unused function.
add log message for running inventory/bootstrap module functions.
ensure we always release from a clean git tree.
bump version
add ability to disable plugin from within plugin code.
fixup requires.
update release script.
commit ed9d9c1b47a4632edebf64b4bb9e5f636c2bc1ff
Author: Michael E Brown
Date: Tue Jan 29 21:03:00 2008 -0600
version bump
commit 8e3984c90a6bb82dfdef2811f17f440e7fdc7bfe
Author: Michael E Brown
Date: Tue Jan 29 21:02:35 2008 -0600
remove deprecated dprint calls.
commit b04a984aa7ea7f2f913bfb60232a7ba947639f0a
Author: Michael E Brown
Date: Tue Jan 29 09:48:52 2008 -0600
disable futurewarnings to silence rhel4 warning in peak.decoratorutils. bump version.
commit 83f13bc1f45c71f3c1225c9197d17303557c4553
Author: Michael E Brown
Date: Tue Jan 29 08:56:19 2008 -0600
remove dprints() as that function went away.
commit abdaf720906be4686598611d3204a782df5908b3
Author: Michael E Brown
Date: Mon Jan 28 16:38:16 2008 -0600
remove dead/unused function.
commit 687c41637291c96d7da798e74b629db2679e28bd
Author: Michael E Brown
Date: Mon Jan 28 16:37:55 2008 -0600
add log message for running inventory/bootstrap module functions.
commit b9e431dbdc34d4c123ec773bdae1ff98603b9b38
Author: Michael E Brown
Date: Mon Jan 28 14:43:07 2008 -0600
ensure we always release from a clean git tree.
commit fa587d6fa19b241df2bfc42c4fd5d6cbf7291bf7
Author: Michael E Brown
Date: Mon Jan 28 14:42:26 2008 -0600
bump version
commit 9c478e3a4e0d187d09ef7f8fe6d736b07469a3ec
Author: Michael E Brown
Date: Mon Jan 28 14:12:56 2008 -0600
add ability to disable plugin from within plugin code.
commit 44f2cfb2c64051dee19b352e85399bc2b48e613b
Author: Michael E Brown
Date: Mon Jan 28 12:57:58 2008 -0600
fixup requires.
commit 843d7750a47cd40479f254c87e4e0b52726b3416
Author: Michael E Brown
Date: Mon Jan 28 11:22:27 2008 -0600
update release script.
commit ecd829239e870bed42064babbdee023b35629079
Author: Michael E Brown
Date: Sat Jan 26 01:04:08 2008 -0600
add vim directive
commit 2c8d78ff5550e477599ce85c7085acb5d1aea761
Author: Michael E Brown
Date: Sat Jan 26 01:03:57 2008 -0600
sync Makefile-std with other projs: fix LIBDIR, add LIBEXECDIR substs.
commit 5199ef3a23980125c0a7b6c1602f32d395833f0d
Author: Michael E Brown
Date: Sat Jan 26 01:03:32 2008 -0600
make compat with older python generators.
commit 812e36ecaa3b2d725332dce179a88753cb81bf38
Author: Michael E Brown
Date: Fri Jan 25 14:14:55 2008 -0600
add hook to specify configs for autobuilder
commit 12bb77ae16ae97ff4059dd79ccdb92e7f3f94a78
Author: Michael E Brown
Date: Tue Jan 22 01:56:12 2008 -0600
remove useless comment lines in spec template.
commit a5549b6b613c6075ff916af3fee36329d079e7d0
Author: Michael E Brown
Date: Tue Jan 22 01:55:52 2008 -0600
always at least report summary for io error.
commit d3c62f55995ac3abcfbda384fccc4f8da37dad23
Author: Michael E Brown
Date: Tue Jan 22 01:55:28 2008 -0600
fixup usage to display command basename instead of hardcode. late-bind --help argument.
commit 4b3b1050644221585105fd8551d00785e59e18eb
Author: Michael E Brown
Date: Sun Jan 20 18:26:19 2008 -0600
only ignore import errors.
commit 292a645fd0c7a442235e23a9b75520d8cf9ffe9d
Author: Michael E Brown
Date: Sun Jan 20 18:26:08 2008 -0600
rename command plugins so you can tell by the module name it is a command.
commit c4e7496a4b150a054899997bd75f9a0d9e17aba8
Author: Michael E Brown
Date: Fri Jan 18 00:40:04 2008 -0600
add way for conduit to extract config information.
commit 076d425462b8340ca78486bc84d2c7e581c651ec
Author: Michael E Brown
Date: Fri Jan 18 00:36:08 2008 -0600
add a friendly error message for missing plugin error.
commit e08e6ae6256d484937b7a41ff6c75548e2651fc8
Author: Michael E Brown
Date: Thu Jan 17 23:29:10 2008 -0600
remove generated file.
commit f62bf3d3e5062e3e3a8a46d4b966617446d68ce1
Merge: ecce6a7... e56dead...
Author: Michael E Brown
Date: Thu Jan 17 23:22:20 2008 -0600
Merge branch 'master' of http://linux.dell.com/git/firmware-tools
* 'master' of http://linux.dell.com/git/firmware-tools:
more buildrequires for mock.
update buildrequires so it will build in mock.
no longer need VERSION in confdefs since we use no-define now.
symlinks instead of hardlinks for compat
back-compat symlink for /usr/bin/update_firmware
we have long filenames (>99chars), so specify ustar tar format.
remove generated file
we need to pull modules from source tree since they are not copied to build tree.
use __file__ to guarantee that paths will be relative to __init__.py
don't dist Debian files.
add back compat symlink for /usr/bin/update_firmware because we still have bios updates floating around with this.
stdout instead of stderr for listplugins.
stdout instead of stderr for inventory.
print to stdout instead of stderr for bootstrap
fixup path in config file
load plugin config files.
rationalize configdir variables.
commit ecce6a743ec5375d06805e65c4206c936804ce97
Author: Michael E Brown
Date: Wed Jan 16 09:55:13 2008 -0600
make type allocation and slot/conduit assignment more dynamic.
commit 970153e58519ff1fe74062ac905224e108092c43
Author: Michael E Brown
Date: Wed Jan 16 09:54:58 2008 -0600
deprecate TYPE_INTERACTIVE plugin for now.
commit e56deadeed98f9efbf5b0c62c07139c26058e8c0
Author: Michael E Brown
Date: Wed Jan 16 00:49:58 2008 -0600
more buildrequires for mock.
commit fc0cb60a546cc370133e9761b8911432dc845936
Author: Michael E Brown
Date: Wed Jan 16 00:46:13 2008 -0600
update buildrequires so it will build in mock.
commit 058bfec807253709f89b856b0d01c1d6f0a92058
Author: Michael E Brown
Date: Wed Jan 16 00:34:16 2008 -0600
no longer need VERSION in confdefs since we use no-define now.
commit 956681e12334e720fef4a18ac97fb36a30fd46fb
Author: Michael E Brown
Date: Wed Jan 16 00:25:25 2008 -0600
symlinks instead of hardlinks for compat
commit b5a5dca9d90325f36621026b80aef5cf3ef975ad
Author: Michael E Brown
Date: Wed Jan 16 00:17:21 2008 -0600
back-compat symlink for /usr/bin/update_firmware
commit 7b83b265e048760398a3570d152077a7cec119f5
Author: Michael E Brown
Date: Wed Jan 16 00:16:56 2008 -0600
we have long filenames (>99chars), so specify ustar tar format.
commit b3bce39eacb9b959e05a08c8ce0e45f587035c29
Author: Michael E Brown
Date: Wed Jan 16 00:09:00 2008 -0600
remove generated file
commit 50fac2cf0b25377d8a86068e171563333da2014d
Author: Michael E Brown
Date: Tue Jan 15 18:46:46 2008 -0600
we need to pull modules from source tree since they are not copied to build tree.
commit aa69744d3acbe231d4c4afcfb1ea763a77796220
Author: Michael E Brown
Date: Tue Jan 15 18:46:22 2008 -0600
use __file__ to guarantee that paths will be relative to __init__.py
commit b39aca6a7655367aa1670e4ad19ea57c4c73d60f
Author: Michael E Brown
Date: Tue Jan 15 18:46:00 2008 -0600
don't dist Debian files.
commit 35942a57b0aab45e539ee2f3349da125bb35724f
Author: Michael E Brown
Date: Tue Jan 15 17:54:20 2008 -0600
add back compat symlink for /usr/bin/update_firmware because we still have bios updates floating around with this.
commit b2944e1c7750bbccd849d00463af8af8bd3c2f42
Author: Michael E Brown
Date: Tue Jan 15 17:12:10 2008 -0600
stdout instead of stderr for listplugins.
commit cbac35e26790b17daeca77430773bce7f1b0834f
Author: Michael E Brown
Date: Tue Jan 15 17:11:35 2008 -0600
stdout instead of stderr for inventory.
commit fb94bc40dff7b264f982c51b8effc4b54f7c44c1
Author: Michael E Brown
Date: Tue Jan 15 17:11:01 2008 -0600
print to stdout instead of stderr for bootstrap
commit d8b748e8a61d3253607d5a10145480980a32f8de
Author: Michael E Brown
Date: Tue Jan 15 16:57:16 2008 -0600
fixup path in config file
commit d9bbf755add9ab86fee4288484a2431349dcbe85
Author: Michael E Brown
Date: Tue Jan 15 16:57:01 2008 -0600
load plugin config files.
commit e4f3e4c664a1ff12efc583c354d35ce328d477a1
Author: Michael E Brown
Date: Tue Jan 15 16:09:42 2008 -0600
rationalize configdir variables.
commit ef7c5462dad15846e827b700df400636756f9243
Author: Michael E Brown
Date: Tue Jan 15 15:14:04 2008 -0600
resolve installation directory issues so it runs both from build tree and installed.
commit db56c5e70de9ebe6ea0131dfff0633d46db7db9a
Author: Michael E Brown
Date: Tue Jan 15 12:02:42 2008 -0600
fixup paths to only set ones we actually use.
commit 4ead20d726de42a5eeb9480e86a6ef8913badf20
Author: Michael E Brown
Date: Tue Jan 15 10:35:26 2008 -0600
bump API version to 2.0 since it is conceptually incompatible.
commit 8876a139c369413340443c46e01e998a7d83af2f
Author: Michael E Brown
Date: Tue Jan 15 10:35:23 2008 -0600
missing import fcntl.
commit 2ef7f865b6fb2e31c5f3e14422d86c4959f2a087
Author: Michael E Brown
Date: Tue Jan 15 10:20:33 2008 -0600
add help for --update cmdline.
commit 2a08b75ef74ca00134a42a1c5d8191d0bf934c6c
Author: Michael E Brown
Date: Tue Jan 15 01:19:51 2008 -0600
initial port of gui to new framework.
commit b77bc97cea4a7ca3e5bc44cec53f1667695ad10e
Author: Michael E Brown
Date: Tue Jan 15 01:18:55 2008 -0600
remove unneeded variable that is only referenced once.
commit cc1899bd2af32ade8427ddca5317ea01d185409a
Author: Michael E Brown
Date: Mon Jan 14 23:59:44 2008 -0600
remove obsolete bins.
commit 10109391546016f016d081d140a4b8e0a672c9e3
Author: Michael E Brown
Date: Mon Jan 14 23:59:33 2008 -0600
small whitespace cleanups.
commit 2291223156118c53e8cbb7b66e82a168d39e0386
Author: Michael E Brown
Date: Mon Jan 14 23:59:04 2008 -0600
make backcompat symlinks in rpm
commit f6b787ed9a02f3809dba1de8e13c0cbae7719f60
Author: Michael E Brown
Date: Mon Jan 14 23:53:05 2008 -0600
atexit() not really needed right now so remove as it causes a traceback in unit tests on exit.
commit c4d8ad902991afe37b5e134b0be47d70fa71f75d
Author: Michael E Brown
Date: Mon Jan 14 23:52:25 2008 -0600
add new path for cli stuff to search path.
commit 9b24568a5685df570eff6c6080981e5befa2f537
Author: Michael E Brown
Date: Mon Jan 14 23:29:59 2008 -0600
add option to seamlessly emulate old binaries based on basename.
commit 7795889553a488265043be0ea0f4d69256d172da
Author: Michael E Brown
Date: Mon Jan 14 23:29:14 2008 -0600
get new stuff installed.
commit 10289c5db16b26e9bbed06bec3bd43424d2e6b5b
Author: Michael E Brown
Date: Mon Jan 14 23:23:26 2008 -0600
bump version
commit f61903cfc17624d3d81143b5aed1ab293c12454a
Author: Michael E Brown
Date: Mon Jan 14 23:03:56 2008 -0600
remove constants import.
commit 45e8c66e8fdfcf071cb9f5d54f176eb1289f7b19
Author: Michael E Brown
Date: Mon Jan 14 23:01:56 2008 -0600
remove underused constants module.
commit b68a7f0e4ce2dd4523afa37b14a666f782e19c90
Author: Michael E Brown
Date: Mon Jan 14 22:44:08 2008 -0600
Make the normal commands into plugins to prove the plugin system works ok.
Make a couple of adjustments in the config startup to let plugins add new modes.
commit 89ce5c72900ae85744237ebb01c8dc6e51629f7a
Author: Michael E Brown
Date: Mon Jan 14 20:40:12 2008 -0600
search path for plugins.
commit 756d9bb5f2763c8f4d76ce4fdb0f933d6748ae87
Author: Michael E Brown
Date: Mon Jan 14 20:39:58 2008 -0600
support search path for plugins.
commit 64c92617da8143d7135e77b8a142e9bc98a2303d
Author: Michael E Brown
Date: Mon Jan 14 20:35:35 2008 -0600
remove unused DEFAULT_CONFIG
commit c36b9f16c26474512197064e27a759989ec416fa
Author: Michael E Brown