path: root/src/lib/Bcfg2/Client
author    Sol Jerome <sol.jerome@gmail.com>    2012-03-24 11:20:07 -0500
committer Sol Jerome <sol.jerome@gmail.com>    2012-03-24 11:20:07 -0500
commit    dab1d03d81c538966d03fb9318a4588a9e803b44 (patch)
tree      f51e27fa55887e9fb961766805fe43f0da56c5b9 /src/lib/Bcfg2/Client
parent    5cd6238df496a3cea178e4596ecd87967cce1ce6 (diff)
download  bcfg2-dab1d03d81c538966d03fb9318a4588a9e803b44.tar.gz
          bcfg2-dab1d03d81c538966d03fb9318a4588a9e803b44.tar.bz2
          bcfg2-dab1d03d81c538966d03fb9318a4588a9e803b44.zip
Allow to run directly from a git checkout (#1037)
Signed-off-by: Sol Jerome <sol.jerome@gmail.com>
Diffstat (limited to 'src/lib/Bcfg2/Client')
-rw-r--r--  src/lib/Bcfg2/Client/Frame.py                 454
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APK.py              61
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APT.py             271
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Action.py          107
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Blast.py            32
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Chkconfig.py       108
-rw-r--r--  src/lib/Bcfg2/Client/Tools/DebInit.py         125
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Encap.py            53
-rw-r--r--  src/lib/Bcfg2/Client/Tools/FreeBSDInit.py      27
-rw-r--r--  src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py   46
-rw-r--r--  src/lib/Bcfg2/Client/Tools/IPS.py              60
-rw-r--r--  src/lib/Bcfg2/Client/Tools/MacPorts.py         59
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX.py           943
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Pacman.py           83
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Portage.py          71
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RPMng.py          1027
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RcUpdate.py         97
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SMF.py             137
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SYSV.py            105
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Systemd.py          59
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Upstart.py          93
-rw-r--r--  src/lib/Bcfg2/Client/Tools/VCS.py             150
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUM24.py           422
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUMng.py           936
-rw-r--r--  src/lib/Bcfg2/Client/Tools/__init__.py        353
-rw-r--r--  src/lib/Bcfg2/Client/Tools/launchd.py         135
-rwxr-xr-x  src/lib/Bcfg2/Client/Tools/rpmtools.py       1114
-rw-r--r--  src/lib/Bcfg2/Client/XML.py                    36
-rw-r--r--  src/lib/Bcfg2/Client/__init__.py                3
29 files changed, 7167 insertions, 0 deletions
diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py
new file mode 100644
index 000000000..baac70f52
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Frame.py
@@ -0,0 +1,454 @@
+"""
+Frame is the Client Framework that verifies and
+installs entries, and generates statistics.
+"""
+
+import logging
+import sys
+import time
+import Bcfg2.Client.Tools
+
+
+def cmpent(ent1, ent2):
+ """Sort entries."""
+ if ent1.tag != ent2.tag:
+ return cmp(ent1.tag, ent2.tag)
+ else:
+ return cmp(ent1.get('name'), ent2.get('name'))
+
+
+def promptFilter(prompt, entries):
+ """Filter a supplied list based on user input."""
+ ret = []
+ entries.sort(cmpent)
+ for entry in entries[:]:
+ if 'qtext' in entry.attrib:
+ iprompt = entry.get('qtext')
+ else:
+ iprompt = prompt % (entry.tag, entry.get('name'))
+ try:
+ # py3k compatibility
+ try:
+ ans = raw_input(iprompt.encode(sys.stdout.encoding, 'replace'))
+ except NameError:
+ ans = input(iprompt)
+ if ans in ['y', 'Y']:
+ ret.append(entry)
+ except EOFError:
+ # python 2.4.3 on CentOS doesn't like ^C for some reason
+ break
+ except:
+ print("Error while reading input")
+ continue
+ return ret
+
+
+def matches_entry(entryspec, entry):
+ # both are (tag, name)
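+    # entryspec may match exactly, use '*' as a full wildcard for a field,
+    # or embed '*' so that the text before it is treated as a prefix match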
+ if entryspec == entry:
+ return True
+ else:
+ for i in [0, 1]:
+ if entryspec[i] == entry[i]:
+ continue
+ elif entryspec[i] == '*':
+ continue
+ elif '*' in entryspec[i]:
+ starpt = entryspec[i].index('*')
+ if entry[i].startswith(entryspec[i][:starpt]):
+ continue
+ return False
+ return True
+
+
+def matches_white_list(entry, whitelist):
+ return True in [matches_entry(we, (entry.tag, entry.get('name')))
+ for we in whitelist]
+
+
+def passes_black_list(entry, blacklist):
+ return True not in [matches_entry(be, (entry.tag, entry.get('name')))
+ for be in blacklist]
+
+
+class Frame:
+ """Frame is the container for all Tool objects and state information."""
+ def __init__(self, config, setup, times, drivers, dryrun):
+ self.config = config
+ self.times = times
+ self.dryrun = dryrun
+ self.times['initialization'] = time.time()
+ self.setup = setup
+ self.tools = []
+ self.states = {}
+ self.whitelist = []
+ self.blacklist = []
+ self.removal = []
+ self.logger = logging.getLogger("Bcfg2.Client.Frame")
+ for driver in drivers[:]:
+ if driver not in Bcfg2.Client.Tools.drivers and \
+ isinstance(driver, str):
+ self.logger.error("Tool driver %s is not available" % driver)
+ drivers.remove(driver)
+
+ tclass = {}
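+        # map driver name -> tool class; drivers passed in as objects rather
+        # than names are stored under a timestamp key so they cannot collide
+        # with name keys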
+ for tool in drivers:
+ if not isinstance(tool, str):
+ tclass[time.time()] = tool
+ tool_class = "Bcfg2.Client.Tools.%s" % tool
+ try:
+ tclass[tool] = getattr(__import__(tool_class, globals(),
+ locals(), ['*']),
+ tool)
+ except ImportError:
+ continue
+ except:
+ self.logger.error("Tool %s unexpectedly failed to load" % tool,
+ exc_info=1)
+
+ for tool in list(tclass.values()):
+ try:
+ self.tools.append(tool(self.logger, setup, config))
+ except Bcfg2.Client.Tools.toolInstantiationError:
+ continue
+ except:
+ self.logger.error("Failed to instantiate tool %s" % \
+ (tool), exc_info=1)
+
+ for tool in self.tools[:]:
+ for conflict in getattr(tool, 'conflicts', []):
+ [self.tools.remove(item) for item in self.tools \
+ if item.name == conflict]
+
+ self.logger.info("Loaded tool drivers:")
+ self.logger.info([tool.name for tool in self.tools])
+
+ # find entries not handled by any tools
+ problems = [entry for struct in config for \
+ entry in struct if entry not in self.handled]
+
+ if problems:
+ self.logger.error("The following entries are not handled by any tool:")
+ self.logger.error(["%s:%s:%s" % (entry.tag, entry.get('type'), \
+ entry.get('name')) for entry in problems])
+ self.logger.error("")
+ entries = [(entry.tag, entry.get('name'))
+ for struct in config for entry in struct]
+ pkgs = [(entry.get('name'), entry.get('origin'))
+ for struct in config for entry in struct if entry.tag == 'Package']
+ multi = []
+ for entry in entries[:]:
+ if entries.count(entry) > 1:
+ multi.append(entry)
+ entries.remove(entry)
+ if multi:
+ self.logger.debug("The following entries are included multiple times:")
+ self.logger.debug(["%s:%s" % entry for entry in multi])
+ self.logger.debug("")
+ if pkgs:
+ self.logger.debug("The following packages are specified in bcfg2:")
+ self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == None])
+ self.logger.debug("The following packages are prereqs added by Packages:")
+ self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])
+
+ def __getattr__(self, name):
+ if name in ['extra', 'handled', 'modified', '__important__']:
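+            # aggregate these list attributes across all loaded tools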
+ ret = []
+ for tool in self.tools:
+ ret += getattr(tool, name)
+ return ret
+ elif name in self.__dict__:
+ return self.__dict__[name]
+ raise AttributeError(name)
+
+ def InstallImportant(self):
+ """Install important entries
+
+ We also process the decision mode stuff here because we want to prevent
+ non-whitelisted/blacklisted 'important' entries from being installed
+ prior to determining the decision mode on the client.
+ """
+ # Need to process decision stuff early so that dryrun mode works with it
+ self.whitelist = [entry for entry in self.states \
+ if not self.states[entry]]
+ if not self.setup['file']:
+ if self.setup['decision'] == 'whitelist':
+ dwl = self.setup['decision_list']
+ w_to_rem = [e for e in self.whitelist \
+ if not matches_white_list(e, dwl)]
+ if w_to_rem:
+ self.logger.info("In whitelist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem])
+ self.whitelist = [x for x in self.whitelist \
+ if x not in w_to_rem]
+ elif self.setup['decision'] == 'blacklist':
+ b_to_rem = [e for e in self.whitelist \
+ if not passes_black_list(e, self.setup['decision_list'])]
+ if b_to_rem:
+ self.logger.info("In blacklist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem])
+ self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
+
+ # take care of important entries first
+ if not self.dryrun and not self.setup['bundle']:
+ for cfile in [cfl for cfl in self.config.findall(".//Path") \
+ if cfl.get('name') in self.__important__ and \
+ cfl.get('type') == 'file']:
+ if cfile not in self.whitelist:
+ continue
+ tl = [t for t in self.tools if t.handlesEntry(cfile) \
+ and t.canVerify(cfile)]
+ if tl:
+ if self.setup['interactive'] and not \
+ promptFilter("Install %s: %s? (y/N):", [cfile]):
+ self.whitelist.remove(cfile)
+ continue
+ try:
+ self.states[cfile] = tl[0].InstallPath(cfile)
+ if self.states[cfile]:
+ tl[0].modified.append(cfile)
+ except:
+ self.logger.error("Unexpected tool failure",
+ exc_info=1)
+ cfile.set('qtext', '')
+ if tl[0].VerifyPath(cfile, []):
+ self.whitelist.remove(cfile)
+
+ def Inventory(self):
+ """
+ Verify all entries,
+ find extra entries,
+ and build up workqueues
+
+ """
+ # initialize all states
+ for struct in self.config.getchildren():
+ for entry in struct.getchildren():
+ self.states[entry] = False
+ for tool in self.tools:
+ try:
+ tool.Inventory(self.states)
+ except:
+ self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1)
+
+ def Decide(self):
+ """Set self.whitelist based on user interaction."""
+ prompt = "Install %s: %s? (y/N): "
+ rprompt = "Remove %s: %s? (y/N): "
+ if self.setup['remove']:
+ if self.setup['remove'] == 'all':
+ self.removal = self.extra
+ elif self.setup['remove'] in ['services', 'Services']:
+ self.removal = [entry for entry in self.extra
+ if entry.tag == 'Service']
+ elif self.setup['remove'] in ['packages', 'Packages']:
+ self.removal = [entry for entry in self.extra
+ if entry.tag == 'Package']
+
+ candidates = [entry for entry in self.states
+ if not self.states[entry]]
+
+ if self.dryrun:
+ if self.whitelist:
+ self.logger.info("In dryrun mode: suppressing entry installation for:")
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
+ for entry in self.whitelist])
+ self.whitelist = []
+ if self.removal:
+ self.logger.info("In dryrun mode: suppressing entry removal for:")
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
+ for entry in self.removal])
+ self.removal = []
+ return
+ # Here is where most of the work goes
+ # first perform bundle filtering
+ if self.setup['bundle']:
+ all_bundle_names = [b.get('name') for b in
+ self.config.findall('./Bundle')]
+ # warn if non-existent bundle given
+ for bundle in self.setup['bundle']:
+ if bundle not in all_bundle_names:
+ self.logger.info("Warning: Bundle %s not found" % bundle)
+ bundles = [b for b in self.config.findall('./Bundle')
+ if b.get('name') in self.setup['bundle']]
+ self.whitelist = [e for e in self.whitelist
+ if True in [e in b for b in bundles]]
+ elif self.setup['indep']:
+ bundles = [nb for nb in self.config.getchildren()
+ if nb.tag != 'Bundle']
+ else:
+ bundles = self.config.getchildren()
+
+ # first process prereq actions
+ for bundle in bundles[:]:
+ if bundle.tag != 'Bundle':
+ continue
+ bmodified = len([item for item in bundle if item in self.whitelist])
+ actions = [a for a in bundle.findall('./Action')
+ if (a.get('timing') != 'post' and
+ (bmodified or a.get('when') == 'always'))]
+ # now we process all "always actions"
+ if self.setup['interactive']:
+ promptFilter(prompt, actions)
+ self.DispatchInstallCalls(actions)
+
+ # need to test to fail entries in whitelist
+ if False in [self.states[a] for a in actions]:
+ # then display bundles forced off with entries
+ self.logger.info("Bundle %s failed prerequisite action" %
+ (bundle.get('name')))
+ bundles.remove(bundle)
+ b_to_remv = [ent for ent in self.whitelist if ent in bundle]
+ if b_to_remv:
+ self.logger.info("Not installing entries from Bundle %s" %
+ (bundle.get('name')))
+ self.logger.info(["%s:%s" % (e.tag, e.get('name'))
+ for e in b_to_remv])
+ [self.whitelist.remove(ent) for ent in b_to_remv]
+
+ if self.setup['interactive']:
+ self.whitelist = promptFilter(prompt, self.whitelist)
+ self.removal = promptFilter(rprompt, self.removal)
+
+ for entry in candidates:
+ if entry not in self.whitelist:
+ self.blacklist.append(entry)
+
+ def DispatchInstallCalls(self, entries):
+ """Dispatch install calls to underlying tools."""
+ for tool in self.tools:
+ handled = [entry for entry in entries if tool.canInstall(entry)]
+ if not handled:
+ continue
+ try:
+ tool.Install(handled, self.states)
+ except:
+ self.logger.error("%s.Install() call failed:" % tool.name, exc_info=1)
+
+ def Install(self):
+ """Install all entries."""
+ self.DispatchInstallCalls(self.whitelist)
+ mods = self.modified
+ mbundles = [struct for struct in self.config.findall('Bundle') if \
+ [mod for mod in mods if mod in struct]]
+
+ if self.modified:
+ # Handle Bundle interdeps
+ if mbundles:
+ self.logger.info("The Following Bundles have been modified:")
+ self.logger.info([mbun.get('name') for mbun in mbundles])
+ self.logger.info("")
+ tbm = [(t, b) for t in self.tools for b in mbundles]
+ for tool, bundle in tbm:
+ try:
+ tool.Inventory(self.states, [bundle])
+ except:
+ self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1)
+ clobbered = [entry for bundle in mbundles for entry in bundle \
+ if not self.states[entry] and entry not in self.blacklist]
+ if clobbered:
+ self.logger.debug("Found clobbered entries:")
+ self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) \
+ for entry in clobbered])
+ if not self.setup['interactive']:
+ self.DispatchInstallCalls(clobbered)
+
+ for bundle in self.config.findall('.//Bundle'):
+ if self.setup['bundle'] and \
+ bundle.get('name') not in self.setup['bundle']:
+ # prune out unspecified bundles when running with -b
+ continue
+ for tool in self.tools:
+ try:
+ if bundle in mbundles:
+ tool.BundleUpdated(bundle, self.states)
+ else:
+ tool.BundleNotUpdated(bundle, self.states)
+ except:
+ self.logger.error("%s.BundleNotUpdated() call failed:" % \
+ (tool.name), exc_info=1)
+
+ def Remove(self):
+ """Remove extra entries."""
+ for tool in self.tools:
+ extras = [entry for entry in self.removal if tool.handlesEntry(entry)]
+ if extras:
+ try:
+ tool.Remove(extras)
+ except:
+ self.logger.error("%s.Remove() failed" % tool.name, exc_info=1)
+
+ def CondDisplayState(self, phase):
+ """Conditionally print tracing information."""
+ self.logger.info('\nPhase: %s' % phase)
+ self.logger.info('Correct entries:\t%d' % list(self.states.values()).count(True))
+ self.logger.info('Incorrect entries:\t%d' % list(self.states.values()).count(False))
+ if phase == 'final' and list(self.states.values()).count(False):
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for \
+ entry in self.states if not self.states[entry]])
+ self.logger.info('Total managed entries:\t%d' % len(list(self.states.values())))
+ self.logger.info('Unmanaged entries:\t%d' % len(self.extra))
+ if phase == 'final' and self.setup['extra']:
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) \
+ for entry in self.extra])
+
+ self.logger.info("")
+
+ if ((list(self.states.values()).count(False) == 0) and not self.extra):
+ self.logger.info('All entries correct.')
+
+ def ReInventory(self):
+ """Recheck everything."""
+ if not self.dryrun and self.setup['kevlar']:
+ self.logger.info("Rechecking system inventory")
+ self.Inventory()
+
+ def Execute(self):
+ """Run all methods."""
+ self.Inventory()
+ self.times['inventory'] = time.time()
+ self.CondDisplayState('initial')
+ self.InstallImportant()
+ self.Decide()
+ self.Install()
+ self.times['install'] = time.time()
+ self.Remove()
+ self.times['remove'] = time.time()
+ if self.modified:
+ self.ReInventory()
+ self.times['reinventory'] = time.time()
+ self.times['finished'] = time.time()
+ self.CondDisplayState('final')
+
+ def GenerateStats(self):
+ """Generate XML summary of execution statistics."""
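+        # build an <upload-statistics> document: a <Statistics> element with
+        # entry counts and overall state, Modified/Extra/Bad entry lists, and
+        # an <OpStamps> element carrying the timing data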
+ feedback = Bcfg2.Client.XML.Element("upload-statistics")
+ stats = Bcfg2.Client.XML.SubElement(feedback,
+ 'Statistics',
+ total=str(len(self.states)),
+ client_version=__revision__,
+ version='2.0',
+ revision=self.config.get('revision', '-1'))
+ good = len([key for key, val in list(self.states.items()) if val])
+ stats.set('good', str(good))
+ if len([key for key, val in list(self.states.items()) if not val]) == 0:
+ stats.set('state', 'clean')
+ else:
+ stats.set('state', 'dirty')
+
+ # List bad elements of the configuration
+ for (data, ename) in [(self.modified, 'Modified'), (self.extra, "Extra"), \
+ ([entry for entry in self.states if not \
+ self.states[entry]], "Bad")]:
+ container = Bcfg2.Client.XML.SubElement(stats, ename)
+ for item in data:
+ item.set('qtext', '')
+ container.append(item)
+ item.text = None
+
+ timeinfo = Bcfg2.Client.XML.Element("OpStamps")
+ feedback.append(stats)
+ for (event, timestamp) in list(self.times.items()):
+ timeinfo.set(event, str(timestamp))
+ stats.append(timeinfo)
+ return feedback
diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py
new file mode 100644
index 000000000..aaaf2472f
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/APK.py
@@ -0,0 +1,61 @@
+"""This provides Bcfg2 support for Alpine Linux APK packages."""
+
+import Bcfg2.Client.Tools
+
+
+class APK(Bcfg2.Client.Tools.PkgTool):
+ """Support for Apk packages."""
+ name = 'APK'
+ __execs__ = ["/sbin/apk"]
+ __handles__ = [('Package', 'apk')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'apk'
+ pkgtool = ("/sbin/apk add %s", ("%s", ["name"]))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
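+        # 'apk info' lists bare package names while 'apk info -v' lists
+        # name-version strings; stripping the name (plus the '-' separator)
+        # from the latter yields each installed version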
+ names = self.cmd.run("/sbin/apk info")[1]
+ nameversions = self.cmd.run("/sbin/apk info -v")[1]
+ for pkg in zip(names, nameversions):
+ pkgname = pkg[0]
+ version = pkg[1][len(pkgname) + 1:]
+ self.logger.debug(" pkgname: %s\n version: %s" %
+ (pkgname, version))
+ self.installed[pkgname] = version
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify Package status for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+
+ if entry.attrib['name'] in self.installed:
+ if entry.attrib['version'] in ['auto', self.installed[entry.attrib['name']]]:
+ #if not self.setup['quick'] and \
+ # entry.get('verify', 'true') == 'true':
+ #FIXME: Does APK have any sort of verification mechanism?
+ return True
+ else:
+                self.logger.info(" pkg %s at version %s, not %s" %
+ (entry.attrib['name'],
+ self.installed[entry.attrib['name']],
+ entry.attrib['version']))
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+ entry.set('current_exists', 'false')
+ return False
+
+ def RemovePackages(self, packages):
+ """Remove extra packages."""
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % " ".join(names))
+ self.cmd.run("/sbin/apk del %s" % \
+ " ".join(names))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py
new file mode 100644
index 000000000..6b839ffbc
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/APT.py
@@ -0,0 +1,271 @@
+"""This is the Bcfg2 support for apt-get."""
+
+# suppress apt API warnings
+import warnings
+warnings.filterwarnings("ignore", "apt API not stable yet",
+ FutureWarning)
+import apt.cache
+import os
+import sys
+
+import Bcfg2.Client.Tools
+import Bcfg2.Options
+
+# Options for tool locations
+opts = {'install_path': Bcfg2.Options.CLIENT_APT_TOOLS_INSTALL_PATH,
+ 'var_path': Bcfg2.Options.CLIENT_APT_TOOLS_VAR_PATH,
+ 'etc_path': Bcfg2.Options.CLIENT_SYSTEM_ETC_PATH}
+setup = Bcfg2.Options.OptionParser(opts)
+setup.parse([])
+install_path = setup['install_path']
+var_path = setup['var_path']
+etc_path = setup['etc_path']
+DEBSUMS = '%s/bin/debsums' % install_path
+APTGET = '%s/bin/apt-get' % install_path
+DPKG = '%s/bin/dpkg' % install_path
+
+class APT(Bcfg2.Client.Tools.Tool):
+    """The APT tool implements Debian package operations and inherits
+    the rest from Bcfg2.Client.Tools.Tool.
+
+ """
+ name = 'APT'
+ __execs__ = [DEBSUMS, APTGET, DPKG]
+ __handles__ = [('Package', 'deb'), ('Path', 'ignore')]
+ __req__ = {'Package': ['name', 'version'], 'Path': ['type']}
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ path_entries = os.environ['PATH'].split(':')
+ for reqdir in ['/sbin', '/usr/sbin']:
+ if reqdir not in path_entries:
+ os.environ['PATH'] = os.environ['PATH'] + ':' + reqdir
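+        # build a non-interactive apt-get install command: --force-overwrite
+        # replaces files owned by other packages, --force-confold keeps
+        # existing conffiles, and --force-confmiss restores missing ones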
+ self.pkgcmd = '%s ' % APTGET + \
+ '-o DPkg::Options::=--force-overwrite ' + \
+ '-o DPkg::Options::=--force-confold ' + \
+ '-o DPkg::Options::=--force-confmiss ' + \
+ '--reinstall ' + \
+ '--force-yes '
+ if not self.setup['debug']:
+ self.pkgcmd += '-q=2 '
+ self.pkgcmd += '-y install %s'
+ self.ignores = [entry.get('name') for struct in config \
+ for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('type') == 'ignore']
+ self.__important__ = self.__important__ + \
+ ["%s/cache/debconf/config.dat" % var_path,
+ "%s/cache/debconf/templates.dat" % var_path,
+ '/etc/passwd', '/etc/group',
+ '%s/apt/apt.conf' % etc_path,
+ '%s/dpkg/dpkg.cfg' % etc_path] + \
+ [entry.get('name') for struct in config for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('name').startswith('%s/apt/sources.list' % etc_path)]
+ self.nonexistent = [entry.get('name') for struct in config for entry in struct \
+ if entry.tag == 'Path' and entry.get('type') == 'nonexistent']
+ os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
+ self.actions = {}
+ if self.setup['kevlar'] and not self.setup['dryrun']:
+ self.cmd.run("%s --force-confold --configure --pending" % DPKG)
+ self.cmd.run("%s clean" % APTGET)
+ try:
+ self.pkg_cache = apt.cache.Cache()
+ except SystemError:
+ e = sys.exc_info()[1]
+ self.logger.info("Failed to initialize APT cache: %s" % e)
+ raise Bcfg2.Client.Tools.toolInstantiationError
+ self.pkg_cache.update()
+ self.pkg_cache = apt.cache.Cache()
+ if 'req_reinstall_pkgs' in dir(self.pkg_cache):
+ self._newapi = True
+ else:
+ self._newapi = False
+
+ def FindExtra(self):
+ """Find extra packages."""
+ packages = [entry.get('name') for entry in self.getSupportedEntries()]
+ if self._newapi:
+ extras = [(p.name, p.installed.version) for p in self.pkg_cache
+ if p.is_installed and p.name not in packages]
+ else:
+ extras = [(p.name, p.installedVersion) for p in self.pkg_cache
+ if p.isInstalled and p.name not in packages]
+ return [Bcfg2.Client.XML.Element('Package', name=name, \
+ type='deb', version=version) \
+ for (name, version) in extras]
+
+ def VerifyDebsums(self, entry, modlist):
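+        # run 'debsums -as' and collect every file it flags (checksum
+        # mismatches, changed files, unreadable files); ignored and
+        # intentionally nonexistent paths are filtered out below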
+ output = self.cmd.run("%s -as %s" % (DEBSUMS, entry.get('name')))[1]
+ if len(output) == 1 and "no md5sums for" in output[0]:
+ self.logger.info("Package %s has no md5sums. Cannot verify" % \
+ entry.get('name'))
+ entry.set('qtext', "Reinstall Package %s-%s to setup md5sums? (y/N) " \
+ % (entry.get('name'), entry.get('version')))
+ return False
+ files = []
+ for item in output:
+ if "checksum mismatch" in item:
+ files.append(item.split()[-1])
+ elif "changed file" in item:
+ files.append(item.split()[3])
+ elif "can't open" in item:
+ if item.split()[5] not in self.nonexistent:
+ files.append(item.split()[5])
+ elif "missing file" in item and \
+ item.split()[3] in self.nonexistent:
+ # these files should not exist
+ continue
+ elif "is not installed" in item or "missing file" in item:
+ self.logger.error("Package %s is not fully installed" \
+ % entry.get('name'))
+ else:
+ self.logger.error("Got Unsupported pattern %s from debsums" \
+ % item)
+ files.append(item)
+ files = list(set(files) - set(self.ignores))
+ # We check if there is file in the checksum to do
+ if files:
+ # if files are found there we try to be sure our modlist is sane
+ # with erroneous symlinks
+ modlist = [os.path.realpath(filename) for filename in modlist]
+ bad = [filename for filename in files if filename not in modlist]
+ if bad:
+ self.logger.debug("It is suggested that you either manage these "
+ "files, revert the changes, or ignore false "
+ "failures:")
+ self.logger.info("Package %s failed validation. Bad files are:" % \
+ entry.get('name'))
+ self.logger.info(bad)
+ entry.set('qtext',
+ "Reinstall Package %s-%s to fix failing files? (y/N) " % \
+ (entry.get('name'), entry.get('version')))
+ return False
+ return True
+
+ def VerifyPackage(self, entry, modlist, checksums=True):
+ """Verify package for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+ pkgname = entry.get('name')
+ if self.pkg_cache.has_key(pkgname):
+ if self._newapi:
+ is_installed = self.pkg_cache[pkgname].is_installed
+ else:
+ is_installed = self.pkg_cache[pkgname].isInstalled
+ if not self.pkg_cache.has_key(pkgname) or not is_installed:
+ self.logger.info("Package %s not installed" % (entry.get('name')))
+ entry.set('current_exists', 'false')
+ return False
+
+ pkg = self.pkg_cache[pkgname]
+ if self._newapi:
+ installed_version = pkg.installed.version
+ candidate_version = pkg.candidate.version
+ else:
+ installed_version = pkg.installedVersion
+ candidate_version = pkg.candidateVersion
+ if entry.get('version') == 'auto':
+ if self._newapi:
+ is_upgradable = self.pkg_cache._depcache.is_upgradable(pkg._pkg)
+ else:
+ is_upgradable = self.pkg_cache._depcache.IsUpgradable(pkg._pkg)
+ if is_upgradable:
+ desiredVersion = candidate_version
+ else:
+ desiredVersion = installed_version
+ elif entry.get('version') == 'any':
+ desiredVersion = installed_version
+ else:
+ desiredVersion = entry.get('version')
+ if desiredVersion != installed_version:
+ entry.set('current_version', installed_version)
+ entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % \
+ (entry.get('name'), entry.get('current_version'),
+ desiredVersion))
+ return False
+ else:
+ # version matches
+ if not self.setup['quick'] and entry.get('verify', 'true') == 'true' \
+ and checksums:
+ pkgsums = self.VerifyDebsums(entry, modlist)
+ return pkgsums
+ return True
+
+ def Remove(self, packages):
+ """Deal with extra configuration detected."""
+ pkgnames = " ".join([pkg.get('name') for pkg in packages])
+ self.pkg_cache = apt.cache.Cache()
+ if len(packages) > 0:
+ self.logger.info('Removing packages:')
+ self.logger.info(pkgnames)
+ for pkg in pkgnames.split(" "):
+ try:
+ if self._newapi:
+ self.pkg_cache[pkg].mark_delete(purge=True)
+ else:
+ self.pkg_cache[pkg].markDelete(purge=True)
+ except:
+ if self._newapi:
+ self.pkg_cache[pkg].mark_delete()
+ else:
+ self.pkg_cache[pkg].markDelete()
+ try:
+ self.pkg_cache.commit()
+ except SystemExit:
+ # thank you python-apt 0.6
+ pass
+ self.pkg_cache = apt.cache.Cache()
+ self.modified += packages
+ self.extra = self.FindExtra()
+
+ def Install(self, packages, states):
+ # it looks like you can't install arbitrary versions of software
+ # out of the pkg cache, we will still need to call apt-get
+ ipkgs = []
+ bad_pkgs = []
+ for pkg in packages:
+ if not self.pkg_cache.has_key(pkg.get('name')):
+ self.logger.error("APT has no information about package %s" % (pkg.get('name')))
+ continue
+ if pkg.get('version') in ['auto', 'any']:
+ if self._newapi:
+ ipkgs.append("%s=%s" % (pkg.get('name'),
+ self.pkg_cache[pkg.get('name')].candidate.version))
+ else:
+ ipkgs.append("%s=%s" % (pkg.get('name'),
+ self.pkg_cache[pkg.get('name')].candidateVersion))
+ continue
+ if self._newapi:
+ avail_vers = [x.ver_str for x in \
+ self.pkg_cache[pkg.get('name')]._pkg.version_list]
+ else:
+ avail_vers = [x.VerStr for x in \
+ self.pkg_cache[pkg.get('name')]._pkg.VersionList]
+ if pkg.get('version') in avail_vers:
+ ipkgs.append("%s=%s" % (pkg.get('name'), pkg.get('version')))
+ continue
+ else:
+ self.logger.error("Package %s: desired version %s not in %s" \
+ % (pkg.get('name'), pkg.get('version'),
+ avail_vers))
+ bad_pkgs.append(pkg.get('name'))
+ if bad_pkgs:
+ self.logger.error("Cannot find correct versions of packages:")
+ self.logger.error(bad_pkgs)
+ if not ipkgs:
+ return
+ rc = self.cmd.run(self.pkgcmd % (" ".join(ipkgs)))[0]
+ if rc:
+ self.logger.error("APT command failed")
+ self.pkg_cache = apt.cache.Cache()
+ self.extra = self.FindExtra()
+ for package in packages:
+ states[package] = self.VerifyPackage(package, [], checksums=False)
+ if states[package]:
+ self.modified.append(package)
+
+ def VerifyPath(self, entry, _):
+ """Do nothing here since we only verify Path type=ignore."""
+ return True
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
new file mode 100644
index 000000000..dc49347e9
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -0,0 +1,107 @@
+"""Action driver"""
+
+import Bcfg2.Client.Tools
+from Bcfg2.Client.Frame import matches_white_list, passes_black_list
+
+"""
+<Action timing='pre|post|both'
+ name='name'
+ command='cmd text'
+ when='always|modified'
+ status='ignore|check'/>
+<PostInstall name='foo'/>
+ => <Action timing='post'
+ when='modified'
+ name='n'
+ command='foo'
+ status='ignore'/>
+"""
+
+
+class Action(Bcfg2.Client.Tools.Tool):
+ """Implement Actions"""
+ name = 'Action'
+ __handles__ = [('PostInstall', None), ('Action', None)]
+ __req__ = {'PostInstall': ['name'],
+ 'Action': ['name', 'timing', 'when', 'command', 'status']}
+
+ def _action_allowed(self, action):
+ if self.setup['decision'] == 'whitelist' and \
+ not matches_white_list(action, self.setup['decision_list']):
+ self.logger.info("In whitelist mode: suppressing Action:" + \
+ action.get('name'))
+ return False
+ if self.setup['decision'] == 'blacklist' and \
+ not passes_black_list(action, self.setup['decision_list']):
+ self.logger.info("In blacklist mode: suppressing Action:" + \
+ action.get('name'))
+ return False
+ return True
+
+ def RunAction(self, entry):
+ """This method handles command execution and status return."""
+ if not self.setup['dryrun']:
+ if self.setup['interactive']:
+ prompt = ('Run Action %s, %s: (y/N): ' %
+ (entry.get('name'), entry.get('command')))
+ # py3k compatibility
+ try:
+ ans = raw_input(prompt)
+ except NameError:
+ ans = input(prompt)
+ if ans not in ['y', 'Y']:
+ return False
+ if self.setup['servicemode'] == 'build':
+ if entry.get('build', 'true') == 'false':
+ self.logger.debug("Action: Deferring execution of %s due to build mode" % (entry.get('command')))
+ return False
+ self.logger.debug("Running Action %s" % (entry.get('name')))
+ rc = self.cmd.run(entry.get('command'))[0]
+ self.logger.debug("Action: %s got rc %s" % (entry.get('command'), rc))
+ entry.set('rc', str(rc))
+ if entry.get('status', 'check') == 'ignore':
+ return True
+ else:
+ return rc == 0
+ else:
+ self.logger.debug("In dryrun mode: not running action:\n %s" %
+ (entry.get('name')))
+ return False
+
+ def VerifyAction(self, dummy, _):
+ """Actions always verify true."""
+ return True
+
+ def VerifyPostInstall(self, dummy, _):
+ """Actions always verify true."""
+ return True
+
+ def InstallAction(self, entry):
+ """Run actions as pre-checks for bundle installation."""
+ if entry.get('timing') != 'post':
+ return self.RunAction(entry)
+ return True
+
+ def InstallPostInstall(self, entry):
+        return self.InstallAction(entry)
+
+ def BundleUpdated(self, bundle, states):
+ """Run postinstalls when bundles have been updated."""
+ for postinst in bundle.findall("PostInstall"):
+ if not self._action_allowed(postinst):
+ continue
+ self.cmd.run(postinst.get('name'))
+ for action in bundle.findall("Action"):
+ if action.get('timing') in ['post', 'both']:
+ if not self._action_allowed(action):
+ continue
+ states[action] = self.RunAction(action)
+
+ def BundleNotUpdated(self, bundle, states):
+ """Run Actions when bundles have not been updated."""
+ for action in bundle.findall("Action"):
+ if action.get('timing') in ['post', 'both'] and \
+ action.get('when') != 'modified':
+ if not self._action_allowed(action):
+ continue
+ states[action] = self.RunAction(action)
diff --git a/src/lib/Bcfg2/Client/Tools/Blast.py b/src/lib/Bcfg2/Client/Tools/Blast.py
new file mode 100644
index 000000000..5d5e74ab2
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Blast.py
@@ -0,0 +1,32 @@
+"""This provides Bcfg2 support for Blastwave."""
+
+import tempfile
+import Bcfg2.Client.Tools.SYSV
+
+
+class Blast(Bcfg2.Client.Tools.SYSV.SYSV):
+ """Support for Blastwave packages."""
+ pkgtype = 'blast'
+ pkgtool = ("/opt/csw/bin/pkg-get install %s", ("%s", ["bname"]))
+ name = 'Blast'
+ __execs__ = ['/opt/csw/bin/pkg-get', "/usr/bin/pkginfo"]
+ __handles__ = [('Package', 'blast')]
+ __ireq__ = {'Package': ['name', 'version', 'bname']}
+
+ def __init__(self, logger, setup, config):
+ # dont use the sysv constructor
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ noaskfile = tempfile.NamedTemporaryFile()
+ self.noaskname = noaskfile.name
+ try:
+ noaskfile.write(Bcfg2.Client.Tools.SYSV.noask)
+ except:
+ pass
+
+ # VerifyPackage comes from Bcfg2.Client.Tools.SYSV
+ # Install comes from Bcfg2.Client.Tools.PkgTool
+ # Extra comes from Bcfg2.Client.Tools.Tool
+ # Remove comes from Bcfg2.Client.Tools.SYSV
+ def FindExtraPackages(self):
+ """Pass through to null FindExtra call."""
+ return []
diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
new file mode 100644
index 000000000..12ea5f132
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -0,0 +1,108 @@
+# This is the bcfg2 support for chkconfig
+
+"""This is chkconfig support."""
+
+import os
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class Chkconfig(Bcfg2.Client.Tools.SvcTool):
+ """Chkconfig support for Bcfg2."""
+ name = 'Chkconfig'
+ __execs__ = ['/sbin/chkconfig']
+ __handles__ = [('Service', 'chkconfig')]
+ __req__ = {'Service': ['name', 'status']}
+ os.environ['LANG'] = 'C'
+
+ def get_svc_command(self, service, action):
+ return "/sbin/service %s %s" % (service.get('name'), action)
+
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+ try:
+ cmd = "/sbin/chkconfig --list %s " % (entry.get('name'))
+ raw = self.cmd.run(cmd)[1]
+ patterns = ["error reading information", "unknown service"]
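+            # drop chkconfig error lines; the first remaining line holds the
+            # service's runlevel data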
+ srvdata = [line.split() for line in raw for pattern in patterns \
+ if pattern not in line][0]
+ except IndexError:
+            # Occurs when no lines are returned (service not installed)
+ entry.set('current_status', 'off')
+ return False
+ if len(srvdata) == 2:
+ # This is an xinetd service
+ if entry.get('status') == srvdata[1]:
+ return True
+ else:
+ entry.set('current_status', srvdata[1])
+ return False
+
+ try:
+ onlevels = [level.split(':')[0] for level in srvdata[1:] \
+ if level.split(':')[1] == 'on']
+ except IndexError:
+ onlevels = []
+
+ if entry.get('status') == 'on':
+ status = (len(onlevels) > 0)
+ command = 'start'
+ else:
+ status = (len(onlevels) == 0)
+ command = 'stop'
+
+ if entry.get('mode', 'default') == 'supervised':
+ # turn on or off the service in supervised mode
+ pstatus = self.cmd.run('/sbin/service %s status' % \
+ entry.get('name'))[0]
+ needs_modification = ((command == 'start' and pstatus) or \
+ (command == 'stop' and not pstatus))
+ if (not self.setup.get('dryrun') and
+ self.setup['servicemode'] != 'disabled' and
+ needs_modification):
+ self.cmd.run(self.get_svc_command(entry, command))
+ # service was modified, so it failed
+ pstatus = False
+
+ # chkconfig/init.d service
+ if entry.get('status') == 'on':
+ status = status and not pstatus
+
+ if not status:
+ if entry.get('status') == 'on':
+ entry.set('current_status', 'off')
+ else:
+ entry.set('current_status', 'on')
+ return status
+
+ def InstallService(self, entry):
+ """Install Service entry."""
+ # don't take any actions for mode='manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return False
+ rcmd = "/sbin/chkconfig %s %s"
+ self.cmd.run("/sbin/chkconfig --add %s" % (entry.attrib['name']))
+ self.logger.info("Installing Service %s" % (entry.get('name')))
+ pass1 = True
+ if entry.get('status') == 'off':
+ rc = self.cmd.run(rcmd % (entry.get('name'),
+ entry.get('status')) + \
+ " --level 0123456")[0]
+ pass1 = rc == 0
+ rc = self.cmd.run(rcmd % (entry.get('name'), entry.get('status')))[0]
+ return pass1 and rc == 0
+
+ def FindExtra(self):
+ """Locate extra chkconfig Services."""
+ allsrv = [line.split()[0] for line in \
+ self.cmd.run("/sbin/chkconfig --list 2>/dev/null|grep :on")[1]]
+ self.logger.debug('Found active services:')
+ self.logger.debug(allsrv)
+ specified = [srv.get('name') for srv in self.getSupportedEntries()]
+ return [Bcfg2.Client.XML.Element('Service',
+ type='chkconfig',
+ name=name) \
+ for name in allsrv if name not in specified]
diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py
new file mode 100644
index 000000000..ca6fc439e
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/DebInit.py
@@ -0,0 +1,125 @@
+"""Debian Init Support for Bcfg2"""
+
+import glob
+import os
+import re
+import Bcfg2.Client.Tools
+
+# Debian squeeze and beyond use a dependency-based boot sequence
+DEBIAN_OLD_STYLE_BOOT_SEQUENCE = ('etch', '4.0', 'lenny')
+
+
+class DebInit(Bcfg2.Client.Tools.SvcTool):
+ """Debian Service Support for Bcfg2."""
+ name = 'DebInit'
+ __execs__ = ['/usr/sbin/update-rc.d', '/usr/sbin/invoke-rc.d']
+ __handles__ = [('Service', 'deb')]
+ __req__ = {'Service': ['name', 'status']}
+ svcre = re.compile("/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)")
+
+ # implement entry (Verify|Install) ops
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+
+ if entry.get('status') == 'ignore':
+ return True
+
+ rawfiles = glob.glob("/etc/rc*.d/[SK]*%s" % (entry.get('name')))
+ files = []
+
+ try:
+ deb_version = open('/etc/debian_version', 'r').read().split('/', 1)[0]
+ except IOError:
+ deb_version = 'unknown'
+
+ if entry.get('sequence'):
+ if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or
+ deb_version.startswith('5') or
+ os.path.exists('/etc/init.d/.legacy-bootordering')):
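+                # legacy (non-dependency) ordering: kill links are expected
+                # at priority 100 minus the start priority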
+ start_sequence = int(entry.get('sequence'))
+ kill_sequence = 100 - start_sequence
+ else:
+ start_sequence = None
+                self.logger.warning("Your Debian version uses a "
+                                    "dependency-based boot sequence; the "
+                                    "\"sequence\" attribute will be ignored.")
+ else:
+ start_sequence = None
+
+ for filename in rawfiles:
+ match = self.svcre.match(filename)
+ if not match:
+ self.logger.error("Failed to match file: %s" % filename)
+ continue
+ if match.group('name') == entry.get('name'):
+ files.append(filename)
+ if entry.get('status') == 'off':
+ if files:
+ entry.set('current_status', 'on')
+ return False
+ else:
+ return True
+ else:
+ if files:
+ if start_sequence:
+ for filename in files:
+ match = self.svcre.match(filename)
+ file_sequence = int(match.group('sequence'))
+ if match.group('action') == 'S' and file_sequence != start_sequence:
+ return False
+ if match.group('action') == 'K' and file_sequence != kill_sequence:
+ return False
+ return True
+ else:
+ entry.set('current_status', 'off')
+ return False
+
+ def InstallService(self, entry):
+ """Install Service for entry."""
+ # don't take any actions for mode='manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return False
+ self.logger.info("Installing Service %s" % (entry.get('name')))
+ try:
+ os.stat('/etc/init.d/%s' % entry.get('name'))
+ except OSError:
+ self.logger.debug("Init script for service %s does not exist" % entry.get('name'))
+ return False
+
+ if entry.get('status') == 'off':
+ self.cmd.run("/usr/sbin/invoke-rc.d %s stop" % (entry.get('name')))
+ cmdrc = self.cmd.run("/usr/sbin/update-rc.d -f %s remove" % entry.get('name'))[0]
+ else:
+ command = "/usr/sbin/update-rc.d %s defaults" % (entry.get('name'))
+ if entry.get('sequence'):
+ cmdrc = self.cmd.run("/usr/sbin/update-rc.d -f %s remove" % entry.get('name'))[0]
+ if cmdrc != 0:
+ return False
+ start_sequence = int(entry.get('sequence'))
+ kill_sequence = 100 - start_sequence
+ command = "%s %d %d" % (command, start_sequence, kill_sequence)
+ cmdrc = self.cmd.run(command)[0]
+ return cmdrc == 0
+
+ def FindExtra(self):
+ """Find Extra Debian Service entries."""
+ specified = [entry.get('name') for entry in self.getSupportedEntries()]
+ extra = []
+ for name in [self.svcre.match(fname).group('name') for fname in
+ glob.glob("/etc/rc[12345].d/S*") \
+ if self.svcre.match(fname).group('name') not in specified]:
+ if name not in extra:
+ extra.append(name)
+ return [Bcfg2.Client.XML.Element('Service', name=name, type='deb') for name \
+ in extra]
+
+ def Remove(self, _):
+ """Remove extra service entries."""
+ # Extra service removal is nonsensical
+ # Extra services need to be reflected in the config
+ return
+
+ def get_svc_command(self, service, action):
+ return '/usr/sbin/invoke-rc.d %s %s' % (service.get('name'), action)
diff --git a/src/lib/Bcfg2/Client/Tools/Encap.py b/src/lib/Bcfg2/Client/Tools/Encap.py
new file mode 100644
index 000000000..fa09c3ec7
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Encap.py
@@ -0,0 +1,53 @@
+"""Bcfg2 Support for Encap Packages"""
+
+import glob
+import re
+import Bcfg2.Client.Tools
+
+class Encap(Bcfg2.Client.Tools.PkgTool):
+ """Support for Encap packages."""
+ name = 'Encap'
+ __execs__ = ['/usr/local/bin/epkg']
+ __handles__ = [('Package', 'encap')]
+ __req__ = {'Package': ['version', 'url']}
+ pkgtype = 'encap'
+ pkgtool = ("/usr/local/bin/epkg -l -f -q %s", ("%s", ["url"]))
+ splitter = re.compile('.*/(?P<name>[\w-]+)\-(?P<version>[\w\.+-]+)')
+
+# If you define self.pkgtool and self.pkgname, the PkgTool.Install
+# method will do the installation for you.
+
+ def RefreshPackages(self):
+ """Try to find encap packages."""
+ self.installed = {}
+ for pkg in glob.glob("/usr/local/encap/*"):
+ match = self.splitter.match(pkg)
+ if match:
+ self.installed[match.group('name')] = match.group('version')
+ else:
+ print("Failed to split name %s" % pkg)
+ self.logger.debug("Encap.py: RefreshPackages: self.installed.keys() are:")
+ self.logger.debug("%s" % list(self.installed.keys()))
+
+ def VerifyPackage(self, entry, _):
+ """Verify Package status for entry."""
+ if not entry.get('version'):
+            self.logger.info("Insufficient information for Package %s; cannot verify" % entry.get('name'))
+ return False
+ cmdrc = self.cmd.run("/usr/local/bin/epkg -q -S -k %s-%s >/dev/null" %
+ (entry.get('name'), entry.get('version')))[0]
+ if cmdrc != 0:
+ self.logger.debug("Package %s version incorrect" % entry.get('name'))
+ else:
+ return True
+ return False
+
+ # Can use the FindExtraPackages method from Bcfg2.Client.Tools.PkgTool
+
+ def RemovePackages(self, packages):
+ """Deal with extra configuration detected."""
+ names = " ".join([pkg.get('name') for pkg in packages])
+ self.logger.info("Removing packages: %s" % (names))
+ self.cmd.run("/usr/local/bin/epkg -l -q -r %s" % (names))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
new file mode 100644
index 000000000..10f0f2e93
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
@@ -0,0 +1,27 @@
+"""FreeBSD Init Support for Bcfg2."""
+__revision__ = '$Rev$'
+
+# TODO
+# - hardcoded path to ports rc.d
+# - doesn't know about /etc/rc.d/
+
+import os
+import Bcfg2.Client.Tools
+
+
+class FreeBSDInit(Bcfg2.Client.Tools.SvcTool):
+ """FreeBSD service support for Bcfg2."""
+ name = 'FreeBSDInit'
+ __handles__ = [('Service', 'freebsd')]
+ __req__ = {'Service': ['name', 'status']}
+
+ def __init__(self, logger, cfg, setup):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup)
+ if os.uname()[0] != 'FreeBSD':
+ raise Bcfg2.Client.Tools.toolInstantiationError
+
+ def VerifyService(self, entry, _):
+ return True
+
+ def get_svc_command(self, service, action):
+ return "/usr/local/etc/rc.d/%s %s" % (service.get('name'), action)
diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py b/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py
new file mode 100644
index 000000000..3e6f2b6bb
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py
@@ -0,0 +1,46 @@
+"""This is the Bcfg2 tool for the FreeBSD package system."""
+
+# TODO
+# - actual package installation
+# - verification of package files
+
+import re
+import Bcfg2.Client.Tools
+
+
+class FreeBSDPackage(Bcfg2.Client.Tools.PkgTool):
+    """The FreeBSDPackage tool implements package operations and inherits
+    the rest from Bcfg2.Client.Tools.PkgTool."""
+ name = 'FreeBSDPackage'
+ __execs__ = ['/usr/sbin/pkg_add', '/usr/sbin/pkg_info']
+ __handles__ = [('Package', 'freebsdpkg')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtool = ('/usr/sbin/pkg_add -r %s', ('%s-%s', ['name', 'version']))
+ pkgtype = 'freebsdpkg'
+
+ def RefreshPackages(self):
+ self.installed = {}
+ packages = self.cmd.run("/usr/sbin/pkg_info -a -E")[1]
+ pattern = re.compile('(.*)-(\d.*)')
+ for pkg in packages:
+ if pattern.match(pkg):
+ name = pattern.match(pkg).group(1)
+ version = pattern.match(pkg).group(2)
+ self.installed[name] = version
+
+ def VerifyPackage(self, entry, modlist):
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+ if entry.attrib['name'] in self.installed:
+ if self.installed[entry.attrib['name']] == entry.attrib['version']:
+                # TODO: verification
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+
+ self.logger.info("Package %s not installed" % (entry.get('name')))
+ entry.set('current_exists', 'false')
+ return False
diff --git a/src/lib/Bcfg2/Client/Tools/IPS.py b/src/lib/Bcfg2/Client/Tools/IPS.py
new file mode 100644
index 000000000..e30bbd2a4
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/IPS.py
@@ -0,0 +1,60 @@
+"""This is the Bcfg2 support for OpenSolaris packages."""
+
+import pkg.client.image as image
+import pkg.client.progress as progress
+
+import Bcfg2.Client.Tools
+
+
+class IPS(Bcfg2.Client.Tools.PkgTool):
+ """The IPS driver implements OpenSolaris package operations."""
+ name = 'IPS'
+ pkgtype = 'ips'
+ conflicts = ['SYSV']
+ __handles__ = [('Package', 'ips')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtool = ('pkg install --no-refresh %s', ('%s', ['name']))
+
+ def __init__(self, logger, setup, cfg):
+ self.installed = {}
+ self.pending_upgrades = set()
+ self.image = image.Image()
+ self.image.find_root('/', False)
+ self.image.load_config()
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, cfg)
+ self.cfg = cfg
+
+ def RefreshPackages(self):
+ self.installed = dict()
+ self.image.history.operation_name = "list"
+ self.image.load_catalogs(progress.NullProgressTracker())
+ for (pfmri, pinfo) in self.image.inventory([], False):
+ pname = pfmri.pkg_name
+ pversion = pfmri.version.get_short_version()
+ self.installed[pname] = pversion
+ if pinfo['upgradable']:
+ self.pending_upgrades.add(pname)
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify package for entry."""
+ pname = entry.get('name')
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" % (pname))
+ return False
+ if pname not in self.installed:
+ self.logger.debug("IPS: Package %s not installed" % pname)
+ return False
+ if entry.get('version') == 'auto':
+ if pname in self.pending_upgrades:
+ return False
+ elif entry.get('version') == 'any':
+ pass
+ else:
+ if entry.get('version') != self.installed[pname]:
+ self.logger.debug("IPS: Package %s: have %s want %s" \
+ % (pname, self.installed[pname],
+ entry.get('version')))
+ return False
+
+ # need to implement pkg chksum validation
+ return True
diff --git a/src/lib/Bcfg2/Client/Tools/MacPorts.py b/src/lib/Bcfg2/Client/Tools/MacPorts.py
new file mode 100644
index 000000000..e8c911390
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/MacPorts.py
@@ -0,0 +1,59 @@
+"""This provides Bcfg2 support for macports packages."""
+
+import Bcfg2.Client.Tools
+
+
+class MacPorts(Bcfg2.Client.Tools.PkgTool):
+ """macports package support."""
+ name = 'MacPorts'
+ __execs__ = ["/opt/local/bin/port"]
+ __handles__ = [('Package', 'macport')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'macport'
+    pkgtool = ("/opt/local/bin/port install %s", ("%s", ["name"]))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
+ pkgcache = self.cmd.run("/opt/local/bin/port installed")[1]
+ self.installed = {}
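+        # 'port installed' output looks like '  pkgname @version (active)'
+        # after a header line; split on '@' to recover name and version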
+ for pkg in pkgcache:
+ if pkg.startswith("The following ports are currently installed"):
+ continue
+ pkgname = pkg.split('@')[0].strip()
+ version = pkg.split('@')[1].split(' ')[0]
+ self.logger.info(" pkgname: %s\n version: %s" % (pkgname, version))
+ self.installed[pkgname] = version
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify Package status for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+
+ if entry.attrib['name'] in self.installed:
+ if self.installed[entry.attrib['name']] == entry.attrib['version']:
+ #if not self.setup['quick'] and \
+ # entry.get('verify', 'true') == 'true':
+ #FIXME: We should be able to check this once
+ # http://trac.macports.org/ticket/15709 is implemented
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+ entry.set('current_exists', 'false')
+ return False
+
+ def RemovePackages(self, packages):
+ """Remove extra packages."""
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % " ".join(names))
+ self.cmd.run("/opt/local/bin/port uninstall %s" % \
+ " ".join(names))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX.py b/src/lib/Bcfg2/Client/Tools/POSIX.py
new file mode 100644
index 000000000..9dd0362f3
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX.py
@@ -0,0 +1,943 @@
+"""All POSIX Type client support for Bcfg2."""
+
+import binascii
+from datetime import datetime
+import difflib
+import errno
+import grp
+import logging
+import os
+import pwd
+import shutil
+import stat
+import sys
+import time
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+ unicode = str
+
+import Bcfg2.Client.Tools
+import Bcfg2.Options
+from Bcfg2.Client import XML
+
+log = logging.getLogger('posix')
+
+# map between dev_type attribute and stat constants
+device_map = {'block': stat.S_IFBLK,
+ 'char': stat.S_IFCHR,
+ 'fifo': stat.S_IFIFO}
+
+
+def calcPerms(initial, perms):
+ """This compares ondisk permissions with specified ones."""
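+    # each dict in pdisp maps the bits of one octal digit (special, user,
+    # group, other) to the corresponding stat mode constants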
+ pdisp = [{1:stat.S_ISVTX, 2:stat.S_ISGID, 4:stat.S_ISUID},
+ {1:stat.S_IXUSR, 2:stat.S_IWUSR, 4:stat.S_IRUSR},
+ {1:stat.S_IXGRP, 2:stat.S_IWGRP, 4:stat.S_IRGRP},
+ {1:stat.S_IXOTH, 2:stat.S_IWOTH, 4:stat.S_IROTH}]
+ tempperms = initial
+ if len(perms) == 3:
+ perms = '0%s' % (perms)
+ pdigits = [int(perms[digit]) for digit in range(4)]
+ for index in range(4):
+ for (num, perm) in list(pdisp[index].items()):
+ if pdigits[index] & num:
+ tempperms |= perm
+ return tempperms
+
+
+def normGid(entry):
+ """
+ This takes a group name or gid and
+ returns the corresponding gid or False.
+ """
+ try:
+ try:
+ return int(entry.get('group'))
+ except:
+ return int(grp.getgrnam(entry.get('group'))[2])
+ except (OSError, KeyError):
+ log.error('GID normalization failed for %s. Does group %s exist?'
+ % (entry.get('name'), entry.get('group')))
+ return False
+
+
+def normUid(entry):
+ """
+ This takes a user name or uid and
+ returns the corresponding uid or False.
+ """
+ try:
+ try:
+ return int(entry.get('owner'))
+ except:
+ return int(pwd.getpwnam(entry.get('owner'))[2])
+ except (OSError, KeyError):
+ log.error('UID normalization failed for %s. Does owner %s exist?'
+ % (entry.get('name'), entry.get('owner')))
+ return False
+
+
+def isString(strng, encoding):
+ """
+ Returns true if the string contains no ASCII control characters
+ and can be decoded from the specified encoding.
+ """
+ for char in strng:
+ if ord(char) < 9 or ord(char) > 13 and ord(char) < 32:
+ return False
+ try:
+ strng.decode(encoding)
+ return True
+ except:
+ return False
+
+
+class POSIX(Bcfg2.Client.Tools.Tool):
+ """POSIX File support code."""
+ name = 'POSIX'
+ __handles__ = [('Path', 'device'),
+ ('Path', 'directory'),
+ ('Path', 'file'),
+ ('Path', 'hardlink'),
+ ('Path', 'nonexistent'),
+ ('Path', 'permissions'),
+ ('Path', 'symlink')]
+ __req__ = {'Path': ['name', 'type']}
+
+ # grab paranoid options from /etc/bcfg2.conf
+ opts = {'ppath': Bcfg2.Options.PARANOID_PATH,
+ 'max_copies': Bcfg2.Options.PARANOID_MAX_COPIES}
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse([])
+ ppath = setup['ppath']
+ max_copies = setup['max_copies']
+
+ def canInstall(self, entry):
+ """Check if entry is complete for installation."""
+ if Bcfg2.Client.Tools.Tool.canInstall(self, entry):
+ if (entry.tag,
+ entry.get('type'),
+ entry.text,
+ entry.get('empty', 'false')) == ('Path',
+ 'file',
+ None,
+ 'false'):
+ return False
+ return True
+ else:
+ return False
+
+ def gatherCurrentData(self, entry):
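+        # record the on-disk owner, group, and permissions of file entries so
+        # reporting reflects the current state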
+ if entry.tag == 'Path' and entry.get('type') == 'file':
+ try:
+ ondisk = os.stat(entry.get('name'))
+ except OSError:
+ entry.set('current_exists', 'false')
+ self.logger.debug("%s %s does not exist" %
+ (entry.tag, entry.get('name')))
+ return False
+ try:
+ entry.set('current_owner', str(ondisk[stat.ST_UID]))
+ entry.set('current_group', str(ondisk[stat.ST_GID]))
+ except (OSError, KeyError):
+ pass
+ entry.set('perms', str(oct(ondisk[stat.ST_MODE])[-4:]))
+
+ def Verifydevice(self, entry, _):
+ """Verify device entry."""
+ if entry.get('dev_type') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ if entry.get('dev_type') in ['block', 'char']:
+ # check if major/minor are properly specified
+ if entry.get('major') == None or \
+ entry.get('minor') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ try:
+ # check for file existence
+ filestat = os.stat(entry.get('name'))
+ except OSError:
+ entry.set('current_exists', 'false')
+ self.logger.debug("%s %s does not exist" %
+ (entry.tag, entry.get('name')))
+ return False
+
+ try:
+ # attempt to verify device properties as specified in config
+ dev_type = entry.get('dev_type')
+ mode = calcPerms(device_map[dev_type],
+ entry.get('mode', '0600'))
+ owner = normUid(entry)
+ group = normGid(entry)
+ if dev_type in ['block', 'char']:
+ # check for incompletely specified entries
+ if entry.get('major') == None or \
+ entry.get('minor') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ major = int(entry.get('major'))
+ minor = int(entry.get('minor'))
+ if major == os.major(filestat.st_rdev) and \
+ minor == os.minor(filestat.st_rdev) and \
+ mode == filestat.st_mode and \
+ owner == filestat.st_uid and \
+ group == filestat.st_gid:
+ return True
+ else:
+ return False
+ elif dev_type == 'fifo' and \
+ mode == filestat.st_mode and \
+ owner == filestat.st_uid and \
+ group == filestat.st_gid:
+ return True
+ else:
+ self.logger.info('Device properties for %s incorrect' % \
+ entry.get('name'))
+ return False
+ except OSError:
+ self.logger.debug("%s %s failed to verify" %
+ (entry.tag, entry.get('name')))
+ return False
+
+ def Installdevice(self, entry):
+ """Install device entries."""
+ try:
+ # check for existing paths and remove them
+ os.lstat(entry.get('name'))
+ try:
+ os.unlink(entry.get('name'))
+ exists = False
+ except OSError:
+ self.logger.info('Failed to unlink %s' % \
+ entry.get('name'))
+ return False
+ except OSError:
+ exists = False
+
+ if not exists:
+ try:
+ dev_type = entry.get('dev_type')
+ mode = calcPerms(device_map[dev_type],
+ entry.get('mode', '0600'))
+ if dev_type in ['block', 'char']:
+ # check if major/minor are properly specified
+ if entry.get('major') == None or \
+ entry.get('minor') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ major = int(entry.get('major'))
+ minor = int(entry.get('minor'))
+ device = os.makedev(major, minor)
+ os.mknod(entry.get('name'), mode, device)
+ else:
+ os.mknod(entry.get('name'), mode)
+ """
+ Python uses the OS mknod(2) implementation which modifies the
+ mode based on the umask of the running process. Therefore, the
+ following chmod(2) call is needed to make sure the permissions
+ are set as specified by the user.
+ """
+ os.chmod(entry.get('name'), mode)
+ os.chown(entry.get('name'), normUid(entry), normGid(entry))
+ return True
+ except KeyError:
+ self.logger.error('Failed to install %s' % entry.get('name'))
+ except OSError:
+ self.logger.error('Failed to install %s' % entry.get('name'))
+ return False
+
+ def Verifydirectory(self, entry, modlist):
+ """Verify Path type='directory' entry."""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ while len(entry.get('perms', '')) < 4:
+ entry.set('perms', '0' + entry.get('perms', ''))
+ try:
+ ondisk = os.stat(entry.get('name'))
+ except OSError:
+ entry.set('current_exists', 'false')
+ self.logger.debug("%s %s does not exist" %
+ (entry.tag, entry.get('name')))
+ return False
+ try:
+ owner = str(ondisk[stat.ST_UID])
+ group = str(ondisk[stat.ST_GID])
+ except (OSError, KeyError):
+ self.logger.error('User/Group resolution failed for path %s' % \
+ entry.get('name'))
+ owner = 'root'
+ group = '0'
+ finfo = os.stat(entry.get('name'))
+ perms = oct(finfo[stat.ST_MODE])[-4:]
+ if entry.get('mtime', '-1') != '-1':
+ mtime = str(finfo[stat.ST_MTIME])
+ else:
+ mtime = '-1'
+ pTrue = ((owner == str(normUid(entry))) and
+ (group == str(normGid(entry))) and
+ (perms == entry.get('perms')) and
+ (mtime == entry.get('mtime', '-1')))
+
+ pruneTrue = True
+ ex_ents = []
+ if entry.get('prune', 'false') == 'true' \
+ and (entry.tag == 'Path' and entry.get('type') == 'directory'):
+ # check for any extra entries when prune='true' attribute is set
+ try:
+ entries = ['/'.join([entry.get('name'), ent]) \
+ for ent in os.listdir(entry.get('name'))]
+ ex_ents = [e for e in entries if e not in modlist]
+ if ex_ents:
+ pruneTrue = False
+ self.logger.debug("Directory %s contains extra entries:" % \
+ entry.get('name'))
+ self.logger.debug(ex_ents)
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "Directory %s contains extra entries:\n" % \
+ entry.get('name')
+ nqtext += "\n".join(ex_ents)
+ entry.set('qtext', nqtext)
+ [entry.append(XML.Element('Prune', path=x)) \
+ for x in ex_ents]
+ except OSError:
+ ex_ents = []
+ pruneTrue = True
+
+ if not pTrue:
+ if owner != str(normUid(entry)):
+ entry.set('current_owner', owner)
+ self.logger.debug("%s %s ownership wrong" % \
+ (entry.tag, entry.get('name')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s owner wrong. is %s should be %s" % \
+ (entry.get('name'), owner, entry.get('owner'))
+ entry.set('qtext', nqtext)
+ if group != str(normGid(entry)):
+ entry.set('current_group', group)
+ self.logger.debug("%s %s group wrong" % \
+ (entry.tag, entry.get('name')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s group is %s should be %s" % \
+ (entry.get('name'), group, entry.get('group'))
+ entry.set('qtext', nqtext)
+ if perms != entry.get('perms'):
+ entry.set('current_perms', perms)
+ self.logger.debug("%s %s permissions are %s should be %s" %
+ (entry.tag,
+ entry.get('name'),
+ perms,
+ entry.get('perms')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s %s perms are %s should be %s" % \
+ (entry.tag,
+ entry.get('name'),
+ perms,
+ entry.get('perms'))
+ entry.set('qtext', nqtext)
+ if mtime != entry.get('mtime', '-1'):
+ entry.set('current_mtime', mtime)
+ self.logger.debug("%s %s mtime is %s should be %s" \
+ % (entry.tag, entry.get('name'), mtime,
+ entry.get('mtime')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s mtime is %s should be %s" % \
+ (entry.get('name'), mtime, entry.get('mtime'))
+ entry.set('qtext', nqtext)
+ if entry.get('type') != 'file':
+ nnqtext = entry.get('qtext')
+ nnqtext += '\nInstall %s %s: (y/N) ' % (entry.get('type'),
+ entry.get('name'))
+ entry.set('qtext', nnqtext)
+ return pTrue and pruneTrue
+
+ def Installdirectory(self, entry):
+ """Install Path type='directory' entry."""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % \
+ (entry.get('name')))
+ return False
+ self.logger.info("Installing directory %s" % (entry.get('name')))
+ try:
+ fmode = os.lstat(entry.get('name'))
+ if not stat.S_ISDIR(fmode[stat.ST_MODE]):
+ self.logger.debug("Found a non-directory entry at %s" % \
+ (entry.get('name')))
+ try:
+ os.unlink(entry.get('name'))
+ exists = False
+ except OSError:
+ self.logger.info("Failed to unlink %s" % \
+ (entry.get('name')))
+ return False
+ else:
+ self.logger.debug("Found a pre-existing directory at %s" % \
+ (entry.get('name')))
+ exists = True
+ except OSError:
+ # stat failed
+ exists = False
+
+ if not exists:
+ parent = "/".join(entry.get('name').split('/')[:-1])
+ if parent:
+ try:
+ os.stat(parent)
+ except:
+ self.logger.debug('Creating parent path for directory %s' % (entry.get('name')))
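+ # e.g. for a directory named '/a/b/c/d', parent is '/a/b/c' and
+ # this loop stats (and creates, if needed) '/a', '/a/b' and
+ # '/a/b/c' in turn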
+ for idx in range(len(parent.split('/')[:-1])):
+ current = '/'+'/'.join(parent.split('/')[1:2+idx])
+ try:
+ sloc = os.stat(current)
+ except OSError:
+ try:
+ os.mkdir(current)
+ continue
+ except OSError:
+ return False
+ if not stat.S_ISDIR(sloc[stat.ST_MODE]):
+ try:
+ os.unlink(current)
+ os.mkdir(current)
+ except OSError:
+ return False
+
+ try:
+ os.mkdir(entry.get('name'))
+ except OSError:
+ self.logger.error('Failed to create directory %s' % \
+ (entry.get('name')))
+ return False
+ if entry.get('prune', 'false') == 'true' and entry.get("qtext"):
+ for pent in entry.findall('Prune'):
+ pname = pent.get('path')
+ ulfailed = False
+ if os.path.isdir(pname):
+ self.logger.info("Not removing extra directory %s, "
+ "please check and remove manually" % pname)
+ continue
+ try:
+ self.logger.debug("Unlinking file %s" % pname)
+ os.unlink(pname)
+ except OSError:
+ self.logger.error("Failed to unlink path %s" % pname)
+ ulfailed = True
+ if ulfailed:
+ return False
+ return self.Installpermissions(entry)
+
+ def Verifyfile(self, entry, _):
+ """Verify Path type='file' entry."""
+ # permissions check + content check
+ permissionStatus = self.Verifydirectory(entry, _)
+ tbin = False
+ if entry.text == None and entry.get('empty', 'false') == 'false':
+ self.logger.error("Cannot verify incomplete Path type='%s' %s" %
+ (entry.get('type'), entry.get('name')))
+ return False
+ if entry.get('encoding', 'ascii') == 'base64':
+ tempdata = binascii.a2b_base64(entry.text)
+ tbin = True
+ elif entry.get('empty', 'false') == 'true':
+ tempdata = ''
+ else:
+ tempdata = entry.text
+ if type(tempdata) == unicode:
+ try:
+ tempdata = tempdata.encode(self.setup['encoding'])
+ except UnicodeEncodeError:
+ e = sys.exc_info()[1]
+ self.logger.error("Error encoding file %s:\n %s" % \
+ (entry.get('name'), e))
+
+ different = False
+ content = None
+ if not os.path.exists(entry.get("name")):
+ # first, see if the target file exists at all; if not,
+ # they're clearly different
+ different = True
+ content = ""
+ else:
+ # next, see if the size of the target file is different
+ # from the size of the desired content
+ try:
+ estat = os.stat(entry.get('name'))
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to stat %s: %s" %
+ (err.filename, err))
+ return False
+ if len(tempdata) != estat[stat.ST_SIZE]:
+ different = True
+ else:
+ # finally, read in the target file and compare them
+ # directly. comparison could be done with a checksum,
+ # which might be faster for big binary files, but
+ # slower for everything else
+ try:
+ content = open(entry.get('name')).read()
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read %s: %s" %
+ (err.filename, err))
+ return False
+ different = content != tempdata
+
+ if different:
+ if self.setup['interactive']:
+ prompt = [entry.get('qtext', '')]
+ if not tbin and content is None:
+ # it's possible that we figured out the files are
+ # different without reading in the local file. if
+ # the supplied version of the file is not binary,
+ # we now have to read in the local file to figure
+ # out if _it_ is binary, and either include that
+ # fact or the diff in our prompts for -I
+ try:
+ content = open(entry.get('name')).read()
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read %s: %s" %
+ (err.filename, err))
+ return False
+ if tbin or not isString(content, self.setup['encoding']):
+ # don't compute diffs if the file is binary
+ prompt.append('Binary file, no printable diff')
+ else:
+ diff = self._diff(content, tempdata,
+ difflib.unified_diff,
+ filename=entry.get("name"))
+ if diff:
+ udiff = '\n'.join(diff)
+ try:
+ prompt.append(udiff.decode(self.setup['encoding']))
+ except UnicodeDecodeError:
+ prompt.append("Binary file, no printable diff")
+ else:
+ prompt.append("Diff took too long to compute, no "
+ "printable diff")
+ prompt.append("Install %s %s: (y/N): " % (entry.tag,
+ entry.get('name')))
+ entry.set("qtext", "\n".join(prompt))
+
+ if entry.get('sensitive', 'false').lower() != 'true':
+ if content is None:
+ # it's possible that we figured out the files are
+ # different without reading in the local file. we
+ # now have to read in the local file to figure out
+ # if _it_ is binary, and either include the whole
+ # file or the diff for reports
+ try:
+ content = open(entry.get('name')).read()
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read %s: %s" %
+ (err.filename, err))
+ return False
+
+ if tbin or not isString(content, self.setup['encoding']):
+ # don't compute diffs if the file is binary
+ entry.set('current_bfile', binascii.b2a_base64(content))
+ else:
+ diff = self._diff(content, tempdata, difflib.ndiff,
+ filename=entry.get("name"))
+ if diff:
+ entry.set("current_bdiff",
+ binascii.b2a_base64("\n".join(diff)))
+ elif not tbin and isString(content, self.setup['encoding']):
+ entry.set('current_bfile', binascii.b2a_base64(content))
+ elif permissionStatus == False and self.setup['interactive']:
+ prompt = [entry.get('qtext', '')]
+ prompt.append("Install %s %s: (y/N): " % (entry.tag,
+ entry.get('name')))
+ entry.set("qtext", "\n".join(prompt))
+
+
+ return permissionStatus and not different
+
+ def Installfile(self, entry):
+ """Install Path type='file' entry."""
+ self.logger.info("Installing file %s" % (entry.get('name')))
+
+ parent = "/".join(entry.get('name').split('/')[:-1])
+ if parent:
+ try:
+ os.stat(parent)
+ except:
+ self.logger.debug('Creating parent path for config file %s' % \
+ (entry.get('name')))
+ current = '/'
+ for next in parent.split('/')[1:]:
+ current += next + '/'
+ try:
+ sloc = os.stat(current)
+ try:
+ if not stat.S_ISDIR(sloc[stat.ST_MODE]):
+ self.logger.debug('%s is not a directory; recreating' \
+ % (current))
+ os.unlink(current)
+ os.mkdir(current)
+ except OSError:
+ return False
+ except OSError:
+ try:
+ self.logger.debug("Creating non-existent path %s" % current)
+ os.mkdir(current)
+ except OSError:
+ return False
+
+ # If we get here, then the parent directory should exist
+ if (entry.get("paranoid", False) in ['true', 'True']) and \
+ self.setup.get("paranoid", False) and not \
+ (entry.get('current_exists', 'true') == 'false'):
+ bkupnam = entry.get('name').replace('/', '_')
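+ # backup copies live in self.ppath and are named
+ # <path with '/' replaced by '_'>_<ISO timestamp>, e.g.
+ # '_etc_motd_2012-03-24T11:20:07.123456' (illustrative); at most
+ # max_copies of them are kept per file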
+ # current list of backups for this file
+ try:
+ bkuplist = [f for f in os.listdir(self.ppath) if
+ f.startswith(bkupnam)]
+ except OSError:
+ e = sys.exc_info()[1]
+ self.logger.error("Failed to create backup list in %s: %s" %
+ (self.ppath, e.strerror))
+ return False
+ bkuplist.sort()
+ while len(bkuplist) >= int(self.max_copies):
+ # remove the oldest backup available
+ oldest = bkuplist.pop(0)
+ self.logger.info("Removing %s" % oldest)
+ try:
+ os.remove("%s/%s" % (self.ppath, oldest))
+ except:
+ self.logger.error("Failed to remove %s/%s" % \
+ (self.ppath, oldest))
+ return False
+ try:
+ # backup existing file
+ shutil.copy(entry.get('name'),
+ "%s/%s_%s" % (self.ppath, bkupnam,
+ datetime.isoformat(datetime.now())))
+ self.logger.info("Backup of %s saved to %s" %
+ (entry.get('name'), self.ppath))
+ except IOError:
+ e = sys.exc_info()[1]
+ self.logger.error("Failed to create backup file for %s" % \
+ (entry.get('name')))
+ self.logger.error(e)
+ return False
+ try:
+ newfile = open("%s.new" % entry.get('name'), 'w')
+ if entry.get('encoding', 'ascii') == 'base64':
+ filedata = binascii.a2b_base64(entry.text)
+ elif entry.get('empty', 'false') == 'true':
+ filedata = ''
+ else:
+ if type(entry.text) == unicode:
+ filedata = entry.text.encode(self.setup['encoding'])
+ else:
+ filedata = entry.text
+ newfile.write(filedata)
+ newfile.close()
+ try:
+ os.chown(newfile.name, normUid(entry), normGid(entry))
+ except KeyError:
+ self.logger.error("Failed to chown %s to %s:%s" %
+ (newfile.name, entry.get('owner'),
+ entry.get('group')))
+ os.chown(newfile.name, 0, 0)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Could not chown %s: %s" % (newfile.name,
+ err))
+ os.chmod(newfile.name, calcPerms(stat.S_IFREG, entry.get('perms')))
+ os.rename(newfile.name, entry.get('name'))
+ if entry.get('mtime', '-1') != '-1':
+ try:
+ os.utime(entry.get('name'), (int(entry.get('mtime')),
+ int(entry.get('mtime'))))
+ except:
+ self.logger.error("File %s mtime fix failed" \
+ % (entry.get('name')))
+ return False
+ return True
+ except (OSError, IOError):
+ err = sys.exc_info()[1]
+ if err.errno == errno.EACCES:
+ self.logger.info("Failed to open %s for writing" % (entry.get('name')))
+ else:
+ print(err)
+ return False
+
+ def Verifyhardlink(self, entry, _):
+ """Verify HardLink entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % \
+ (entry.get('name')))
+ return False
+ try:
+ if os.path.samefile(entry.get('name'), entry.get('to')):
+ return True
+ self.logger.debug("Hardlink %s is incorrect" % \
+ entry.get('name'))
+ entry.set('qtext', "Link %s to %s? [y/N] " % \
+ (entry.get('name'),
+ entry.get('to')))
+ return False
+ except OSError:
+ entry.set('current_exists', 'false')
+ entry.set('qtext', "Link %s to %s? [y/N] " % \
+ (entry.get('name'),
+ entry.get('to')))
+ return False
+
+ def Installhardlink(self, entry):
+ """Install HardLink entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % \
+ (entry.get('name')))
+ return False
+ self.logger.info("Installing Hardlink %s" % (entry.get('name')))
+ if os.path.lexists(entry.get('name')):
+ try:
+ fmode = os.lstat(entry.get('name'))[stat.ST_MODE]
+ if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode):
+ self.logger.debug("Non-directory entry already exists at "
+ "%s. Unlinking entry." % (entry.get('name')))
+ os.unlink(entry.get('name'))
+ elif stat.S_ISDIR(fmode):
+ self.logger.debug("Directory already exists at %s" % \
+ (entry.get('name')))
+ self.cmd.run("mv %s/ %s.bak" % \
+ (entry.get('name'),
+ entry.get('name')))
+ else:
+ os.unlink(entry.get('name'))
+ except OSError:
+ self.logger.info("Hardlink %s cleanup failed" % \
+ (entry.get('name')))
+ try:
+ os.link(entry.get('to'), entry.get('name'))
+ return True
+ except OSError:
+ return False
+
+ def Verifynonexistent(self, entry, _):
+ """Verify nonexistent entry."""
+ # return true if path does _not_ exist
+ return not os.path.lexists(entry.get('name'))
+
+ def Installnonexistent(self, entry):
+ '''Remove nonexistent entries'''
+ ename = entry.get('name')
+ if entry.get('recursive') in ['True', 'true']:
+ # ensure that configuration spec is consistent first
+ if [e for e in self.buildModlist() \
+ if e.startswith(ename) and e != ename]:
+ self.logger.error('Not installing %s. One or more files '
+ 'in this directory are specified in '
+ 'your configuration.' % ename)
+ return False
+ try:
+ shutil.rmtree(ename)
+ except OSError:
+ e = sys.exc_info()[1]
+ self.logger.error('Failed to remove %s: %s' % (ename,
+ e.strerror))
+ else:
+ if os.path.isdir(ename):
+ try:
+ os.rmdir(ename)
+ return True
+ except OSError:
+ e = sys.exc_info()[1]
+ self.logger.error('Failed to remove %s: %s' % (ename,
+ e.strerror))
+ return False
+ try:
+ os.remove(ename)
+ return True
+ except OSError:
+ e = sys.exc_info()[1]
+ self.logger.error('Failed to remove %s: %s' % (ename,
+ e.strerror))
+ return False
+
+ def Verifypermissions(self, entry, _):
+ """Verify Path type='permissions' entry"""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ if entry.get('recursive') in ['True', 'true']:
+ # verify ownership information recursively
+ owner = normUid(entry)
+ group = normGid(entry)
+
+ for root, dirs, files in os.walk(entry.get('name')):
+ for p in dirs + files:
+ path = os.path.join(root, p)
+ pstat = os.stat(path)
+ if owner != pstat.st_uid:
+ # owner mismatch for path
+ entry.set('current_owner', str(pstat.st_uid))
+ self.logger.debug("%s %s ownership wrong" % \
+ (entry.tag, path))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += ("Owner for path %s is incorrect. "
+ "Current owner is %s but should be %s\n" % \
+ (path, pstat.st_uid, entry.get('owner')))
+ nqtext += ("\nInstall %s %s: (y/N): " %
+ (entry.tag, entry.get('name')))
+ entry.set('qtext', nqtext)
+ return False
+ if group != pstat.st_gid:
+ # group mismatch for path
+ entry.set('current_group', str(pstat.st_gid))
+ self.logger.debug("%s %s group wrong" % \
+ (entry.tag, path))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += ("Group for path %s is incorrect. "
+ "Current group is %s but should be %s\n" % \
+ (path, pstat.st_gid, entry.get('group')))
+ nqtext += ("\nInstall %s %s: (y/N): " %
+ (entry.tag, entry.get('name')))
+ entry.set('qtext', nqtext)
+ return False
+ return self.Verifydirectory(entry, _)
+
+ def _diff(self, content1, content2, difffunc, filename=None):
+ rv = []
+ start = time.time()
+ longtime = False
+ for diffline in difffunc(content1.split('\n'),
+ content2.split('\n')):
+ now = time.time()
+ rv.append(diffline)
+ if now - start > 5 and not longtime:
+ if filename:
+ self.logger.info("Diff of %s taking a long time" %
+ filename)
+ else:
+ self.logger.info("Diff taking a long time")
+ longtime = True
+ elif now - start > 30:
+ if filename:
+ self.logger.error("Diff of %s took too long; giving up" %
+ filename)
+ else:
+ self.logger.error("Diff took too long; giving up")
+ return False
+ return rv
+
+ def Installpermissions(self, entry):
+ """Install POSIX permissions"""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % (entry.get('name')))
+ return False
+ plist = [entry.get('name')]
+ if entry.get('recursive') in ['True', 'true']:
+ # verify ownership information recursively
+ owner = normUid(entry)
+ group = normGid(entry)
+
+ for root, dirs, files in os.walk(entry.get('name')):
+ for p in dirs + files:
+ path = os.path.join(root, p)
+ pstat = os.stat(path)
+ if owner != pstat.st_uid or group != pstat.st_gid:
+ # owner mismatch for path
+ plist.append(path)
+ try:
+ for p in plist:
+ os.chown(p, normUid(entry), normGid(entry))
+ os.chmod(p, calcPerms(stat.S_IFDIR, entry.get('perms')))
+ return True
+ except (OSError, KeyError):
+ self.logger.error('Permission fixup failed for %s' % \
+ (entry.get('name')))
+ return False
+
+ def Verifysymlink(self, entry, _):
+ """Verify Path type='symlink' entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % \
+ (entry.get('name')))
+ return False
+ try:
+ sloc = os.readlink(entry.get('name'))
+ if sloc == entry.get('to'):
+ return True
+ self.logger.debug("Symlink %s points to %s, should be %s" % \
+ (entry.get('name'), sloc, entry.get('to')))
+ entry.set('current_to', sloc)
+ entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'),
+ entry.get('to')))
+ return False
+ except OSError:
+ entry.set('current_exists', 'false')
+ entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'),
+ entry.get('to')))
+ return False
+
+ def Installsymlink(self, entry):
+ """Install Path type='symlink' entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-lint.' % \
+ (entry.get('name')))
+ return False
+ self.logger.info("Installing symlink %s" % (entry.get('name')))
+ if os.path.lexists(entry.get('name')):
+ try:
+ fmode = os.lstat(entry.get('name'))[stat.ST_MODE]
+ if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode):
+ self.logger.debug("Non-directory entry already exists at "
+ "%s. Unlinking entry." % \
+ (entry.get('name')))
+ os.unlink(entry.get('name'))
+ elif stat.S_ISDIR(fmode):
+ self.logger.debug("Directory already exists at %s" %\
+ (entry.get('name')))
+ self.cmd.run("mv %s/ %s.bak" % \
+ (entry.get('name'),
+ entry.get('name')))
+ else:
+ os.unlink(entry.get('name'))
+ except OSError:
+ self.logger.info("Symlink %s cleanup failed" %\
+ (entry.get('name')))
+ try:
+ os.symlink(entry.get('to'), entry.get('name'))
+ return True
+ except OSError:
+ return False
+
+ def InstallPath(self, entry):
+ """Dispatch install to the proper method according to type"""
+ ret = getattr(self, 'Install%s' % entry.get('type'))
+ return ret(entry)
+
+ def VerifyPath(self, entry, _):
+ """Dispatch verify to the proper method according to type"""
+ ret = getattr(self, 'Verify%s' % entry.get('type'))
+ return ret(entry, _)
diff --git a/src/lib/Bcfg2/Client/Tools/Pacman.py b/src/lib/Bcfg2/Client/Tools/Pacman.py
new file mode 100644
index 000000000..c8c05061c
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Pacman.py
@@ -0,0 +1,83 @@
+"""This is the bcfg2 support for pacman"""
+
+import sys
+
+import Bcfg2.Client.Tools
+
+
+class Pacman(Bcfg2.Client.Tools.PkgTool):
+ '''Archlinux package support'''
+ name = 'Pacman'
+ __execs__ = ["/usr/bin/pacman"]
+ __handles__ = [('Package', 'pacman')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'pacman'
+ pkgtool = ("/usr/bin/pacman --needed --noconfirm --noprogressbar")
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ '''Refresh memory hashes of packages'''
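+ # each line of `pacman -Q` output is '<name> <version>', e.g.
+ # 'bash 4.2.024-2' (version shown is illustrative)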
+ pkgcache = self.cmd.run("/usr/bin/pacman -Q")[1]
+ self.installed = {}
+ for pkg in pkgcache:
+ pkgname = pkg.split(' ')[0].strip()
+ version = pkg.split(' ')[1].strip()
+ #self.logger.info(" pkgname: %s, version: %s" % (pkgname, version))
+ self.installed[pkgname] = version
+
+ def VerifyPackage(self, entry, modlist):
+ '''Verify Package status for entry'''
+
+ self.logger.info("VerifyPackage : %s : %s" % entry.get('name'),
+ entry.get('version'))
+
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+
+ if entry.attrib['name'] in self.installed:
+ if entry.attrib['version'] == 'auto':
+ return True
+ elif self.installed[entry.attrib['name']] == entry.attrib['version']:
+ #if not self.setup['quick'] and \
+ # entry.get('verify', 'true') == 'true':
+ #FIXME: need to figure out if pacman
+ # allows you to verify packages
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ self.logger.info("attribname: %s" % (entry.attrib['name']))
+ self.logger.info("attribname: %s" % (entry.attrib['name']))
+ return False
+ entry.set('current_exists', 'false')
+ self.logger.info("attribname: %s" % (entry.attrib['name']))
+ return False
+
+ def RemovePackages(self, packages):
+ '''Remove extra packages'''
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % " ".join(names))
+ self.cmd.run("%s --noconfirm --noprogressbar -R %s" % \
+ (self.pkgtool, " ".join(names)))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
+
+ def Install(self, packages, states):
+ '''
+ Pacman Install
+ '''
+ pkgline = ""
+ for pkg in packages:
+ pkgline += " " + pkg.get('name')
+
+ self.logger.info("packages : " + pkgline)
+
+ try:
+ self.logger.debug("Running : %s -S %s" % (self.pkgtool, pkgline))
+ self.cmd.run("%s -S %s" % (self.pkgtool, pkgline))
+ except Exception:
+ e = sys.exc_info()[1]
+ self.logger.error("Error occurred during installation: %s" % e)
diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py
new file mode 100644
index 000000000..646995f4e
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Portage.py
@@ -0,0 +1,71 @@
+"""This is the Bcfg2 tool for the Gentoo Portage system."""
+
+import re
+import Bcfg2.Client.Tools
+
+
+class Portage(Bcfg2.Client.Tools.PkgTool):
+ """The Gentoo toolset implements package and service operations and
+ inherits the rest from Toolset.Toolset."""
+ name = 'Portage'
+ __execs__ = ['/usr/bin/emerge', '/usr/bin/equery']
+ __handles__ = [('Package', 'ebuild')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'ebuild'
+ # requires a working PORTAGE_BINHOST in make.conf
+ pkgtool = ('emerge --getbinpkgonly %s', ('=%s-%s', ['name', 'version']))
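+ # pkgtool is (command template, (per-package format, [attributes]));
+ # the PkgTool base class presumably expands this into e.g.
+ # 'emerge --getbinpkgonly =app-shells/bash-4.2_p24' from the
+ # name/version attributes (package and version shown are illustrative)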
+
+ def __init__(self, logger, cfg, setup):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, cfg, setup)
+ self.__important__ = self.__important__ + ['/etc/make.conf']
+ self.cfg = cfg
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
+ ret, cache = self.cmd.run("equery -q list '*'")
+ if ret == 2:
+ cache = self.cmd.run("equery -q list '*'")[1]
+ pattern = re.compile(r'(.*)-(\d.*)')
+ self.installed = {}
+ for pkg in cache:
+ if pattern.match(pkg):
+ name = pattern.match(pkg).group(1)
+ version = pattern.match(pkg).group(2)
+ self.installed[name] = version
+ else:
+ self.logger.info("Failed to parse pkg name %s" % pkg)
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify package for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+ if entry.attrib['name'] in self.installed:
+ if self.installed[entry.attrib['name']] == entry.attrib['version']:
+ if not self.setup['quick'] and \
+ entry.get('verify', 'true') == 'true':
+ output = self.cmd.run("/usr/bin/equery -N check '=%s-%s' 2>&1 "
+ "| grep '!!!' | awk '{print $2}'" \
+ % (entry.get('name'), entry.get('version')))[1]
+ if [filename for filename in output \
+ if filename not in modlist]:
+ return False
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+ entry.set('current_exists', 'false')
+ return False
+
+ def RemovePackages(self, packages):
+ """Deal with extra configuration detected."""
+ pkgnames = " ".join([pkg.get('name') for pkg in packages])
+ if len(packages) > 0:
+ self.logger.info('Removing packages:')
+ self.logger.info(pkgnames)
+ self.cmd.run("emerge --unmerge --quiet %s" % " ".join(pkgnames.split(' ')))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/RPMng.py b/src/lib/Bcfg2/Client/Tools/RPMng.py
new file mode 100644
index 000000000..00dd00d71
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/RPMng.py
@@ -0,0 +1,1027 @@
+"""Bcfg2 Support for RPMS"""
+
+import os.path
+import rpm
+import rpmtools
+import Bcfg2.Client.Tools
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+class RPMng(Bcfg2.Client.Tools.PkgTool):
+ """Support for RPM packages."""
+ name = 'RPMng'
+
+ __execs__ = ['/bin/rpm', '/var/lib/rpm']
+ __handles__ = [('Package', 'rpm')]
+
+ __req__ = {'Package': ['name', 'version']}
+ __ireq__ = {'Package': ['url']}
+
+ __new_req__ = {'Package': ['name'], 'Instance': ['version', 'release', 'arch']}
+ __new_ireq__ = {'Package': ['uri'], \
+ 'Instance': ['simplefile']}
+
+ __gpg_req__ = {'Package': ['name', 'version']}
+ __gpg_ireq__ = {'Package': ['name', 'version']}
+
+ __new_gpg_req__ = {'Package': ['name'], 'Instance': ['version', 'release']}
+ __new_gpg_ireq__ = {'Package': ['name'], 'Instance': ['version', 'release']}
+
+ conflicts = ['RPM']
+
+ pkgtype = 'rpm'
+ pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"]))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+
+ # create a global ignore list used when ignoring particular
+ # files during package verification
+ self.ignores = [entry.get('name') for struct in config for entry in struct \
+ if entry.get('type') == 'ignore']
+ self.instance_status = {}
+ self.extra_instances = []
+ self.modlists = {}
+ self.gpg_keyids = self.getinstalledgpg()
+
+ # Process the RPMng section of the config file.
+ RPMng_CP = ConfigParser.ConfigParser()
+ RPMng_CP.read(self.setup.get('setup'))
+
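+ # The options below come from an optional [RPMng] section of the
+ # client configuration file; a hypothetical example (all values
+ # shown are illustrative only):
+ #
+ # [RPMng]
+ # installonlypackages = kernel, kernel-devel, gpg-pubkey
+ # erase_flags = allmatches
+ # pkg_checks = true
+ # verify_flags = nomd5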
+ # installonlypackages
+ self.installOnlyPkgs = []
+ if RPMng_CP.has_option(self.name, 'installonlypackages'):
+ for i in RPMng_CP.get(self.name, 'installonlypackages').split(','):
+ self.installOnlyPkgs.append(i.strip())
+ if self.installOnlyPkgs == []:
+ self.installOnlyPkgs = ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp',
+ 'kernel-modules', 'kernel-debug', 'kernel-unsupported',
+ 'kernel-source', 'kernel-devel', 'kernel-default',
+ 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen',
+ 'gpg-pubkey']
+ if 'gpg-pubkey' not in self.installOnlyPkgs:
+ self.installOnlyPkgs.append('gpg-pubkey')
+ self.logger.debug('installOnlyPackages = %s' % self.installOnlyPkgs)
+
+ # erase_flags
+ self.erase_flags = []
+ if RPMng_CP.has_option(self.name, 'erase_flags'):
+ for i in RPMng_CP.get(self.name, 'erase_flags').split(','):
+ self.erase_flags.append(i.strip())
+ if self.erase_flags == []:
+ self.erase_flags = ['allmatches']
+ self.logger.debug('erase_flags = %s' % self.erase_flags)
+
+ # pkg_checks
+ if RPMng_CP.has_option(self.name, 'pkg_checks'):
+ self.pkg_checks = RPMng_CP.get(self.name, 'pkg_checks').lower()
+ else:
+ self.pkg_checks = 'true'
+ self.logger.debug('pkg_checks = %s' % self.pkg_checks)
+
+ # pkg_verify
+ if RPMng_CP.has_option(self.name, 'pkg_verify'):
+ self.pkg_verify = RPMng_CP.get(self.name, 'pkg_verify').lower()
+ else:
+ self.pkg_verify = 'true'
+ self.logger.debug('pkg_verify = %s' % self.pkg_verify)
+
+ # installed_action
+ if RPMng_CP.has_option(self.name, 'installed_action'):
+ self.installed_action = RPMng_CP.get(self.name, 'installed_action').lower()
+ else:
+ self.installed_action = 'install'
+ self.logger.debug('installed_action = %s' % self.installed_action)
+
+ # version_fail_action
+ if RPMng_CP.has_option(self.name, 'version_fail_action'):
+ self.version_fail_action = RPMng_CP.get(self.name, 'version_fail_action').lower()
+ else:
+ self.version_fail_action = 'upgrade'
+ self.logger.debug('version_fail_action = %s' % self.version_fail_action)
+
+ # verify_fail_action
+ if self.name == "RPMng":
+ if RPMng_CP.has_option(self.name, 'verify_fail_action'):
+ self.verify_fail_action = RPMng_CP.get(self.name, 'verify_fail_action').lower()
+ else:
+ self.verify_fail_action = 'reinstall'
+ else: # yum can't reinstall packages.
+ self.verify_fail_action = 'none'
+ self.logger.debug('verify_fail_action = %s' % self.verify_fail_action)
+
+ # verify_flags
+ if RPMng_CP.has_option(self.name, 'verify_flags'):
+ self.verify_flags = RPMng_CP.get(self.name, 'verify_flags').lower().split(',')
+ else:
+ self.verify_flags = []
+ if '' in self.verify_flags:
+ self.verify_flags.remove('')
+ self.logger.debug('verify_flags = %s' % self.verify_flags)
+
+ # Force a re-prelink of all packages if prelink exists.
+ # Many, if not most, package verify failures are caused by
+ # out-of-date prelinking.
+ if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']:
+ cmdrc, output = self.cmd.run('/usr/sbin/prelink -a -mR')
+ if cmdrc == 0:
+ self.logger.debug('Pre-emptive prelink succeeded')
+ else:
+ # FIXME : this is dumb - what if the output is huge?
+ self.logger.error('Pre-emptive prelink failed: %s' % output)
+
+
+ def RefreshPackages(self):
+ """
+ Creates self.installed{} which is a dict of installed packages.
+
+ The dict items are lists of nevra dicts. This loosely matches the
+ config from the server and what rpmtools uses to specify packages.
+
+ e.g.
+
+ self.installed['foo'] = [ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'i386'},
+ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'x86_64'} ]
+ """
+ self.installed = {}
+ refresh_ts = rpmtools.rpmtransactionset()
+ # Don't bother with signature checks at this stage. The GPG keys might
+ # not be installed.
+ refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
+ for nevra in rpmtools.rpmpackagelist(refresh_ts):
+ self.installed.setdefault(nevra['name'], []).append(nevra)
+ if self.setup['debug']:
+ print("The following package instances are installed:")
+ for name, instances in list(self.installed.items()):
+ self.logger.debug(" " + name)
+ for inst in instances:
+ self.logger.debug(" %s" %self.str_evra(inst))
+ refresh_ts.closeDB()
+ del refresh_ts
+
+ def VerifyPackage(self, entry, modlist, pinned_version=None):
+ """
+ Verify Package status for entry.
+ Performs the following:
+ - Checks for the presence of required Package Instances.
+ - Compares the evra 'version' info against self.installed{}.
+ - RPM level package verify (rpm --verify).
+ - Checks for the presence of unrequired package instances.
+
+ Produces the following dict and list for RPMng.Install() to use:
+ For installs/upgrades/fixes of required instances:
+ instance_status = { <Instance Element Object>:
+ { 'installed': True|False,
+ 'version_fail': True|False,
+ 'verify_fail': True|False,
+ 'pkg': <Package Element Object>,
+ 'modlist': [ <filename>, ... ],
+ 'verify' : [ <rpm --verify results> ]
+ }, ......
+ }
+
+ For deletions of unrequired instances:
+ extra_instances = [ <Package Element Object>, ..... ]
+
+ Constructs the text prompts for interactive mode.
+ """
+ instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ if instances == []:
+ # We have an old-style entry with no Instance elements. Convert it to the new style.
+ instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
+ for attrib in list(entry.attrib.keys()):
+ instance.attrib[attrib] = entry.attrib[attrib]
+ if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true':
+ if 'any' in [entry.get('version'), pinned_version]:
+ version, release = 'any', 'any'
+ elif entry.get('version') == 'auto':
+ if pinned_version != None:
+ version, release = pinned_version.split('-')
+ else:
+ return False
+ else:
+ version, release = entry.get('version').split('-')
+ instance.set('version', version)
+ instance.set('release', release)
+ if entry.get('verify', 'true') == 'false':
+ instance.set('verify', 'false')
+ instances = [ instance ]
+
+ self.logger.debug("Verifying package instances for %s" % entry.get('name'))
+ package_fail = False
+ qtext_versions = ''
+
+ if entry.get('name') in self.installed:
+ # There is at least one instance installed.
+ if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true':
+ rpmTs = rpm.TransactionSet()
+ rpmHeader = None
+ for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')):
+ if rpmHeader is None or rpm.versionCompare(h, rpmHeader) > 0:
+ rpmHeader = h
+ rpmProvides = [ h['provides'] for h in \
+ rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')) ]
+ rpmIntersection = set(rpmHeader['provides']) & \
+ set(self.installOnlyPkgs)
+ if len(rpmIntersection) > 0:
+ # Packages that should only be installed or removed.
+ # e.g. kernels.
+ self.logger.debug(" Install only package.")
+ for inst in instances:
+ self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status[inst]['version_fail'] = False
+ if inst.tag == 'Package' and len(self.installed[entry.get('name')]) > 1:
+ self.logger.error("WARNING: Multiple instances of package %s are installed." % \
+ (entry.get('name')))
+ for pkg in self.installed[entry.get('name')]:
+ if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) \
+ or self.inst_evra_equal(inst, pkg):
+ if inst.get('version') == 'any':
+ self.logger.error("got any version")
+ self.logger.debug(" %s" % self.str_evra(inst))
+ self.instance_status[inst]['installed'] = True
+
+ if self.pkg_verify == 'true' and \
+ inst.get('pkg_verify', 'true') == 'true':
+ flags = inst.get('verify_flags', '').split(',') + self.verify_flags
+ if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
+ entry.get('name') != 'gpg-pubkey':
+ flags += ['nosignature', 'nodigest']
+ self.logger.debug('WARNING: Package %s %s requires GPG Public key with ID %s'\
+ % (pkg.get('name'), self.str_evra(pkg), \
+ pkg.get('gpgkeyid', '')))
+ self.logger.debug(' Disabling signature check.')
+
+ if self.setup.get('quick', False):
+ if rpmtools.prelink_exists:
+ flags += ['nomd5', 'nosize']
+ else:
+ flags += ['nomd5']
+ self.logger.debug(" verify_flags = %s" % flags)
+
+ if inst.get('verify', 'true') == 'false':
+ self.instance_status[inst]['verify'] = None
+ else:
+ vp_ts = rpmtools.rpmtransactionset()
+ self.instance_status[inst]['verify'] = \
+ rpmtools.rpm_verify( vp_ts, pkg, flags)
+ vp_ts.closeDB()
+ del vp_ts
+
+ if self.instance_status[inst]['installed'] == False:
+ self.logger.info(" Package %s %s not installed." % \
+ (entry.get('name'), self.str_evra(inst)))
+
+ qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ entry.set('current_exists', 'false')
+ else:
+ # Normal Packages that can be upgraded.
+ for inst in instances:
+ self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status[inst]['version_fail'] = False
+
+ # Only installed packages with the same architecture are
+ # relevant.
+ if inst.get('arch', None) == None:
+ arch_match = self.installed[entry.get('name')]
+ else:
+ arch_match = [pkg for pkg in self.installed[entry.get('name')] \
+ if pkg.get('arch', None) == inst.get('arch', None)]
+
+ if len(arch_match) > 1:
+ self.logger.error("Multiple instances of package %s installed with the same achitecture." % \
+ (entry.get('name')))
+ elif len(arch_match) == 1:
+ # There is only one installed like there should be.
+ # Check that it is the right version.
+ for pkg in arch_match:
+ if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) or \
+ self.inst_evra_equal(inst, pkg):
+ self.logger.debug(" %s" % self.str_evra(inst))
+ self.instance_status[inst]['installed'] = True
+
+ if self.pkg_verify == 'true' and \
+ inst.get('pkg_verify', 'true') == 'true':
+ flags = inst.get('verify_flags', '').split(',') + self.verify_flags
+ if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
+ 'nosignature' not in flags:
+ flags += ['nosignature', 'nodigest']
+ self.logger.info('WARNING: Package %s %s requires GPG Public key with ID %s'\
+ % (pkg.get('name'), self.str_evra(pkg), \
+ pkg.get('gpgkeyid', '')))
+ self.logger.info(' Disabling signature check.')
+
+ if self.setup.get('quick', False):
+ if rpmtools.prelink_exists:
+ flags += ['nomd5', 'nosize']
+ else:
+ flags += ['nomd5']
+ self.logger.debug(" verify_flags = %s" % flags)
+
+ if inst.get('verify', 'true') == 'false':
+ self.instance_status[inst]['verify'] = None
+ else:
+ vp_ts = rpmtools.rpmtransactionset()
+ self.instance_status[inst]['verify'] = \
+ rpmtools.rpm_verify( vp_ts, pkg, flags )
+ vp_ts.closeDB()
+ del vp_ts
+
+ else:
+ # Wrong version installed.
+ self.instance_status[inst]['version_fail'] = True
+ self.logger.info(" Wrong version installed. Want %s, but have %s"\
+ % (self.str_evra(inst), self.str_evra(pkg)))
+
+ qtext_versions = qtext_versions + 'U(%s -> %s) ' % \
+ (self.str_evra(pkg), self.str_evra(inst))
+ elif len(arch_match) == 0:
+ # This instance is not installed.
+ self.instance_status[inst]['installed'] = False
+ self.logger.info(" %s is not installed." % self.str_evra(inst))
+ qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+
+ # Check the rpm verify results.
+ for inst in instances:
+ instance_fail = False
+ # Dump the rpm verify results.
+ #****Write something to format this nicely.*****
+ if self.setup['debug'] and self.instance_status[inst].get('verify', None):
+ self.logger.debug(self.instance_status[inst]['verify'])
+
+ self.instance_status[inst]['verify_fail'] = False
+ if self.instance_status[inst].get('verify', None):
+ if len(self.instance_status[inst].get('verify')) > 1:
+ self.logger.info("WARNING: Verification of more than one package instance.")
+
+ for result in self.instance_status[inst]['verify']:
+
+ # Check header results
+ if result.get('hdr', None):
+ instance_fail = True
+ self.instance_status[inst]['verify_fail'] = True
+
+ # Check dependency results
+ if result.get('deps', None):
+ instance_fail = True
+ self.instance_status[inst]['verify_fail'] = True
+
+ # Check the rpm verify file results against the modlist
+ # and entry and per Instance Ignores.
+ ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
+ [ig.get('name') for ig in inst.findall('Ignore')] + \
+ self.ignores
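+ # each item in result['files'] appears to be a sequence whose
+ # last element is the filename; the earlier elements record what
+ # rpm --verify flagged (FixInstance below also inspects
+ # file_result[-2] for the 'c' config-file marker)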
+ for file_result in result.get('files', []):
+ if file_result[-1] not in modlist + ignores:
+ instance_fail = True
+ self.instance_status[inst]['verify_fail'] = True
+ else:
+ self.logger.debug(" Modlist/Ignore match: %s" % \
+ (file_result[-1]))
+
+ if instance_fail == True:
+ self.logger.debug("*** Instance %s failed RPM verification ***" % \
+ self.str_evra(inst))
+ qtext_versions = qtext_versions + 'R(%s) ' % self.str_evra(inst)
+ self.modlists[entry] = modlist
+
+ # Attach status structure for return to server for reporting.
+ inst.set('verify_status', str(self.instance_status[inst]))
+
+ if self.instance_status[inst]['installed'] == False or \
+ self.instance_status[inst].get('version_fail', False)== True or \
+ self.instance_status[inst].get('verify_fail', False) == True:
+ package_fail = True
+ self.instance_status[inst]['pkg'] = entry
+ self.modlists[entry] = modlist
+
+ # Find Installed Instances that are not in the Config.
+ extra_installed = self.FindExtraInstances(entry, self.installed[entry.get('name')])
+ if extra_installed != None:
+ package_fail = True
+ self.extra_instances.append(extra_installed)
+ for inst in extra_installed.findall('Instance'):
+ qtext_versions = qtext_versions + 'D(%s) ' % self.str_evra(inst)
+ self.logger.debug("Found Extra Instances %s" % qtext_versions)
+
+ if package_fail == True:
+ self.logger.info(" Package %s failed verification." % \
+ (entry.get('name')))
+ qtext = 'Install/Upgrade/delete Package %s instance(s) - %s (y/N) ' % \
+ (entry.get('name'), qtext_versions)
+ entry.set('qtext', qtext)
+
+ bcfg2_versions = ''
+ for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']:
+ bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(bcfg2_inst)
+ if bcfg2_versions != '':
+ entry.set('version', bcfg2_versions)
+ installed_versions = ''
+
+ for installed_inst in self.installed[entry.get('name')]:
+ installed_versions = installed_versions + '(%s) ' % \
+ self.str_evra(installed_inst)
+
+ entry.set('current_version', installed_versions)
+ return False
+
+ else:
+ # There are no Instances of this package installed.
+ self.logger.debug("Package %s has no instances installed" % (entry.get('name')))
+ entry.set('current_exists', 'false')
+ bcfg2_versions = ''
+ for inst in instances:
+ qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ self.instance_status.setdefault(inst, {})['installed'] = False
+ self.modlists[entry] = modlist
+ self.instance_status[inst]['pkg'] = entry
+ if inst.tag == 'Instance':
+ bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(inst)
+ if bcfg2_versions != '':
+ entry.set('version', bcfg2_versions)
+ entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % \
+ (entry.get('name'), qtext_versions))
+
+ return False
+ return True
+
+ def RemovePackages(self, packages):
+ """
+ Remove specified entries.
+
+ packages is a list of Package Entries with Instances generated
+ by FindExtraPackages().
+
+ """
+ self.logger.debug('Running RPMng.RemovePackages()')
+
+ pkgspec_list = []
+ for pkg in packages:
+ for inst in pkg:
+ if pkg.get('name') != 'gpg-pubkey':
+ pkgspec = { 'name':pkg.get('name'),
+ 'epoch':inst.get('epoch', None),
+ 'version':inst.get('version'),
+ 'release':inst.get('release'),
+ 'arch':inst.get('arch') }
+ pkgspec_list.append(pkgspec)
+ else:
+ pkgspec = { 'name':pkg.get('name'),
+ 'version':inst.get('version'),
+ 'release':inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
+ % (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted in a future version of the RPMng driver.")
+ #pkgspec_list.append(pkg_spec)
+
+ erase_results = rpmtools.rpm_erase(pkgspec_list, self.erase_flags)
+ if erase_results == []:
+ self.modified += packages
+ for pkg in pkgspec_list:
+ self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg)))
+ else:
+ self.logger.info("Bulk erase failed with errors:")
+ self.logger.debug("Erase results = %s" % erase_results)
+ self.logger.info("Attempting individual erase for each package.")
+ pkgspec_list = []
+ for pkg in packages:
+ pkg_modified = False
+ for inst in pkg:
+ if pkg.get('name') != 'gpg-pubkey':
+ pkgspec = { 'name':pkg.get('name'),
+ 'epoch':inst.get('epoch', None),
+ 'version':inst.get('version'),
+ 'release':inst.get('release'),
+ 'arch':inst.get('arch') }
+ pkgspec_list.append(pkgspec)
+ else:
+ pkgspec = { 'name':pkg.get('name'),
+ 'version':inst.get('version'),
+ 'release':inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
+ % (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted in a future version of the RPMng driver.")
+ continue # Don't delete the gpg-pubkey packages for now.
+ erase_results = rpmtools.rpm_erase([pkgspec], self.erase_flags)
+ if erase_results == []:
+ pkg_modified = True
+ self.logger.info("Deleted %s %s" % \
+ (pkgspec.get('name'), self.str_evra(pkgspec)))
+ else:
+ self.logger.error("unable to delete %s %s" % \
+ (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.debug("Failure = %s" % erase_results)
+ if pkg_modified == True:
+ self.modified.append(pkg)
+
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
+
+ def FixInstance(self, instance, inst_status):
+ """"
+ Control if a reinstall of a package happens or not based on the
+ results from RPMng.VerifyPackage().
+
+ Return True to reinstall, False to not reinstall.
+
+ """
+ fix = False
+
+ if inst_status.get('installed', False) == False:
+ if instance.get('installed_action', 'install') == "install" and \
+ self.installed_action == "install":
+ fix = True
+ else:
+ self.logger.debug('Installed Action for %s %s is to not install' % \
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
+
+ elif inst_status.get('version_fail', False) == True:
+ if instance.get('version_fail_action', 'upgrade') == "upgrade" and \
+ self.version_fail_action == "upgrade":
+ fix = True
+ else:
+ self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
+
+ elif inst_status.get('verify_fail', False) == True and self.name == "RPMng":
+ # yum can't reinstall packages so only do this for rpm.
+ if instance.get('verify_fail_action', 'reinstall') == "reinstall" and \
+ self.verify_fail_action == "reinstall":
+ for inst in inst_status.get('verify'):
+ # This needs to be a for loop rather than a straight get()
+ # because the underlying routines handle multiple packages
+ # and return a list of results.
+ self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra'))
+
+ if inst.get("hdr", False):
+ fix = True
+
+ elif inst.get('files', False):
+ # Parse rpm verify file results
+ for file_result in inst.get('files', []):
+ self.logger.debug('reinstall_check: file: %s' % file_result)
+ if file_result[-2] != 'c':
+ fix = True
+ break
+
+ # Shouldn't really need this, but included for clarity.
+ elif inst.get("deps", False):
+ fix = False
+ else:
+ self.logger.debug('Verify Fail Action for %s %s is to not reinstall' % \
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
+
+ return fix
+
+ def Install(self, packages, states):
+ """
+ Try to fix everything that RPMng.VerifyPackage() found wrong for
+ each Package Entry. This can result in individual RPMs being
+ installed (for the first time), reinstalled, deleted, downgraded
+ or upgraded.
+
+ packages is a list of Package Elements that has
+ states[<Package Element>] == False
+
+ The following effects occur:
+ - states{} is conditionally updated for each package.
+ - self.installed{} is rebuilt, possibly multiple times.
+ - self.instance_status{} is conditionally updated for each instance
+ of a package.
+ - Each package will be added to self.modified[] if its states{}
+ entry is set to True.
+
+ """
+ self.logger.info('Running RPMng.Install()')
+
+ install_only_pkgs = []
+ gpg_keys = []
+ upgrade_pkgs = []
+
+ # Remove extra instances.
+ # Can not reverify because we don't have a package entry.
+ if len(self.extra_instances) > 0:
+ if (self.setup.get('remove') == 'all' or \
+ self.setup.get('remove') == 'packages') and\
+ not self.setup.get('dryrun'):
+ self.RemovePackages(self.extra_instances)
+ else:
+ self.logger.info("The following extra package instances will be removed by the '-r' option:")
+ for pkg in self.extra_instances:
+ for inst in pkg:
+ self.logger.info(" %s %s" % (pkg.get('name'), self.str_evra(inst)))
+
+ # Figure out which instances of the packages actually need something
+ # doing to them and place in the appropriate work 'queue'.
+ for pkg in packages:
+ for inst in [instn for instn in pkg if instn.tag \
+ in ['Instance', 'Package']]:
+ if self.FixInstance(inst, self.instance_status[inst]):
+ if pkg.get('name') == 'gpg-pubkey':
+ gpg_keys.append(inst)
+ elif pkg.get('name') in self.installOnlyPkgs:
+ install_only_pkgs.append(inst)
+ else:
+ upgrade_pkgs.append(inst)
+
+ # Fix installOnlyPackages
+ if len(install_only_pkgs) > 0:
+ self.logger.info("Attempting to install 'install only packages'")
+ install_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile')) \
+ for inst in install_only_pkgs])
+ self.logger.debug("rpm --install --quiet --oldpackage %s" % install_args)
+ cmdrc, output = self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs %s" % \
+ install_args)
+ if cmdrc == 0:
+ # The rpm command succeeded. All packages installed.
+ self.logger.info("Single Pass for InstallOnlyPkgs Succeded")
+ self.RefreshPackages()
+
+ else:
+ # The rpm command failed. No packages installed.
+ # Try installing instances individually.
+ self.logger.error("Single Pass for InstallOnlyPackages Failed")
+ installed_instances = []
+ for inst in install_only_pkgs:
+ install_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ self.logger.debug("rpm --install --quiet --oldpackage %s" % install_args)
+ cmdrc, output = self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs %s" % \
+ install_args)
+ if cmdrc == 0:
+ installed_instances.append(inst)
+ else:
+ self.logger.debug("InstallOnlyPackage %s %s would not install." % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+
+ install_pkg_set = set([self.instance_status[inst].get('pkg') \
+ for inst in install_only_pkgs])
+ self.RefreshPackages()
+
+ # Install GPG keys.
+ if len(gpg_keys) > 0:
+ for inst in gpg_keys:
+ self.logger.info("Installing GPG keys.")
+ key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ cmdrc, output = self.cmd.run("rpm --import %s" % key_arg)
+ if cmdrc != 0:
+ self.logger.debug("Unable to install %s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+ else:
+ self.logger.debug("Installed %s-%s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ inst.get('version'), inst.get('release')))
+ self.RefreshPackages()
+ self.gpg_keyids = self.getinstalledgpg()
+ pkg = self.instance_status[gpg_keys[0]].get('pkg')
+ states[pkg] = self.VerifyPackage(pkg, [])
+
+ # Fix upgradeable packages.
+ if len(upgrade_pkgs) > 0:
+ self.logger.info("Attempting to upgrade packages")
+ upgrade_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile')) \
+ for inst in upgrade_pkgs])
+ cmdrc, output = self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \
+ upgrade_args)
+ if cmdrc == 0:
+ # The rpm command succeeded. All packages upgraded.
+ self.logger.info("Single Pass for Upgraded Packages Succeded")
+ upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \
+ for inst in upgrade_pkgs])
+ self.RefreshPackages()
+ else:
+ # The rpm command failed. No packages upgraded.
+ # Try upgrading instances individually.
+ self.logger.error("Single Pass for Upgrading Packages Failed")
+ upgraded_instances = []
+ for inst in upgrade_pkgs:
+ upgrade_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ #self.logger.debug("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \
+ # upgrade_args)
+ cmdrc, output = self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % upgrade_args)
+ if cmdrc == 0:
+ upgraded_instances.append(inst)
+ else:
+ self.logger.debug("Package %s %s would not upgrade." % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+
+ upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \
+ for inst in upgrade_pkgs])
+ self.RefreshPackages()
+
+ if not self.setup['kevlar']:
+ for pkg_entry in packages:
+ self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(pkg_entry, \
+ self.modlists.get(pkg_entry, []))
+
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def canInstall(self, entry):
+ """Test if entry has enough information to be installed."""
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Cannot install entry %s:%s with bind failure" % \
+ (entry.tag, entry.get('name')))
+ return False
+
+
+ instances = entry.findall('Instance')
+
+ # If the entry wasn't verifiable, then we really don't want to try and fix something
+ # that we don't know is broken.
+ if not self.canVerify(entry):
+ self.logger.debug("WARNING: Package %s was not verifiable, not passing to Install()" \
+ % entry.get('name'))
+ return False
+
+ if not instances:
+ # Old non Instance format, unmodified.
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ return False
+ else:
+ if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ return False
+ else:
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if [attr for attr in self.__new_gpg_ireq__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install"\
+ % (inst.tag, entry.get('name')))
+ return False
+ else:
+ # New format with Instances.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ self.logger.error(" Required attributes that may not be present are %s" \
+ % (self.__new_ireq__[entry.tag]))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if inst.tag == 'Instance':
+ if [attr for attr in self.__new_ireq__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for %s of package %s; cannot install" \
+ % (inst.tag, entry.get('name')))
+ self.logger.error(" Required attributes that may not be present are %s" \
+ % (self.__new_ireq__[inst.tag]))
+ return False
+ return True
+
+ def canVerify(self, entry):
+ """
+ Test if entry has enough information to be verified.
+
+ Three types of entries are checked.
+ Old style Package
+ New style Package with Instances
+ gpg-pubkey packages
+
+ Also the old style entries get modified after the first
+ VerifyPackage() run, so there needs to be a second test.
+
+ """
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Entry %s:%s reports bind failure: %s" % \
+ (entry.tag, entry.get('name'), entry.get('failure')))
+ return False
+
+ # We don't want to do any checks so we don't care what the entry has in it.
+ if self.pkg_checks == 'false' or \
+ entry.get('pkg_checks', 'true').lower() == 'false':
+ return True
+
+ instances = entry.findall('Instance')
+
+ if not instances:
+ # Old non Instance format, unmodified.
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ elif entry.tag == 'Path' and entry.get('type') == 'ignore':
+ # ignored Paths are only relevant during failed package
+ # verification
+ pass
+ else:
+ if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ else:
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if [attr for attr in self.__new_gpg_req__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (inst.tag, entry.get('name')))
+ return False
+ else:
+ # New format with Instances, or old style modified.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if inst.tag == 'Instance':
+ if [attr for attr in self.__new_req__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (inst.tag, entry.get('name')))
+ return False
+ return True
+
+ def FindExtraPackages(self):
+ """Find extra packages."""
+ packages = [entry.get('name') for entry in self.getSupportedEntries()]
+ extras = []
+
+ for (name, instances) in list(self.installed.items()):
+ if name not in packages:
+ extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
+ for installed_inst in instances:
+ if self.setup['extra']:
+ self.logger.info("Extra Package %s %s." % \
+ (name, self.str_evra(installed_inst)))
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
+ version = installed_inst.get('version'), \
+ release = installed_inst.get('release'))
+ if installed_inst.get('epoch', None) != None:
+ tmp_entry.set('epoch', str(installed_inst.get('epoch')))
+ if installed_inst.get('arch', None) != None:
+ tmp_entry.set('arch', installed_inst.get('arch'))
+ extras.append(extra_entry)
+ return extras
+
+
+ def FindExtraInstances(self, pkg_entry, installed_entry):
+ """
+ Check for installed instances that are not in the config.
+ Return a Package Entry with Instances to remove, or None if there
+ are no Instances to remove.
+
+ """
+ name = pkg_entry.get('name')
+ extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
+ instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ if name in self.installOnlyPkgs:
+ for installed_inst in installed_entry:
+ not_found = True
+ for inst in instances:
+ if self.pkg_vr_equal(inst, installed_inst) or \
+ self.inst_evra_equal(inst, installed_inst):
+ not_found = False
+ break
+ if not_found:
+ # Extra package.
+ self.logger.info("Extra InstallOnlyPackage %s %s." % \
+ (name, self.str_evra(installed_inst)))
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
+ version = installed_inst.get('version'), \
+ release = installed_inst.get('release'))
+ if installed_inst.get('epoch', None) != None:
+ tmp_entry.set('epoch', str(installed_inst.get('epoch')))
+ if installed_inst.get('arch', None) != None:
+ tmp_entry.set('arch', installed_inst.get('arch'))
+ else:
+ # Normal package, only check arch.
+ for installed_inst in installed_entry:
+ not_found = True
+ for inst in instances:
+ if installed_inst.get('arch', None) == inst.get('arch', None) or\
+ inst.tag == 'Package':
+ not_found = False
+ break
+ if not_found:
+ self.logger.info("Extra Normal Package Instance %s %s" % \
+ (name, self.str_evra(installed_inst)))
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
+ version = installed_inst.get('version'), \
+ release = installed_inst.get('release'))
+ if installed_inst.get('epoch', None) != None:
+ tmp_entry.set('epoch', str(installed_inst.get('epoch')))
+ if installed_inst.get('arch', None) != None:
+ tmp_entry.set('arch', installed_inst.get('arch'))
+
+ if len(extra_entry) == 0:
+ extra_entry = None
+
+ return extra_entry
+
+ def str_evra(self, instance):
+ """Convert evra dict entries to a string."""
+ if instance.get('epoch', '*') in ['*', None]:
+ return '%s-%s.%s' % (instance.get('version', '*'),
+ instance.get('release', '*'),
+ instance.get('arch', '*'))
+ else:
+ return '%s:%s-%s.%s' % (instance.get('epoch', '*'),
+ instance.get('version', '*'),
+ instance.get('release', '*'),
+ instance.get('arch', '*'))
+
+ def pkg_vr_equal(self, config_entry, installed_entry):
+ '''
+ Compare an old style entry to an installed entry, ignoring
+ the epoch and arch.
+ '''
+ if (config_entry.tag == 'Package' and \
+ config_entry.get('version') == installed_entry.get('version') and \
+ config_entry.get('release') == installed_entry.get('release')):
+ return True
+ else:
+ return False
+
+ def inst_evra_equal(self, config_entry, installed_entry):
+ """Compare new style instance to installed entry."""
+
+ if config_entry.get('epoch', None) != None:
+ epoch = int(config_entry.get('epoch'))
+ else:
+ epoch = None
+
+ if (config_entry.tag == 'Instance' and \
+ (epoch == installed_entry.get('epoch', 0) or \
+ (epoch == 0 and installed_entry.get('epoch', 0) == None) or \
+ (epoch == None and installed_entry.get('epoch', 0) == 0)) and \
+ config_entry.get('version') == installed_entry.get('version') and \
+ config_entry.get('release') == installed_entry.get('release') and \
+ config_entry.get('arch', None) == installed_entry.get('arch', None)):
+ return True
+ else:
+ return False
+
+ def getinstalledgpg(self):
+ """
+ Create a list of installed GPG key IDs.
+
+ The gpg-pubkey package version is the least significant 4 bytes
+ (big-endian) of the key ID which is good enough for our purposes.
+
+ """
+ init_ts = rpmtools.rpmtransactionset()
+ init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
+ gpg_hdrs = rpmtools.getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'})
+ keyids = [ header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
+ keyids.append('None')
+ init_ts.closeDB()
+ del init_ts
+ return keyids
+
+ def VerifyPath(self, entry, _):
+ """
+ We don't do anything here since all
+ Paths are processed in __init__
+ """
+ return True
diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
new file mode 100644
index 000000000..1b9a29478
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
@@ -0,0 +1,97 @@
+"""This is rc-update support."""
+
+import os
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class RcUpdate(Bcfg2.Client.Tools.SvcTool):
+ """RcUpdate support for Bcfg2."""
+ name = 'RcUpdate'
+ __execs__ = ['/sbin/rc-update', '/bin/rc-status']
+ __handles__ = [('Service', 'rc-update')]
+ __req__ = {'Service': ['name', 'status']}
+
+ def VerifyService(self, entry, _):
+ """
+ Verify Service status for entry.
+ Assumes we run in the "default" runlevel.
+
+ """
+ # check if service is enabled
+ cmd = '/sbin/rc-update show default | grep %s'
+ rc = self.cmd.run(cmd % entry.get('name'))[0]
+ is_enabled = (rc == 0)
+
+ if entry.get('mode', 'default') == 'supervised':
+ # check if init script exists
+ try:
+ os.stat('/etc/init.d/%s' % entry.get('name'))
+ except OSError:
+ self.logger.debug('Init script for service %s does not exist' %
+ entry.get('name'))
+ return False
+
+ # check if service is enabled
+ cmd = '/etc/init.d/%s status | grep started'
+ rc = self.cmd.run(cmd % entry.attrib['name'])[0]
+ is_running = (rc == 0)
+ else:
+ # we don't care
+ is_running = is_enabled
+
+ if entry.get('status') == 'on' and not (is_enabled and is_running):
+ entry.set('current_status', 'off')
+ return False
+
+ elif entry.get('status') == 'off' and (is_enabled or is_running):
+ entry.set('current_status', 'on')
+ return False
+
+ return True
+
+ def InstallService(self, entry):
+ """
+ Install Service entry
+ In supervised mode we also make sure it is (or is not) running.
+
+ """
+ # don't take any actions for mode='manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return False
+ self.logger.info('Installing Service %s' % entry.get('name'))
+ if entry.get('status') == 'on':
+ # make sure it's running if in supervised mode
+ if entry.get('mode', 'default') == 'supervised' \
+ and entry.get('current_status') == 'off':
+ self.start_service(entry)
+ # make sure it's enabled
+ cmd = '/sbin/rc-update add %s default'
+ rc = self.cmd.run(cmd % entry.get('name'))[0]
+ return (rc == 0)
+
+ elif entry.get('status') == 'off':
+ # make sure it's not running if in supervised mode
+ if entry.get('mode', 'default') == 'supervised' \
+ and entry.get('current_status') == 'on':
+ self.stop_service(entry)
+ # make sure it's disabled
+ cmd = '/sbin/rc-update del %s default'
+ rc = self.cmd.run(cmd % entry.get('name'))[0]
+ return (rc == 0)
+
+ return False
+
+ def FindExtra(self):
+ """Locate extra rc-update services."""
+ cmd = '/bin/rc-status -s | grep started'
+ allsrv = [line.split()[0] for line in self.cmd.run(cmd)[1]]
+ self.logger.debug('Found active services:')
+ self.logger.debug(allsrv)
+ specified = [srv.get('name') for srv in self.getSupportedEntries()]
+ return [Bcfg2.Client.XML.Element('Service',
+ type='rc-update',
+ name=name) \
+ for name in allsrv if name not in specified]
diff --git a/src/lib/Bcfg2/Client/Tools/SMF.py b/src/lib/Bcfg2/Client/Tools/SMF.py
new file mode 100644
index 000000000..f824410ad
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/SMF.py
@@ -0,0 +1,137 @@
+"""SMF support for Bcfg2"""
+
+import glob
+import os
+
+import Bcfg2.Client.Tools
+
+
+class SMF(Bcfg2.Client.Tools.SvcTool):
+ """Support for Solaris SMF Services."""
+ __handles__ = [('Service', 'smf')]
+ __execs__ = ['/usr/sbin/svcadm', '/usr/bin/svcs']
+ name = 'SMF'
+ __req__ = {'Service': ['name', 'status']}
+ __ireq__ = {'Service': ['name', 'status', 'FMRI']}
+
+ def get_svc_command(self, service, action):
+ if service.get('type') == 'lrc':
+ return Bcfg2.Client.Tools.SvcTool.get_svc_command(self,
+ service, action)
+ if action == 'stop':
+ return "/usr/sbin/svcadm disable %s" % (service.get('FMRI'))
+ elif action == 'restart':
+ return "/usr/sbin/svcadm restart %s" % (service.get('FMRI'))
+ elif action == 'start':
+ return "/usr/sbin/svcadm enable %s" % (service.get('FMRI'))
+
+ def GetFMRI(self, entry):
+ """Perform FMRI resolution for service."""
+ if not 'FMRI' in entry.attrib:
+ name = self.cmd.run("/usr/bin/svcs -H -o FMRI %s 2>/dev/null" % \
+ entry.get('name'))[1]
+ if name:
+ entry.set('FMRI', name[0])
+ return True
+ else:
+ self.logger.info('Failed to locate FMRI for service %s' % \
+ entry.get('name'))
+ return False
+ return True
+
+ def VerifyService(self, entry, _):
+ """Verify SMF Service entry."""
+ if not self.GetFMRI(entry):
+ self.logger.error("smf service %s doesn't have FMRI set" % \
+ entry.get('name'))
+ return False
+ if entry.get('FMRI').startswith('lrc'):
+ filename = entry.get('FMRI').split('/')[-1]
+ # this is a legacy service
+ gname = "/etc/rc*.d/%s" % filename
+ files = glob.glob(gname.replace('_', '.'))
+ if files:
+ self.logger.debug("Matched %s with %s" % \
+ (entry.get("FMRI"), ":".join(files)))
+ return entry.get('status') == 'on'
+ else:
+ self.logger.debug("No service matching %s" % \
+ (entry.get("FMRI")))
+ return entry.get('status') == 'off'
+ try:
+ srvdata = self.cmd.run("/usr/bin/svcs -H -o STA %s" % \
+ entry.get('FMRI'))[1][0].split()
+ except IndexError:
+ # Occurs when no lines are returned (service not installed)
+ return False
+
+ entry.set('current_status', srvdata[0])
+ if entry.get('status') == 'on':
+ return srvdata[0] == 'ON'
+ else:
+ return srvdata[0] in ['OFF', 'UN', 'MNT', 'DIS', 'DGD']
+
+ def InstallService(self, entry):
+ """Install SMF Service entry."""
+ # don't take any actions for mode='manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return False
+ self.logger.info("Installing Service %s" % (entry.get('name')))
+ if entry.get('status') == 'off':
+ if entry.get("FMRI").startswith('lrc'):
+ try:
+ loc = entry.get("FMRI")[4:].replace('_', '.')
+ self.logger.debug("Renaming file %s to %s" % \
+ (loc, loc.replace('/S', '/DISABLED.S')))
+ os.rename(loc, loc.replace('/S', '/DISABLED.S'))
+ return True
+ except OSError:
+ self.logger.error("Failed to rename init script %s" % \
+ (loc))
+ return False
+ else:
+ cmdrc = self.cmd.run("/usr/sbin/svcadm disable %s" % \
+ (entry.get('FMRI')))[0]
+ else:
+ if entry.get('FMRI').startswith('lrc'):
+ loc = entry.get("FMRI")[4:].replace('_', '.')
+ try:
+ os.stat(loc.replace('/S', '/DISABLED.S'))
+ self.logger.debug("Renaming file %s to %s" % \
+ (loc.replace('/S', '/DISABLED.S'), loc))
+ os.rename(loc.replace('/S', '/DISABLED.S'), loc)
+ cmdrc = 0
+ except OSError:
+ self.logger.debug("Failed to rename %s to %s" % \
+ (loc.replace('/S', '/DISABLED.S'), loc))
+ cmdrc = 1
+ else:
+ srvdata = self.cmd.run("/usr/bin/svcs -H -o STA %s" %
+ entry.get('FMRI'))[1][0].split()
+ if srvdata[0] == 'MNT':
+ cmdarg = 'clear'
+ else:
+ cmdarg = 'enable'
+ cmdrc = self.cmd.run("/usr/sbin/svcadm %s -r %s" % \
+ (cmdarg, entry.get('FMRI')))[0]
+ return cmdrc == 0
+
+ def Remove(self, svcs):
+ """Remove Extra SMF entries."""
+ # Extra service entry removal is nonsensical
+ # Extra service entries should be reflected in config, even if disabled
+ pass
+
+ def FindExtra(self):
+ """Find Extra SMF Services."""
+ allsrv = [name for name, version in \
+ [srvc.split() for srvc in
+ self.cmd.run("/usr/bin/svcs -a -H -o FMRI,STATE")[1]]
+ if version != 'disabled']
+
+ [allsrv.remove(svc.get('FMRI')) for svc in self.getSupportedEntries() \
+ if svc.get("FMRI") in allsrv]
+ return [Bcfg2.Client.XML.Element("Service", type='smf', name=name) \
+ for name in allsrv]
diff --git a/src/lib/Bcfg2/Client/Tools/SYSV.py b/src/lib/Bcfg2/Client/Tools/SYSV.py
new file mode 100644
index 000000000..eb4a13dfb
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/SYSV.py
@@ -0,0 +1,105 @@
+"""This provides bcfg2 support for Solaris SYSV packages."""
+
+import tempfile
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+noask = '''
+mail=
+instance=overwrite
+partial=nocheck
+runlevel=nocheck
+idepend=nocheck
+rdepend=nocheck
+space=ask
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+basedir=default
+'''
+
+
+class SYSV(Bcfg2.Client.Tools.PkgTool):
+ """Solaris SYSV package support."""
+ __execs__ = ["/usr/sbin/pkgadd", "/usr/bin/pkginfo"]
+ __handles__ = [('Package', 'sysv')]
+ __req__ = {'Package': ['name', 'version']}
+ __ireq__ = {'Package': ['name', 'url', 'version']}
+ name = 'SYSV'
+ pkgtype = 'sysv'
+ pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name'])))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ # noaskfile needs to live beyond __init__ otherwise file is removed
+ self.noaskfile = tempfile.NamedTemporaryFile()
+ self.noaskname = self.noaskfile.name
+ try:
+ self.noaskfile.write(noask)
+ # flush admin file contents to disk
+ self.noaskfile.flush()
+ self.pkgtool = (self.pkgtool[0] % ("-a %s" % (self.noaskname)), \
+ self.pkgtool[1])
+ except:
+ self.pkgtool = (self.pkgtool[0] % (""), self.pkgtool[1])
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
+ self.installed = {}
+ # Build list of packages
+ lines = self.cmd.run("/usr/bin/pkginfo -x")[1]
+ while lines:
+ # Splitting on whitespace means that packages with spaces in
+ # their version numbers don't work right. Found this with
+ # IBM TSM software with package versions like
+ # "Version 6 Release 1 Level 0.0"
+ # Should probably be done with a regex but this works.
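+ # Illustrative 'pkginfo -x' output (two lines per package):
+ # SUNWbash GNU Bourne-Again shell (bash)
+ # (i386) 11.10.0,REV=2005.01.08.01.09
+ # popping from the end pairs each '(arch) version' line with its
+ # package line.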
+ version = lines.pop().split(') ')[1]
+ pkg = lines.pop().split()[0]
+ self.installed[pkg] = version
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify Package status for entry."""
+ if not entry.get('version'):
+ self.logger.info("Insufficient information of Package %s; cannot Verify" % entry.get('name'))
+ return False
+
+ desiredVersion = entry.get('version')
+ if desiredVersion == 'any':
+ desiredVersion = self.installed.get(entry.get('name'), desiredVersion)
+
+ cmdrc = self.cmd.run("/usr/bin/pkginfo -q -v \"%s\" %s" % \
+ (desiredVersion, entry.get('name')))[0]
+
+ if cmdrc != 0:
+ if entry.get('name') in self.installed:
+ self.logger.debug("Package %s version incorrect: have %s want %s" \
+ % (entry.get('name'), self.installed[entry.get('name')],
+ desiredVersion))
+ else:
+ self.logger.debug("Package %s not installed" % (entry.get("name")))
+ else:
+ if self.setup['quick'] or entry.attrib.get('verify', 'true') == 'false':
+ return True
+ (vstat, odata) = self.cmd.run("/usr/sbin/pkgchk -n %s" % (entry.get('name')))
+ if vstat == 0:
+ return True
+ else:
+ output = [line for line in odata if line[:5] == 'ERROR']
+ if len([name for name in output if name.split()[-1] not in modlist]):
+ self.logger.debug("Package %s content verification failed" % \
+ (entry.get('name')))
+ else:
+ return True
+ return False
+
+ def RemovePackages(self, packages):
+ """Remove specified Sysv packages."""
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % (names))
+ self.cmd.run("/usr/sbin/pkgrm -a %s -n %s" % \
+ (self.noaskname, " ".join(names)))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/Systemd.py b/src/lib/Bcfg2/Client/Tools/Systemd.py
new file mode 100644
index 000000000..e3f6a4169
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Systemd.py
@@ -0,0 +1,59 @@
+"""This is systemd support."""
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+class Systemd(Bcfg2.Client.Tools.SvcTool):
+ """Systemd support for Bcfg2."""
+ name = 'Systemd'
+ __execs__ = ['/bin/systemctl']
+ __handles__ = [('Service', 'systemd')]
+ __req__ = {'Service': ['name', 'status']}
+
+ def get_svc_command(self, service, action):
+ return "/bin/systemctl %s %s.service" % (action, service.get('name'))
+
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+ cmd = "/bin/systemctl status %s.service " % (entry.get('name'))
+ raw = ''.join(self.cmd.run(cmd)[1])
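+ # Illustrative status output markers checked below: a missing unit
+ # reports 'Loaded: error ...', while a running unit reports a line
+ # like 'Active: active (running) since ...'.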
+
+ if raw.find('Loaded: error') >= 0:
+ entry.set('current_status', 'off')
+ status = False
+
+ elif raw.find('Active: active') >= 0:
+ entry.set('current_status', 'on')
+ if entry.get('status') == 'off':
+ status = False
+ else:
+ status = True
+
+ else:
+ entry.set('current_status', 'off')
+ if entry.get('status') == 'on':
+ status = False
+ else:
+ status = True
+
+ return status
+
+ def InstallService(self, entry):
+ """Install Service entry."""
+ # don't take any actions for mode = 'manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return True
+
+ if entry.get('status') == 'on':
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'enable'))[0]
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0]
+
+ else:
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0]
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'disable'))[0]
+
+ return not pstatus
diff --git a/src/lib/Bcfg2/Client/Tools/Upstart.py b/src/lib/Bcfg2/Client/Tools/Upstart.py
new file mode 100644
index 000000000..7afc8edd7
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Upstart.py
@@ -0,0 +1,93 @@
+"""Upstart support for Bcfg2."""
+
+import glob
+import re
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class Upstart(Bcfg2.Client.Tools.SvcTool):
+ """Upstart service support for Bcfg2."""
+ name = 'Upstart'
+ __execs__ = ['/lib/init/upstart-job',
+ '/sbin/initctl',
+ '/usr/sbin/service']
+ __handles__ = [('Service', 'upstart')]
+ __req__ = {'Service': ['name', 'status']}
+ svcre = re.compile("/etc/init/(?P<name>.*).conf")
+
+ def get_svc_command(self, service, action):
+ return "/usr/sbin/service %s %s" % (service.get('name'), action)
+
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry
+
+ Verifying whether or not the service is enabled can be done
+ at the file level with upstart using the contents of
+ /etc/init/servicename.conf. All we need to do is make sure
+ the service is running when it should be.
+ """
+
+ if entry.get('status') == 'ignore':
+ return True
+
+ if entry.get('parameters'):
+ params = entry.get('parameters')
+ else:
+ params = ''
+
+ try:
+ output = self.cmd.run('/usr/sbin/service %s status %s' % \
+ (entry.get('name'), params))[1][0]
+ except IndexError:
+ self.logger.error("Service %s not an Upstart service" % \
+ entry.get('name'))
+ return False
+
+ match = re.compile(r"%s( \(.*\))? (start|stop)/(running|waiting)" % entry.get('name')).match(output)
+ if match is None:
+ # service does not exist
+ entry.set('current_status', 'off')
+ status = False
+ elif match.group(3) == 'running':
+ # service is running
+ entry.set('current_status', 'on')
+ if entry.get('status') == 'off':
+ status = False
+ else:
+ status = True
+ else:
+ # service is not running
+ entry.set('current_status', 'off')
+ if entry.get('status') == 'on':
+ status = False
+ else:
+ status = True
+
+ return status
+
+ def InstallService(self, entry):
+ """Install Service for entry."""
+ # don't take any actions for mode='manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return False
+ if entry.get('status') == 'on':
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0]
+ elif entry.get('status') == 'off':
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0]
+ # pstatus is true if command failed
+ return not pstatus
+
+ def FindExtra(self):
+ """Locate extra Upstart services."""
+ specified = [entry.get('name') for entry in self.getSupportedEntries()]
+ extra = []
+ for name in [self.svcre.match(fname).group('name') for fname in
+ glob.glob("/etc/init/*.conf") \
+ if self.svcre.match(fname).group('name') not in specified]:
+ extra.append(name)
+ return [Bcfg2.Client.XML.Element('Service', type='upstart', name=name) \
+ for name in extra]
diff --git a/src/lib/Bcfg2/Client/Tools/VCS.py b/src/lib/Bcfg2/Client/Tools/VCS.py
new file mode 100644
index 000000000..e6081dc1c
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/VCS.py
@@ -0,0 +1,150 @@
+"""VCS support."""
+
+# TODO:
+# * git_write_index
+# * add svn support
+# * integrate properly with reports
+missing = []
+
+import os
+import shutil
+import sys
+# python-dulwich git imports
+try:
+ import dulwich
+ import dulwich
+ import dulwich.repo
+ import dulwich.client
+ import dulwich.index
+ from dulwich.errors import NotGitRepository
+except ImportError:
+# subversion import
+try:
+ import pysvn
+except ImportError:
+ missing.append('svn')
+
+import Bcfg2.Client.Tools
+
+
+class VCS(Bcfg2.Client.Tools.Tool):
+ """VCS support."""
+ name = 'VCS'
+ __handles__ = [('Path', 'vcs')]
+ __req__ = {'Path': ['name',
+ 'type',
+ 'vcstype',
+ 'sourceurl',
+ 'revision']}
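+ # Illustrative (hypothetical) entry handled by this tool:
+ # <Path type="vcs" vcstype="git" name="/opt/app"
+ # sourceurl="git://example.com/app.git" revision="<sha1>"/>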
+
+ def __init__(self, logger, cfg, setup):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup)
+ self.cfg = cfg
+
+ def git_write_index(self, entry):
+ """Write the git index"""
+ pass
+
+ def Verifygit(self, entry, _):
+ """Verify git repositories"""
+ try:
+ repo = dulwich.repo.Repo(entry.get('name'))
+ except NotGitRepository:
+ self.logger.info("Repository %s does not exist" %
+ entry.get('name'))
+ return False
+ cur_rev = repo.head()
+
+ if cur_rev != entry.get('revision'):
+ self.logger.info("At revision %s need to go to revision %s" %
+ (cur_rev, entry.get('revision')))
+ return False
+
+ return True
+
+ def Installgit(self, entry):
+ """Checkout contents from a git repository"""
+ destname = entry.get('name')
+ if os.path.lexists(destname):
+ # remove incorrect contents
+ try:
+ if os.path.isdir(destname):
+ shutil.rmtree(destname)
+ else:
+ os.remove(destname)
+ except OSError:
+ self.logger.info('Failed to remove %s' % \
+ destname)
+ return False
+
+ destr = dulwich.repo.Repo.init(destname, mkdir=True)
+ cl, host_path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
+ remote_refs = cl.fetch(host_path,
+ destr,
+ determine_wants=destr.object_store.determine_wants_all,
+ progress=sys.stdout.write)
+ destr.refs['refs/heads/master'] = entry.get('revision')
+ dtree = destr[entry.get('revision')].tree
+ obj_store = destr.object_store
+ for fname, mode, sha in obj_store.iter_tree_contents(dtree):
+ fullpath = os.path.join(destname, fname)
+ try:
+ f = open(os.path.join(destname, fname), 'wb')
+ except IOError:
+ dirname = os.path.split(fullpath)[0]
+ os.makedirs(dirname)
+ f = open(os.path.join(destname, fname), 'wb')
+ f.write(destr[sha].data)
+ f.close()
+ os.chmod(os.path.join(destname, fname), mode)
+ return True
+ # FIXME: figure out how to write the git index properly
+ #iname = "%s/.git/index" % entry.get('name')
+ #f = open(iname, 'w+')
+ #entries = obj_store[sha].iteritems()
+ #try:
+ # dulwich.index.write_index(f, entries)
+ #finally:
+ # f.close()
+
+ def Verifysvn(self, entry, _):
+ """Verify svn repositories"""
+ client = pysvn.Client()
+ try:
+ cur_rev = str(client.info(entry.get('name')).revision.number)
+ except:
+ self.logger.info("Repository %s does not exist" % entry.get('name'))
+ return False
+
+ if cur_rev != entry.get('revision'):
+ self.logger.info("At revision %s need to go to revision %s" %
+ (cur_rev, entry.get('revision')))
+ return False
+
+ return True
+
+ def Installsvn(self, entry):
+ """Checkout contents from a svn repository"""
+ try:
+ client = pysvn.Client()
+ client.update(entry.get('name'), recurse=True)
+ except:
+ self.logger.error("Failed to update repository", exc_info=1)
+ return False
+
+ return True
+
+ def VerifyPath(self, entry, _):
+ vcs = entry.get('vcstype')
+ if vcs in missing:
+ self.logger.error("Missing %s python libraries. Cannot verify" %
+ vcs)
+ return False
+ ret = getattr(self, 'Verify%s' % vcs)
+ return ret(entry, _)
+
+ def InstallPath(self, entry):
+ vcs = entry.get('vcstype')
+ if vcs in missing:
+ self.logger.error("Missing %s python libraries. "
+ "Unable to install" % vcs)
+ return False
+ ret = getattr(self, 'Install%s' % vcs)
+ return ret(entry)
diff --git a/src/lib/Bcfg2/Client/Tools/YUM24.py b/src/lib/Bcfg2/Client/Tools/YUM24.py
new file mode 100644
index 000000000..4e488b9da
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/YUM24.py
@@ -0,0 +1,422 @@
+"""This provides bcfg2 support for yum."""
+
+import copy
+import os.path
+import sys
+import yum
+import Bcfg2.Client.XML
+import Bcfg2.Client.Tools.RPMng
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+YAD = True
+CP = ConfigParser.ConfigParser()
+try:
+ if '-C' in sys.argv:
+ CP.read([sys.argv[sys.argv.index('-C') + 1]])
+ else:
+ CP.read(['/etc/bcfg2.conf'])
+ if CP.get('YUMng', 'autodep').lower() == 'false':
+ YAD = False
+except:
+ pass
+
+if not hasattr(Bcfg2.Client.Tools.RPMng, 'RPMng'):
+ raise ImportError
+
+
+def build_yname(pkgname, inst):
+ """Build yum appropriate package name."""
+ ypname = pkgname
+ if inst.get('version') != 'any':
+ ypname += '-'
+ if inst.get('epoch', False):
+ ypname += "%s:" % inst.get('epoch')
+ if inst.get('version', False) and inst.get('version') != 'any':
+ ypname += "%s" % (inst.get('version'))
+ if inst.get('release', False) and inst.get('release') != 'any':
+ ypname += "-%s" % (inst.get('release'))
+ if inst.get('arch', False) and inst.get('arch') != 'any':
+ ypname += ".%s" % (inst.get('arch'))
+ return ypname
+
+
+class YUM24(Bcfg2.Client.Tools.RPMng.RPMng):
+ """Support for Yum packages."""
+ pkgtype = 'yum'
+
+ name = 'YUM24'
+ __execs__ = ['/usr/bin/yum', '/var/lib/rpm']
+ __handles__ = [('Package', 'yum'),
+ ('Package', 'rpm'),
+ ('Path', 'ignore')]
+
+ __req__ = {'Package': ['name', 'version']}
+ __ireq__ = {'Package': ['name']}
+ #__ireq__ = {'Package': ['name', 'version']}
+
+ __new_req__ = {'Package': ['name'],
+ 'Instance': ['version', 'release', 'arch']}
+ __new_ireq__ = {'Package': ['name'], \
+ 'Instance': []}
+ #__new_ireq__ = {'Package': ['name', 'uri'], \
+ # 'Instance': ['simplefile', 'version', 'release', 'arch']}
+
+ __gpg_req__ = {'Package': ['name', 'version']}
+ __gpg_ireq__ = {'Package': ['name', 'version']}
+
+ __new_gpg_req__ = {'Package': ['name'],
+ 'Instance': ['version', 'release']}
+ __new_gpg_ireq__ = {'Package': ['name'],
+ 'Instance': ['version', 'release']}
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.RPMng.RPMng.__init__(self, logger, setup, config)
+ self.__important__ = self.__important__ + \
+ [entry.get('name') for struct in config \
+ for entry in struct \
+ if entry.tag in ['Path', 'ConfigFile'] and \
+ (entry.get('name').startswith('/etc/yum.d') \
+ or entry.get('name').startswith('/etc/yum.repos.d')) \
+ or entry.get('name') == '/etc/yum.conf']
+ self.yum_avail = dict()
+ self.yum_installed = dict()
+ self.yb = yum.YumBase()
+ self.yb.doConfigSetup()
+ self.yb.doTsSetup()
+ self.yb.doRpmDBSetup()
+ yup = self.yb.doPackageLists(pkgnarrow='updates')
+ if hasattr(self.yb.rpmdb, 'pkglist'):
+ yinst = self.yb.rpmdb.pkglist
+ else:
+ yinst = self.yb.rpmdb.getPkgList()
+ for dest, source in [(self.yum_avail, yup.updates),
+ (self.yum_installed, yinst)]:
+ for pkg in source:
+ if dest is self.yum_avail:
+ pname = pkg.name
+ data = {pkg.arch: (pkg.epoch, pkg.version, pkg.release)}
+ else:
+ pname = pkg[0]
+ if pkg[1] is None:
+ a = 'noarch'
+ else:
+ a = pkg[1]
+ if pkg[2] is None:
+ e = '0'
+ else:
+ e = pkg[2]
+ data = {a: (e, pkg[3], pkg[4])}
+ if pname in dest:
+ dest[pname].update(data)
+ else:
+ dest[pname] = dict(data)
+
+ def VerifyPackage(self, entry, modlist):
+ pinned_version = None
+ if entry.get('version', False) == 'auto':
+ # old style entry; synthesize Instances from current installed
+ if entry.get('name') not in self.yum_installed and \
+ entry.get('name') not in self.yum_avail:
+ # new entry; fall back to default
+ entry.set('version', 'any')
+ else:
+ data = copy.copy(self.yum_installed[entry.get('name')])
+ if entry.get('name') in self.yum_avail:
+ # installed but out of date
+ data.update(self.yum_avail[entry.get('name')])
+ for (arch, (epoch, vers, rel)) in list(data.items()):
+ x = Bcfg2.Client.XML.SubElement(entry, "Instance",
+ name=entry.get('name'),
+ version=vers, arch=arch,
+ release=rel, epoch=epoch)
+ if 'verify_flags' in entry.attrib:
+ x.set('verify_flags', entry.get('verify_flags'))
+ if 'verify' in entry.attrib:
+ x.set('verify', entry.get('verify'))
+
+ if entry.get('type', False) == 'yum':
+ # Check for virtual provides or packages. If we don't have
+ # this package use Yum to resolve it to a real package name
+ knownPkgs = list(self.yum_installed.keys()) + list(self.yum_avail.keys())
+ if entry.get('name') not in knownPkgs:
+ # If the package name matches something installed
+ # or available then that's the correct package.
+ try:
+ pkgDict = dict([(i.name, i) for i in \
+ self.yb.returnPackagesByDep(entry.get('name'))])
+ except yum.Errors.YumBaseError:
+ e = sys.exc_info()[1]
+ self.logger.error('Yum Error Depsolving for %s: %s' % \
+ (entry.get('name'), str(e)))
+ pkgDict = {}
+
+ if len(pkgDict) > 1:
+ # What do we do with multiple packages?
+ s = "YUMng: returnPackagesByDep(%s) returned many packages"
+ self.logger.info(s % entry.get('name'))
+ s = "YUMng: matching packages: %s"
+ self.logger.info(s % str(list(pkgDict.keys())))
+ pkgs = set(pkgDict.keys()) & set(self.yum_installed.keys())
+ if len(pkgs) > 0:
+ # Virtual packages matches an installed real package
+ pkg = pkgDict[pkgs.pop()]
+ s = "YUMng: chosing: %s" % pkg.name
+ self.logger.info(s)
+ else:
+ # What's the right package? This will fail verify
+ # and Yum should Do The Right Thing on package install
+ pkg = None
+ elif len(pkgDict) == 1:
+ pkg = list(pkgDict.values())[0]
+ else: # len(pkgDict) == 0
+ s = "YUMng: returnPackagesByDep(%s) returned no results"
+ self.logger.info(s % entry.get('name'))
+ pkg = None
+
+ if pkg is not None:
+ s = "YUMng: remapping virtual package %s to %s"
+ self.logger.info(s % (entry.get('name'), pkg.name))
+ entry.set('name', pkg.name)
+
+ return Bcfg2.Client.Tools.RPMng.RPMng.VerifyPackage(self, entry,
+ modlist)
+
+ def Install(self, packages, states):
+ """
+ Try and fix everything that RPMng.VerifyPackages() found wrong for
+ each Package Entry. This can result in individual RPMs being
+ installed (for the first time), deleted, downgraded
+ or upgraded.
+
+ NOTE: YUM cannot reinstall a package that it thinks is already
+ installed.
+
+ packages is a list of Package Elements that has
+ states[<Package Element>] == False
+
+ The following effects occur:
+ - states{} is conditionally updated for each package.
+ - self.installed{} is rebuilt, possibly multiple times.
+ - self.instance_status{} is conditionally updated for each instance
+ of a package.
+ - Each package will be added to self.modified[] if its states{}
+ entry is set to True.
+
+ """
+ self.logger.info('Running YUMng.Install()')
+
+ install_pkgs = []
+ gpg_keys = []
+ upgrade_pkgs = []
+
+ # Remove extra instances.
+ # Can not reverify because we don't have a package entry.
+ if len(self.extra_instances) > 0:
+ if (self.setup.get('remove') == 'all' or \
+ self.setup.get('remove') == 'packages'):
+ self.RemovePackages(self.extra_instances)
+ else:
+ self.logger.info("The following extra package instances will be removed by the '-r' option:")
+ for pkg in self.extra_instances:
+ for inst in pkg:
+ self.logger.info(" %s %s" % \
+ ((pkg.get('name'), self.str_evra(inst))))
+
+ # Figure out which instances of the packages actually need something
+ # doing to them and place in the appropriate work 'queue'.
+ for pkg in packages:
+ insts = [pinst for pinst in pkg \
+ if pinst.tag in ['Instance', 'Package']]
+ if insts:
+ for inst in insts:
+ if self.FixInstance(inst, self.instance_status[inst]):
+ if self.instance_status[inst].get('installed', False) \
+ == False:
+ if pkg.get('name') == 'gpg-pubkey':
+ gpg_keys.append(inst)
+ else:
+ install_pkgs.append(inst)
+ elif self.instance_status[inst].get('version_fail', \
+ False) == True:
+ upgrade_pkgs.append(inst)
+ else:
+ install_pkgs.append(pkg)
+
+ # Install GPG keys.
+ # Alternatively specify the required keys using 'gpgkey' in the
+ # repository definition in yum.conf. YUM will install the keys
+ # automatically.
+ if len(gpg_keys) > 0:
+ for inst in gpg_keys:
+ self.logger.info("Installing GPG keys.")
+ if inst.get('simplefile') is None:
+ self.logger.error("GPG key has no simplefile attribute")
+ continue
+ key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ cmdrc, output = self.cmd.run("rpm --import %s" % key_arg)
+ if cmdrc != 0:
+ self.logger.debug("Unable to install %s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+ else:
+ self.logger.debug("Installed %s-%s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ inst.get('version'), inst.get('release')))
+ self.RefreshPackages()
+ self.gpg_keyids = self.getinstalledgpg()
+ pkg = self.instance_status[gpg_keys[0]].get('pkg')
+ states[pkg] = self.VerifyPackage(pkg, [])
+
+ # Install packages.
+ if len(install_pkgs) > 0:
+ self.logger.info("Attempting to install packages")
+
+ if YAD:
+ pkgtool = "/usr/bin/yum -d0 -y install %s"
+ else:
+ pkgtool = "/usr/bin/yum -d0 install %s"
+
+ install_args = []
+ for inst in install_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ install_args.append(build_yname(pkg_arg, inst))
+
+ cmdrc, output = self.cmd.run(pkgtool % " ".join(install_args))
+ if cmdrc == 0:
+ # The yum command succeeded. All packages installed.
+ self.logger.info("Single Pass for Install Succeeded")
+ self.RefreshPackages()
+ else:
+ # The yum command failed. No packages installed.
+ # Try installing instances individually.
+ self.logger.error("Single Pass Install of Packages Failed")
+ installed_instances = []
+ for inst in install_pkgs:
+ pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
+
+ cmdrc, output = self.cmd.run(pkgtool % pkg_arg)
+ if cmdrc == 0:
+ installed_instances.append(inst)
+ else:
+ self.logger.debug("%s %s would not install." % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+ self.RefreshPackages()
+
+ # Fix upgradeable packages.
+ if len(upgrade_pkgs) > 0:
+ self.logger.info("Attempting to upgrade packages")
+
+ if YAD:
+ pkgtool = "/usr/bin/yum -d0 -y update %s"
+ else:
+ pkgtool = "/usr/bin/yum -d0 update %s"
+
+ upgrade_args = []
+ for inst in upgrade_pkgs:
+ pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
+ upgrade_args.append(pkg_arg)
+
+ cmdrc, output = self.cmd.run(pkgtool % " ".join(upgrade_args))
+ if cmdrc == 0:
+ # The yum command succeeded. All packages upgraded.
+ self.logger.info("Single Pass for Upgrade Succeeded")
+ self.RefreshPackages()
+ else:
+ # The yum command failed. No packages upgraded.
+ # Try upgrading instances individually.
+ self.logger.error("Single Pass Upgrade of Packages Failed")
+ installed_instances = []
+ for inst in upgrade_pkgs:
+ pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
+ cmdrc, output = self.cmd.run(pkgtool % pkg_arg)
+ if cmdrc == 0:
+ installed_instances.append(inst)
+ else:
+ self.logger.debug("%s %s would not install." % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+
+ self.RefreshPackages()
+
+ if not self.setup['kevlar']:
+ for pkg_entry in [p for p in packages if self.canVerify(p)]:
+ self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(pkg_entry, \
+ self.modlists.get(pkg_entry, []))
+
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def RemovePackages(self, packages):
+ """
+ Remove specified entries.
+
+ packages is a list of Package Entries with Instances generated
+ by FindExtraPackages().
+ """
+ self.logger.debug('Running YUMng.RemovePackages()')
+
+ if YAD:
+ pkgtool = "/usr/bin/yum -d0 -y erase %s"
+ else:
+ pkgtool = "/usr/bin/yum -d0 erase %s"
+
+ erase_args = []
+ for pkg in packages:
+ for inst in pkg:
+ if pkg.get('name') != 'gpg-pubkey':
+ pkg_arg = pkg.get('name') + '-'
+ if inst.get('epoch', False):
+ pkg_arg = pkg_arg + inst.get('epoch') + ':'
+ pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release')
+ if inst.get('arch', False):
+ pkg_arg = pkg_arg + '.' + inst.get('arch')
+ erase_args.append(pkg_arg)
+ else:
+ pkgspec = {'name': pkg.get('name'),
+ 'version': inst.get('version'),
+ 'release': inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
+ % (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted in a future version of the RPMng driver.")
+
+ cmdrc, output = self.cmd.run(pkgtool % " ".join(erase_args))
+ if cmdrc == 0:
+ self.modified += packages
+ for pkg in erase_args:
+ self.logger.info("Deleted %s" % (pkg))
+ else:
+ self.logger.info("Bulk erase failed with errors:")
+ self.logger.debug("Erase results = %s" % output)
+ self.logger.info("Attempting individual erase for each package.")
+ for pkg in packages:
+ pkg_modified = False
+ for inst in pkg:
+ if pkg.get('name') != 'gpg-pubkey':
+ pkg_arg = pkg.get('name') + '-'
+ if 'epoch' in inst.attrib:
+ pkg_arg = pkg_arg + inst.get('epoch') + ':'
+ pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release')
+ if 'arch' in inst.attrib:
+ pkg_arg = pkg_arg + '.' + inst.get('arch')
+ else:
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
+ % (pkg.get('name'), self.str_evra(pkg)))
+ self.logger.info(" This package will be deleted in a future version of the RPMng driver.")
+ continue
+
+ cmdrc, output = self.cmd.run(self.pkgtool % pkg_arg)
+ if cmdrc == 0:
+ pkg_modified = True
+ self.logger.info("Deleted %s" % pkg_arg)
+ else:
+ self.logger.error("unable to delete %s" % pkg_arg)
+ self.logger.debug("Failure = %s" % output)
+ if pkg_modified == True:
+ self.modified.append(pkg)
+
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/YUMng.py b/src/lib/Bcfg2/Client/Tools/YUMng.py
new file mode 100644
index 000000000..154676764
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/YUMng.py
@@ -0,0 +1,936 @@
+"""This provides bcfg2 support for yum."""
+
+import copy
+import os.path
+import sys
+import yum
+import yum.packages
+import yum.rpmtrans
+import yum.callbacks
+import yum.Errors
+import yum.misc
+import rpmUtils.arch
+import Bcfg2.Client.XML
+import Bcfg2.Client.Tools
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+
+def build_yname(pkgname, inst):
+ """Build yum appropriate package name."""
+ d = {}
+ if isinstance(inst, yum.packages.PackageObject):
+ for i in ['name', 'epoch', 'version', 'release', 'arch']:
+ d[i] = getattr(inst, i)
+ else:
+ d['name'] = pkgname
+ if inst.get('version') != 'any':
+ d['version'] = inst.get('version')
+ if inst.get('epoch', False):
+ d['epoch'] = inst.get('epoch')
+ if inst.get('release', False) and inst.get('release') != 'any':
+ d['release'] = inst.get('release')
+ if inst.get('arch', False) and inst.get('arch') != 'any':
+ d['arch'] = inst.get('arch')
+ return d
+
+
+def short_yname(nevra):
+ d = nevra.copy()
+ if 'version' in d:
+ d['ver'] = d['version']
+ del d['version']
+ if 'release' in d:
+ d['rel'] = d['release']
+ del d['release']
+ return d
+
+
+def nevraString(p):
+ if isinstance(p, yum.packages.PackageObject):
+ return str(p)
+ else:
+ ret = ""
+ for i, j in [('epoch', '%s:'), ('name', '%s'), ('version', '-%s'),
+ ('release', '-%s'), ('arch', '.%s')]:
+ if i in p:
+ ret = "%s%s" % (ret, j % p[i])
+ return ret
+
+
+class Parser(ConfigParser.ConfigParser):
+
+ def get(self, section, option, default):
+ """
+ Override ConfigParser.get: If the request option is not in the
+ config file then return the value of default rather than raise
+ an exception. We still raise exceptions on missing sections.
+ """
+ try:
+ return ConfigParser.ConfigParser.get(self, section, option)
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ return default
+
+
+class RPMDisplay(yum.rpmtrans.RPMBaseCallback):
+ """We subclass the default RPM transaction callback so that we
+ can control Yum's verbosity and pipe it through the right logger."""
+
+ def __init__(self, logger):
+ yum.rpmtrans.RPMBaseCallback.__init__(self)
+ self.logger = logger
+ self.state = None
+ self.package = None
+
+ def event(self, package, action, te_current, te_total,
+ ts_current, ts_total):
+ """
+ @param package: A yum package object or simple string of a package name
+ @param action: A yum.constant transaction set state or in the obscure
+ rpm repackage case it could be the string 'repackaging'
+ @param te_current: Current number of bytes processed in the transaction
+ element being processed
+ @param te_total: Total number of bytes in the transaction element being
+ processed
+ @param ts_current: number of processes completed in whole transaction
+ @param ts_total: total number of processes in the transaction.
+ """
+
+ if self.package != str(package) or action != self.state:
+ msg = "%s: %s" % (self.action[action], package)
+ self.logger.info(msg)
+ self.state = action
+ self.package = str(package)
+
+ def scriptout(self, package, msgs):
+ """Handle output from package scripts."""
+
+ if msgs:
+ msg = "%s: %s" % (package, msgs)
+ self.logger.debug(msg)
+
+ def errorlog(self, msg):
+ """Deal with error reporting."""
+ self.logger.error(msg)
+
+
+class YumDisplay(yum.callbacks.ProcessTransBaseCallback):
+ """Class to handle display of what step we are in the Yum transaction
+ such as downloading packages, etc."""
+
+ def __init__(self, logger):
+ self.logger = logger
+
+
+class YUMng(Bcfg2.Client.Tools.PkgTool):
+ """Support for Yum packages."""
+ pkgtype = 'yum'
+
+ name = 'YUMng'
+ __execs__ = []
+ __handles__ = [('Package', 'yum'),
+ ('Package', 'rpm'),
+ ('Path', 'ignore')]
+
+ __req__ = {'Package': ['name'],
+ 'Path': ['type']}
+ __ireq__ = {'Package': ['name']}
+
+ conflicts = ['YUM24', 'RPMng']
+
+ def __init__(self, logger, setup, config):
+ self._loadYumBase(setup=setup, logger=logger)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.ignores = [entry.get('name') for struct in config \
+ for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('type') == 'ignore']
+ self.instance_status = {}
+ self.extra_instances = []
+ self.modlists = {}
+ self._loadConfig()
+ self.__important__ = self.__important__ + \
+ [entry.get('name') for struct in config \
+ for entry in struct \
+ if entry.tag == 'Path' and \
+ (entry.get('name').startswith('/etc/yum.d') \
+ or entry.get('name').startswith('/etc/yum.repos.d')) \
+ or entry.get('name') == '/etc/yum.conf']
+ self.yum_avail = dict()
+ self.yum_installed = dict()
+
+ yup = self.yb.doPackageLists(pkgnarrow='updates')
+ if hasattr(self.yb.rpmdb, 'pkglist'):
+ yinst = self.yb.rpmdb.pkglist
+ else:
+ yinst = self.yb.rpmdb.getPkgList()
+ for dest, source in [(self.yum_avail, yup.updates),
+ (self.yum_installed, yinst)]:
+ for pkg in source:
+ if dest is self.yum_avail:
+ pname = pkg.name
+ data = [(pkg.arch, (pkg.epoch, pkg.version, pkg.release))]
+ else:
+ pname = pkg[0]
+ data = [(pkg[1], (pkg[2], pkg[3], pkg[4]))]
+ if pname in dest:
+ dest[pname].update(data)
+ else:
+ dest[pname] = dict(data)
+
+ def _loadYumBase(self, setup=None, logger=None):
+ ''' this may be called before PkgTool.__init__() is called on
+ this object (when the YUMng object is first instantiated;
+ PkgTool.__init__() calls RefreshPackages(), which requires a
+ YumBase object already exist), or after __init__() has
+ completed, when we reload the yum config before installing
+ packages. Consequently, we support both methods by allowing
+ setup and logger, the only object properties we use in this
+ function, to be passed as keyword arguments or to be omitted
+ and drawn from the object itself.'''
+ self.yb = yum.YumBase()
+
+ if setup is None:
+ setup = self.setup
+ if logger is None:
+ logger = self.logger
+
+ if setup['debug']:
+ debuglevel = 3
+ elif setup['verbose']:
+ debuglevel = 2
+ else:
+ debuglevel = 0
+
+ try:
+ self.yb.preconf.debuglevel = debuglevel
+ self.yb._getConfig()
+ except AttributeError:
+ self.yb._getConfig(self.yb.conf.config_file_path,
+ debuglevel=debuglevel)
+
+ try:
+ self.yb.doConfigSetup()
+ self.yb.doTsSetup()
+ self.yb.doRpmDBSetup()
+ except yum.Errors.RepoError:
+ err = sys.exc_info()[1]
+ logger.error("YUMng Repository error: %s" % err)
+ raise Bcfg2.Client.Tools.toolInstantiationError
+ except Exception:
+ err = sys.exc_info()[1]
+ logger.error("YUMng error: %s" % err)
+ raise Bcfg2.Client.Tools.toolInstantiationError
+
+ def _loadConfig(self):
+ # Process the YUMng section from the config file.
+ CP = Parser()
+ CP.read(self.setup.get('setup'))
+ truth = ['true', 'yes', '1']
+
+ # These are all boolean flags, either we do stuff or we don't
+ self.pkg_checks = CP.get(self.name, "pkg_checks", "true").lower() \
+ in truth
+ self.pkg_verify = CP.get(self.name, "pkg_verify", "true").lower() \
+ in truth
+ self.doInstall = CP.get(self.name, "installed_action",
+ "install").lower() == "install"
+ self.doUpgrade = CP.get(self.name,
+ "version_fail_action", "upgrade").lower() == "upgrade"
+ self.doReinst = CP.get(self.name, "verify_fail_action",
+ "reinstall").lower() == "reinstall"
+ self.verifyFlags = CP.get(self.name, "verify_flags",
+ "").lower().replace(' ', ',')
+
+ self.installOnlyPkgs = self.yb.conf.installonlypkgs
+ if 'gpg-pubkey' not in self.installOnlyPkgs:
+ self.installOnlyPkgs.append('gpg-pubkey')
+
+ self.logger.debug("YUMng: Install missing: %s" \
+ % self.doInstall)
+ self.logger.debug("YUMng: pkg_checks: %s" % self.pkg_checks)
+ self.logger.debug("YUMng: pkg_verify: %s" % self.pkg_verify)
+ self.logger.debug("YUMng: Upgrade on version fail: %s" \
+ % self.doUpgrade)
+ self.logger.debug("YUMng: Reinstall on verify fail: %s" \
+ % self.doReinst)
+ self.logger.debug("YUMng: installOnlyPkgs: %s" \
+ % str(self.installOnlyPkgs))
+ self.logger.debug("YUMng: verify_flags: %s" % self.verifyFlags)
+
+ def _fixAutoVersion(self, entry):
+ # old style entry; synthesize Instances from current installed
+ if entry.get('name') not in self.yum_installed and \
+ entry.get('name') not in self.yum_avail:
+ # new entry; fall back to default
+ entry.set('version', 'any')
+ else:
+ data = copy.copy(self.yum_installed[entry.get('name')])
+ if entry.get('name') in self.yum_avail:
+ # installed but out of date
+ data.update(self.yum_avail[entry.get('name')])
+ for (arch, (epoch, vers, rel)) in list(data.items()):
+ x = Bcfg2.Client.XML.SubElement(entry, "Instance",
+ name=entry.get('name'),
+ version=vers, arch=arch,
+ release=rel, epoch=epoch)
+ if 'verify_flags' in entry.attrib:
+ x.set('verify_flags', entry.get('verify_flags'))
+ if 'verify' in entry.attrib:
+ x.set('verify', entry.get('verify'))
+
+ def _buildInstances(self, entry):
+ instances = [inst for inst in entry \
+ if inst.tag == 'Instance' or inst.tag == 'Package']
+
+ # XXX: Instances should be uniquified; duplicates are sometimes
+ # returned, but the elements aren't directly comparable.
+
+ if instances == []:
+ # We have an old style no Instance entry. Convert it to new style.
+ instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
+ for attrib in list(entry.attrib.keys()):
+ instance.attrib[attrib] = entry.attrib[attrib]
+ instances = [instance]
+
+ return instances
+
+ def _getGPGKeysAsPackages(self):
+ """Return a list of the GPG RPM signing keys installed on the
+ system as a list of Package Objects."""
+
+ # XXX GPG keys existing in the RPMDB have numbered days
+ # and newer Yum versions will not return information about them
+ if hasattr(self.yb.rpmdb, 'returnGPGPubkeyPackages'):
+ return self.yb.rpmdb.returnGPGPubkeyPackages()
+ return self.yb.rpmdb.searchNevra(name='gpg-pubkey')
+
+ def _verifyHelper(self, po):
+ # This code primarily deals with a yum bug where the PO.verify()
+ # method does not properly take into account multilib sharing of files.
+ # Neither does RPM proper, really....it just ignores the problem.
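+ # For example, 32- and 64-bit builds of the same package may both own
+ # the same doc files; only one copy exists on disk, so verifying the
+ # "losing" package reports spurious mismatches. Problems common to all
+ # but one multilib sibling are therefore discarded below.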
+ def verify(p):
+ # disabling file checksums is a new feature yum 3.2.17-ish
+ try:
+ vResult = p.verify(fast=self.setup.get('quick', False))
+ except TypeError:
+ # Older Yum API
+ vResult = p.verify()
+ return vResult
+
+ key = (po.name, po.epoch, po.version, po.release, po.arch)
+ if key in self.verifyCache:
+ results = self.verifyCache[key]
+ else:
+ results = verify(po)
+ self.verifyCache[key] = results
+ if not rpmUtils.arch.isMultiLibArch():
+ return results
+
+ # Okay deal with a buggy yum multilib and verify
+ packages = self.yb.rpmdb.searchNevra(name=po.name, epoch=po.epoch,
+ ver=po.version, rel=po.release) # find all arches of pkg
+ if len(packages) == 1:
+ return results # No matching multilib packages
+
+ files = set(po.returnFileEntries()) # Will be the list of common fns
+ common = {}
+ for p in packages:
+ if p != po:
+ files = files & set(p.returnFileEntries())
+ for p in packages:
+ k = (p.name, p.epoch, p.version, p.release, p.arch)
+ self.logger.debug("Multilib Verify: comparing %s to %s" \
+ % (po, p))
+ if k in self.verifyCache:
+ v = self.verifyCache[k]
+ else:
+ v = verify(p)
+ self.verifyCache[k] = v
+
+ for fn, probs in list(v.items()):
+ # file problems must exist in ALL multilib packages to be real
+ if fn in files:
+ common[fn] = common.get(fn, 0) + 1
+
+ flag = len(packages) - 1
+ for fn, i in list(common.items()):
+ if i == flag:
+ # this fn had verify problems in all but one of the multilib
+ # packages. That means its correct in the package that's
+ # "on top." Therefore, this is a fake verify problem.
+ if fn in results:
+ del results[fn]
+
+ return results
+
+ def RefreshPackages(self):
+ """
+ Creates self.installed{} which is a dict of installed packages.
+
+ The dict items are lists of nevra dicts. This loosely matches the
+ config from the server and what rpmtools uses to specify packages.
+
+ e.g.
+
+ self.installed['foo'] = [ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'i386'},
+ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'x86_64'} ]
+ """
+
+ self.installed = {}
+ packages = self._getGPGKeysAsPackages() + \
+ self.yb.rpmdb.returnPackages()
+ for po in packages:
+ d = {}
+ for i in ['name', 'epoch', 'version', 'release', 'arch']:
+ if i == 'arch' and getattr(po, i) is None:
+ d[i] = 'noarch'
+ elif i == 'epoch' and getattr(po, i) is None:
+ d[i] = '0'
+ else:
+ d[i] = getattr(po, i)
+ self.installed.setdefault(po.name, []).append(d)
+
+ def VerifyPackage(self, entry, modlist, pinned_version=None):
+ """
+ Verify Package status for entry.
+ Performs the following:
+ - Checks for the presence of required Package Instances.
+ - Compares the evra 'version' info against self.installed{}.
+ - RPM level package verify (rpm --verify).
+ - Checks for the presence of unrequired package instances.
+
+ Produces the following dict and list for YUMng.Install() to use:
+ For installs/upgrades/fixes of required instances:
+ instance_status = { <Instance Element Object>:
+ { 'installed': True|False,
+ 'version_fail': True|False,
+ 'verify_fail': True|False,
+ 'pkg': <Package Element Object>,
+ 'modlist': [ <filename>, ... ],
+ 'verify' : [ <rpm --verify results> ]
+ }, ......
+ }
+
+ For deletions of unrequired instances:
+ extra_instances = [ <Package Element Object>, ..... ]
+
+ Constructs the text prompts for interactive mode.
+ """
+
+ if entry.get('version', False) == 'auto':
+ self._fixAutoVersion(entry)
+
+ self.logger.debug("Verifying package instances for %s" \
+ % entry.get('name'))
+
+ self.verifyCache = {} # Used for checking multilib packages
+ self.modlists[entry] = modlist
+ instances = self._buildInstances(entry)
+ packageCache = []
+ package_fail = False
+ qtext_versions = []
+ virtPkg = False
+ pkg_checks = self.pkg_checks and \
+ entry.get('pkg_checks', 'true').lower() == 'true'
+ pkg_verify = self.pkg_verify and \
+ entry.get('pkg_verify', 'true').lower() == 'true'
+
+ if entry.get('name') == 'gpg-pubkey':
+ POs = self._getGPGKeysAsPackages()
+ pkg_verify = False # No files here to verify
+ else:
+ POs = self.yb.rpmdb.searchNevra(name=entry.get('name'))
+ if len(POs) == 0:
+ # Some sort of virtual capability? Try to resolve it
+ POs = self.yb.rpmdb.searchProvides(entry.get('name'))
+ if len(POs) > 0:
+ virtPkg = True
+ self.logger.info("%s appears to be provided by:" \
+ % entry.get('name'))
+ for p in POs:
+ self.logger.info(" %s" % p)
+
+ for inst in instances:
+ nevra = build_yname(entry.get('name'), inst)
+ snevra = short_yname(nevra)
+ if nevra in packageCache:
+ continue # Ignore duplicate instances
+ else:
+ packageCache.append(nevra)
+
+ self.logger.debug("Verifying: %s" % nevraString(nevra))
+
+ # Set some defaults here
+ stat = self.instance_status.setdefault(inst, {})
+ stat['installed'] = True
+ stat['version_fail'] = False
+ stat['verify'] = {}
+ stat['verify_fail'] = False
+ stat['pkg'] = entry
+ stat['modlist'] = modlist
+ verify_flags = inst.get('verify_flags', self.verifyFlags)
+ verify_flags = verify_flags.lower().replace(' ', ',').split(',')
+
+ if 'arch' in nevra:
+ # If arch is specified use it to select the package
+ _POs = [ p for p in POs if p.arch == nevra['arch'] ]
+ else:
+ _POs = POs
+ if len(_POs) == 0:
+ # Package (name, arch) not installed
+ self.logger.debug(" %s is not installed" % nevraString(nevra))
+ stat['installed'] = False
+ package_fail = True
+ qtext_versions.append("I(%s)" % nevra)
+ continue
+
+ if not pkg_checks:
+ continue
+
+ # Check EVR
+ if virtPkg:
+ self.logger.debug(" Not checking version for virtual package")
+ _POs = [po for po in POs] # Make a copy
+ elif entry.get('name') == 'gpg-pubkey':
+ if 'version' not in nevra:
+ m = "Skipping verify: gpg-pubkey without an RPM version."
+ self.logger.warning(m)
+ continue
+ if 'release' not in nevra:
+ m = "Skipping verify: gpg-pubkey without an RPM release."
+ self.logger.warning(m)
+ continue
+ _POs = [p for p in POs if p.version == nevra['version'] \
+ and p.release == nevra['release']]
+ else:
+ _POs = self.yb.rpmdb.searchNevra(**snevra)
+ if len(_POs) == 0:
+ package_fail = True
+ stat['version_fail'] = True
+                    # Just choose the first pkg for the error message
+ self.logger.info(" %s: Wrong version installed. "
+ "Want %s, but have %s" % (entry.get("name"),
+ nevraString(nevra),
+ nevraString(POs[0])))
+ qtext_versions.append("U(%s)" % str(POs[0]))
+ continue
+
+ if self.setup.get('quick', False):
+ # Passed -q on the command line
+ continue
+ if not (pkg_verify and \
+ inst.get('pkg_verify', 'true').lower() == 'true'):
+ continue
+
+ # XXX: We ignore GPG sig checking the package as it
+ # has nothing to do with the individual file hash/size/etc.
+            # GPG checking the package only examines some header/rpmdb
+            # wackiness, and will not properly detect a compromised rpmdb.
+            # Yum's verify routine does not support it for that reason.
+
+ if len(_POs) > 1:
+ self.logger.debug(" Verify Instance found many packages:")
+ for po in _POs:
+ self.logger.debug(" %s" % str(po))
+
+ try:
+ vResult = self._verifyHelper(_POs[0])
+ except Exception:
+ e = sys.exc_info()[1]
+ # Unknown Yum exception
+ self.logger.warning(" Verify Exception: %s" % str(e))
+ package_fail = True
+ continue
+
+            # Now filter out the Yum-specific objects, modlist entries,
+            # and ignorable problems
+ ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
+ [ig.get('name') for ig in inst.findall('Ignore')] + \
+ self.ignores
+ for fn, probs in list(vResult.items()):
+ if fn in modlist:
+ self.logger.debug(" %s in modlist, skipping" % fn)
+ continue
+ if fn in ignores:
+ self.logger.debug(" %s in ignore list, skipping" % fn)
+ continue
+ tmp = []
+ for p in probs:
+ if p.type == 'missing' and os.path.islink(fn):
+ continue
+ elif 'no' + p.type in verify_flags:
+ continue
+ if p.type not in ['missingok', 'ghost']:
+ tmp.append((p.type, p.message))
+ if tmp != []:
+ stat['verify'][fn] = tmp
+
+ if stat['verify'] != {}:
+ stat['verify_fail'] = True
+ package_fail = True
+ self.logger.debug("It is suggested that you either manage "
+ "these files, revert the changes, or ignore "
+ "false failures:")
+ self.logger.debug(" Verify Problems:")
+ for fn, probs in list(stat['verify'].items()):
+ self.logger.debug(" %s" % fn)
+ for p in probs:
+ self.logger.debug(" %s: %s" % p)
+
+ if len(POs) > 0:
+ # Is this an install only package? We just look at the first one
+ provides = set([p[0] for p in POs[0].provides] + [POs[0].name])
+ install_only = len(set(self.installOnlyPkgs) & provides) > 0
+ else:
+ install_only = False
+
+ if virtPkg or (install_only and not self.setup['kevlar']):
+            # XXX: virtual capability supplied; we are probably dealing
+            # with multiple packages of different names. This check
+            # doesn't make a lot of sense in this case.
+ # XXX: install_only: Yum may clean some of these up itself.
+ # Otherwise having multiple instances of install only packages
+ # is considered correct
+ self.extra_instances = None
+ else:
+ self.extra_instances = self.FindExtraInstances(entry, POs)
+ if self.extra_instances is not None:
+ package_fail = True
+
+ return not package_fail
+
+ def FindExtraInstances(self, entry, POs):
+ """
+ Check for installed instances that are not in the config.
+ Return a Package Entry with Instances to remove, or None if there
+ are no Instances to remove.
+
+ """
+ if len(POs) == 0:
+ return None
+ name = entry.get('name')
+ extra_entry = Bcfg2.Client.XML.Element('Package', name=name,
+ type=self.pkgtype)
+ instances = self._buildInstances(entry)
+ _POs = [p for p in POs] # Shallow copy
+
+        # Algorithm is sensitive to duplicates, check for them
+ checked = []
+ for inst in instances:
+ nevra = build_yname(name, inst)
+ snevra = short_yname(nevra)
+ pkgs = self.yb.rpmdb.searchNevra(**snevra)
+ flag = True
+ if len(pkgs) > 0:
+ if pkgs[0] in checked:
+ continue # We've already taken care of this Instance
+ else:
+ checked.append(pkgs[0])
+ _POs.remove(pkgs[0])
+
+ for p in _POs:
+ self.logger.debug(" Extra Instance Found: %s" % str(p))
+ Bcfg2.Client.XML.SubElement(extra_entry, 'Instance',
+ epoch=p.epoch, name=p.name, version=p.version,
+ release=p.release, arch=p.arch)
+
+ if _POs == []:
+ return None
+ else:
+ return extra_entry
+
+ def FindExtraPackages(self):
+ """Find extra packages."""
+ packages = [e.get('name') for e in self.getSupportedEntries()]
+ extras = []
+
+ for p in list(self.installed.keys()):
+ if p not in packages:
+ entry = Bcfg2.Client.XML.Element('Package', name=p,
+ type=self.pkgtype)
+ for i in self.installed[p]:
+ inst = Bcfg2.Client.XML.SubElement(entry,
+ 'Instance',
+ epoch=i['epoch'],
+ version=i['version'],
+ release=i['release'],
+ arch=i['arch'])
+
+ extras.append(entry)
+
+ return extras
+
+ def _installGPGKey(self, inst, key_file):
+ """Examine the GPG keys carefully before installation. Avoid
+ installing duplicate keys. Returns True on successful install."""
+
+ # RPM Transaction Set
+ ts = self.yb.rpmdb.readOnlyTS()
+
+ if not os.path.exists(key_file):
+ self.logger.debug("GPG Key file %s not installed" % key_file)
+ return False
+
+ rawkey = open(key_file).read()
+ gpg = yum.misc.getgpgkeyinfo(rawkey)
+
+ ver = yum.misc.keyIdToRPMVer(gpg['keyid'])
+ rel = yum.misc.keyIdToRPMVer(gpg['timestamp'])
+ if not (ver == inst.get('version') and rel == inst.get('release')):
+ self.logger.info("GPG key file %s does not match gpg-pubkey-%s-%s"\
+ % (key_file, inst.get('version'),
+ inst.get('release')))
+ return False
+
+ if not yum.misc.keyInstalled(ts, gpg['keyid'],
+ gpg['timestamp']) == 0:
+ result = ts.pgpImportPubkey(yum.misc.procgpgkey(rawkey))
+ else:
+ self.logger.debug("gpg-pubkey-%s-%s already installed"\
+ % (inst.get('version'),
+ inst.get('release')))
+ return True
+
+ if result != 0:
+ self.logger.debug("Unable to install %s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'),
+ self.str_evra(inst)))
+ return False
+ else:
+ self.logger.debug("Installed %s-%s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'),
+ inst.get('version'), inst.get('release')))
+ return True
+
+ def _runYumTransaction(self):
+ def cleanup():
+ self.yb.closeRpmDB()
+ self.RefreshPackages()
+
+ rDisplay = RPMDisplay(self.logger)
+ yDisplay = YumDisplay(self.logger)
+ # Run the Yum Transaction
+ try:
+ rescode, restring = self.yb.buildTransaction()
+ except yum.Errors.YumBaseError:
+ e = sys.exc_info()[1]
+ self.logger.error("Yum transaction error: %s" % str(e))
+ cleanup()
+ return
+
+ self.logger.debug("Initial Yum buildTransaction() run said:")
+ self.logger.debug(" resultcode: %s, msgs: %s" \
+ % (rescode, restring))
+
+ if rescode != 1:
+ # Transaction built successfully, run it
+ try:
+ self.yb.processTransaction(callback=yDisplay,
+ rpmDisplay=rDisplay)
+ self.logger.info("Single Pass for Install Succeeded")
+ except yum.Errors.YumBaseError:
+ e = sys.exc_info()[1]
+ self.logger.error("Yum transaction error: %s" % str(e))
+ cleanup()
+ return
+ else:
+ # The yum command failed. No packages installed.
+ # Try installing instances individually.
+ self.logger.error("Single Pass Install of Packages Failed")
+ skipBroken = self.yb.conf.skip_broken
+ self.yb.conf.skip_broken = True
+ try:
+ rescode, restring = self.yb.buildTransaction()
+ if rescode != 1:
+ self.yb.processTransaction(callback=yDisplay,
+ rpmDisplay=rDisplay)
+ self.logger.debug(
+ "Second pass install did not install all packages")
+ else:
+ self.logger.error("Second pass yum install failed.")
+ self.logger.debug(" %s" % restring)
+            except yum.Errors.YumBaseError:
+                e = sys.exc_info()[1]
+                self.logger.error("Yum transaction error: %s" % str(e))
+
+ self.yb.conf.skip_broken = skipBroken
+
+ cleanup()
+
+ def Install(self, packages, states):
+ """
+ Try and fix everything that YUMng.VerifyPackages() found wrong for
+ each Package Entry. This can result in individual RPMs being
+ installed (for the first time), deleted, downgraded
+ or upgraded.
+
+ packages is a list of Package Elements that has
+ states[<Package Element>] == False
+
+ The following effects occur:
+ - states{} is conditionally updated for each package.
+ - self.installed{} is rebuilt, possibly multiple times.
+ - self.instance_status{} is conditionally updated for each instance
+ of a package.
+ - Each package will be added to self.modified[] if its states{}
+ entry is set to True.
+
+ """
+ self.logger.debug('Running YUMng.Install()')
+
+ install_pkgs = []
+ gpg_keys = []
+ upgrade_pkgs = []
+ reinstall_pkgs = []
+
+ def queuePkg(pkg, inst, queue):
+ if pkg.get('name') == 'gpg-pubkey':
+ gpg_keys.append(inst)
+ else:
+ queue.append(inst)
+
+ # Remove extra instances.
+ # Can not reverify because we don't have a package entry.
+ if self.extra_instances is not None and len(self.extra_instances) > 0:
+ if (self.setup.get('remove') == 'all' or \
+ self.setup.get('remove') == 'packages'):
+ self.RemovePackages(self.extra_instances)
+ else:
+ self.logger.info("The following extra package instances will be removed by the '-r' option:")
+ for pkg in self.extra_instances:
+ for inst in pkg:
+ self.logger.info(" %s %s" % \
+ ((pkg.get('name'), self.str_evra(inst))))
+
+ # Figure out which instances of the packages actually need something
+ # doing to them and place in the appropriate work 'queue'.
+ for pkg in packages:
+ insts = [pinst for pinst in pkg \
+ if pinst.tag in ['Instance', 'Package']]
+ if insts:
+ for inst in insts:
+ if inst not in self.instance_status:
+ m = " Asked to install/update package never verified"
+ p = nevraString(build_yname(pkg.get('name'), inst))
+ self.logger.warning("%s: %s" % (m, p))
+ continue
+ status = self.instance_status[inst]
+ if not status.get('installed', False) and self.doInstall:
+ queuePkg(pkg, inst, install_pkgs)
+ elif status.get('version_fail', False) and self.doUpgrade:
+ queuePkg(pkg, inst, upgrade_pkgs)
+ elif status.get('verify_fail', False) and self.doReinst:
+ queuePkg(pkg, inst, reinstall_pkgs)
+ else:
+ # Either there was no Install/Version/Verify
+ # task to be done or the user disabled the actions
+ # in the configuration. XXX Logging for the latter?
+ pass
+ else:
+ msg = "YUMng: Package tag found where Instance expected: %s"
+ self.logger.warning(msg % pkg.get('name'))
+ queuePkg(pkg, pkg, install_pkgs)
+
+ # Install GPG keys.
+ # Alternatively specify the required keys using 'gpgkey' in the
+ # repository definition in yum.conf. YUM will install the keys
+ # automatically.
+ if len(gpg_keys) > 0:
+ self.logger.info("Installing GPG keys.")
+ for inst in gpg_keys:
+ if inst.get('simplefile') is None:
+ self.logger.error("GPG key has no simplefile attribute")
+ continue
+ key_file = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ self._installGPGKey(inst, key_file)
+
+ self.RefreshPackages()
+ pkg = self.instance_status[gpg_keys[0]].get('pkg')
+ states[pkg] = self.VerifyPackage(pkg, [])
+
+ # We want to reload all Yum configuration in case we've
+ # deployed new .repo files we should consider
+ self._loadYumBase()
+
+ # Install packages.
+ if len(install_pkgs) > 0:
+ self.logger.info("Attempting to install packages")
+
+ for inst in install_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ self.logger.debug("Installing %s" % pkg_arg)
+ try:
+ self.yb.install(**build_yname(pkg_arg, inst))
+ except yum.Errors.YumBaseError:
+ yume = sys.exc_info()[1]
+ self.logger.error("Error installing package %s: %s" %
+ (pkg_arg, yume))
+
+ if len(upgrade_pkgs) > 0:
+ self.logger.info("Attempting to upgrade packages")
+
+ for inst in upgrade_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ self.logger.debug("Upgrading %s" % pkg_arg)
+ try:
+ self.yb.update(**build_yname(pkg_arg, inst))
+ except yum.Errors.YumBaseError:
+ yume = sys.exc_info()[1]
+ self.logger.error("Error upgrading package %s: %s" %
+ (pkg_arg, yume))
+
+ if len(reinstall_pkgs) > 0:
+ self.logger.info("Attempting to reinstall packages")
+ for inst in reinstall_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ self.logger.debug("Reinstalling %s" % pkg_arg)
+ try:
+ self.yb.reinstall(**build_yname(pkg_arg, inst))
+ except yum.Errors.YumBaseError:
+ yume = sys.exc_info()[1]
+ self.logger.error("Error reinstalling package %s: %s" %
+ (pkg_arg, yume))
+
+ self._runYumTransaction()
+
+ if not self.setup['kevlar']:
+ for pkg_entry in [p for p in packages if self.canVerify(p)]:
+ self.logger.debug("Reverifying Failed Package %s" \
+ % (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(pkg_entry,
+ self.modlists.get(pkg_entry, []))
+
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def RemovePackages(self, packages):
+ """
+ Remove specified entries.
+
+ packages is a list of Package Entries with Instances generated
+ by FindExtraPackages().
+ """
+ self.logger.debug('Running YUMng.RemovePackages()')
+
+ erase_args = []
+ for pkg in packages:
+ for inst in pkg:
+ nevra = build_yname(pkg.get('name'), inst)
+ if pkg.get('name') != 'gpg-pubkey':
+ self.yb.remove(**nevra)
+ self.modified.append(pkg)
+ else:
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s-%s"\
+ % (nevra['name'], nevra['version'], nevra['release']))
+ self.logger.info(" This package will be deleted in a future version of the YUMng driver.")
+
+ self._runYumTransaction()
+ self.extra = self.FindExtraPackages()
+
+ def VerifyPath(self, entry, _):
+ """Do nothing here since we only verify Path type=ignore"""
+ return True
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
new file mode 100644
index 000000000..c6cb6e239
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -0,0 +1,353 @@
+"""This contains all Bcfg2 Tool modules"""
+import os
+import stat
+import sys
+from subprocess import Popen, PIPE
+import time
+
+import Bcfg2.Client.XML
+
+__all__ = [tool.split('.')[0] \
+ for tool in os.listdir(os.path.dirname(__file__)) \
+ if tool.endswith(".py") and tool != "__init__.py"]
+
+drivers = [item for item in __all__ if item not in ['rpmtools']]
+default = [item for item in drivers if item not in ['RPM', 'Yum']]
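+# 'drivers' is every tool module except helper libraries such as rpmtools;
+# 'default' additionally leaves out the RPM and Yum drivers.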
+
+
+class toolInstantiationError(Exception):
+ """This error is called if the toolset cannot be instantiated."""
+ pass
+
+
+class executor:
+ """This class runs stuff for us"""
+
+ def __init__(self, logger):
+ self.logger = logger
+
+ def run(self, command):
+ """Run a command in a pipe dealing with stdout buffer overloads."""
+ p = Popen(command, shell=True, bufsize=16384,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+ output = p.communicate()[0]
+ for line in output.splitlines():
+ self.logger.debug('< %s' % line)
+ return (p.returncode, output.splitlines())
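+
+    # A minimal usage sketch (assuming an already-configured logging.Logger):
+    #     runner = executor(logger)
+    #     rc, lines = runner.run("echo hello")  # exit status and stdout lines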
+
+
+class Tool:
+ """
+    All tools subclass this. It defines the interfaces that client drivers must implement.
+ """
+ name = 'Tool'
+ __execs__ = []
+ __handles__ = []
+ __req__ = {}
+ __important__ = []
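+
+    # Subclasses override these class attributes. For instance, a chkconfig-style
+    # service driver might set (illustrative values):
+    #     __handles__ = [('Service', 'chkconfig')]
+    #     __req__ = {'Service': ['name', 'status']}
+    #     __execs__ = ['/sbin/chkconfig']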
+
+ def __init__(self, logger, setup, config):
+ self.__important__ = [entry.get('name') \
+ for struct in config for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('important') in ['true', 'True']]
+ self.setup = setup
+ self.logger = logger
+ if not hasattr(self, '__ireq__'):
+ self.__ireq__ = self.__req__
+ self.config = config
+ self.cmd = executor(logger)
+ self.modified = []
+ self.extra = []
+ self.handled = [entry for struct in self.config for entry in struct \
+ if self.handlesEntry(entry)]
+ for filename in self.__execs__:
+ try:
+ mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE])
+ if mode & stat.S_IEXEC != stat.S_IEXEC:
+ self.logger.debug("%s: %s not executable" % \
+ (self.name, filename))
+ raise toolInstantiationError
+ except OSError:
+ raise toolInstantiationError
+ except:
+ self.logger.debug("%s failed" % filename, exc_info=1)
+ raise toolInstantiationError
+
+ def BundleUpdated(self, _, states):
+ """This callback is used when bundle updates occur."""
+ return
+
+ def BundleNotUpdated(self, _, states):
+ """This callback is used when a bundle is not updated."""
+ return
+
+ def Inventory(self, states, structures=[]):
+ """Dispatch verify calls to underlying methods."""
+ if not structures:
+ structures = self.config.getchildren()
+ mods = self.buildModlist()
+ for (struct, entry) in [(struct, entry) for struct in structures \
+ for entry in struct.getchildren() \
+ if self.canVerify(entry)]:
+ try:
+ func = getattr(self, "Verify%s" % (entry.tag))
+ states[entry] = func(entry, mods)
+ except:
+ self.logger.error(
+ "Unexpected failure of verification method for entry type %s" \
+ % (entry.tag), exc_info=1)
+ self.extra = self.FindExtra()
+
+ def Install(self, entries, states):
+ """Install all entries in sublist."""
+ for entry in entries:
+ try:
+ func = getattr(self, "Install%s" % (entry.tag))
+ states[entry] = func(entry)
+ if states[entry]:
+ self.modified.append(entry)
+ except:
+ self.logger.error("Unexpected failure of install method for entry type %s" \
+ % (entry.tag), exc_info=1)
+
+ def Remove(self, entries):
+ """Remove specified extra entries"""
+ pass
+
+ def getSupportedEntries(self):
+ """Return a list of supported entries."""
+ return [entry for struct in \
+ self.config.getchildren() for entry in \
+ struct.getchildren() \
+ if self.handlesEntry(entry)]
+
+ def handlesEntry(self, entry):
+ """Return if entry is handled by this tool."""
+ return (entry.tag, entry.get('type')) in self.__handles__
+
+ def buildModlist(self):
+ '''Build a list of potentially modified POSIX paths for this entry'''
+ return [entry.get('name') for struct in self.config.getchildren() \
+ for entry in struct.getchildren() \
+ if entry.tag in ['Ignore', 'Path']]
+
+ def gatherCurrentData(self, entry):
+ """Default implementation of the information gathering routines."""
+ pass
+
+ def canVerify(self, entry):
+ """Test if entry has enough information to be verified."""
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Entry %s:%s reports bind failure: %s" % \
+ (entry.tag,
+ entry.get('name'),
+ entry.get('failure')))
+ return False
+
+ missing = [attr for attr in self.__req__[entry.tag] \
+ if attr not in entry.attrib]
+ if missing:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ self.logger.error("\t... due to absence of %s attribute(s)" % \
+ (":".join(missing)))
+            return False
+        try:
+ self.gatherCurrentData(entry)
+ except:
+ self.logger.error("Unexpected error in gatherCurrentData",
+ exc_info=1)
+ return False
+ return True
+
+ def FindExtra(self):
+ """Return a list of extra entries."""
+ return []
+
+ def canInstall(self, entry):
+ """Test if entry has enough information to be installed."""
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Cannot install entry %s:%s with bind failure" % \
+ (entry.tag, entry.get('name')))
+ return False
+
+ missing = [attr for attr in self.__ireq__[entry.tag] \
+ if attr not in entry.attrib or not entry.attrib[attr]]
+ if missing:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ self.logger.error("\t... due to absence of %s attribute" % \
+ (":".join(missing)))
+ return False
+ return True
+
+
+class PkgTool(Tool):
+ """
+ PkgTool provides a one-pass install with
+ fallback for use with packaging systems
+ """
+ pkgtool = ('echo %s', ('%s', ['name']))
+ pkgtype = 'echo'
+ name = 'PkgTool'
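+
+    # pkgtool is (install-command format, (per-package format, [entry attrs])).
+    # A hypothetical rpm-based subclass might use, for example:
+    #     pkgtool = ("rpm --quiet -U %s", ("%s", ["url"]))
+    # so each Package entry's 'url' attribute fills the inner '%s' and the
+    # space-joined list of those fills the outer '%s'.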
+
+ def __init__(self, logger, setup, config):
+ Tool.__init__(self, logger, setup, config)
+ self.installed = {}
+ self.Remove = self.RemovePackages
+ self.FindExtra = self.FindExtraPackages
+ self.RefreshPackages()
+
+ def VerifyPackage(self, dummy, _):
+ """Dummy verification method"""
+ return False
+
+ def Install(self, packages, states):
+ """
+ Run a one-pass install, followed by
+ single pkg installs in case of failure.
+ """
+ self.logger.info("Trying single pass package install for pkgtype %s" % \
+ self.pkgtype)
+
+ data = [tuple([pkg.get(field) for field in self.pkgtool[1][1]])
+ for pkg in packages]
+ pkgargs = " ".join([self.pkgtool[1][0] % datum for datum in data])
+
+ self.logger.debug("Installing packages: :%s:" % pkgargs)
+ self.logger.debug("Running command ::%s::" % (self.pkgtool[0] % pkgargs))
+
+ cmdrc = self.cmd.run(self.pkgtool[0] % pkgargs)[0]
+ if cmdrc == 0:
+ self.logger.info("Single Pass Succeded")
+ # set all package states to true and flush workqueues
+ pkgnames = [pkg.get('name') for pkg in packages]
+ for entry in [entry for entry in list(states.keys())
+ if entry.tag == 'Package'
+ and entry.get('type') == self.pkgtype
+ and entry.get('name') in pkgnames]:
+ self.logger.debug('Setting state to true for pkg %s' % \
+ (entry.get('name')))
+ states[entry] = True
+ self.RefreshPackages()
+ else:
+ self.logger.error("Single Pass Failed")
+ # do single pass installs
+ self.RefreshPackages()
+ for pkg in packages:
+ # handle state tracking updates
+ if self.VerifyPackage(pkg, []):
+ self.logger.info("Forcing state to true for pkg %s" % \
+ (pkg.get('name')))
+ states[pkg] = True
+ else:
+ self.logger.info("Installing pkg %s version %s" %
+ (pkg.get('name'), pkg.get('version')))
+ cmdrc = self.cmd.run(self.pkgtool[0] %
+ (self.pkgtool[1][0] %
+ tuple([pkg.get(field) for field in self.pkgtool[1][1]])))
+ if cmdrc[0] == 0:
+ states[pkg] = True
+ else:
+ self.logger.error("Failed to install package %s" % \
+ (pkg.get('name')))
+ self.RefreshPackages()
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def RefreshPackages(self):
+ """Dummy state refresh method."""
+ pass
+
+ def RemovePackages(self, packages):
+ """Dummy implementation of package removal method."""
+ pass
+
+ def FindExtraPackages(self):
+ """Find extra packages."""
+ packages = [entry.get('name') for entry in self.getSupportedEntries()]
+ extras = [data for data in list(self.installed.items()) \
+ if data[0] not in packages]
+ return [Bcfg2.Client.XML.Element('Package', name=name, \
+ type=self.pkgtype, version=version) \
+ for (name, version) in extras]
+
+
+class SvcTool(Tool):
+ """This class defines basic Service behavior"""
+ name = 'SvcTool'
+
+ def __init__(self, logger, setup, config):
+ Tool.__init__(self, logger, setup, config)
+ self.restarted = []
+
+ def get_svc_command(self, service, action):
+ """Return the basename of the command used to start/stop services."""
+ return '/etc/init.d/%s %s' % (service.get('name'), action)
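+        # e.g. for a Service entry named "sshd" and action "restart" this
+        # returns "/etc/init.d/sshd restart"; drivers for other service
+        # managers override this to build their own command line.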
+
+ def start_service(self, service):
+ self.logger.debug('Starting service %s' % service.get('name'))
+ return self.cmd.run(self.get_svc_command(service, 'start'))[0]
+
+ def stop_service(self, service):
+ self.logger.debug('Stopping service %s' % service.get('name'))
+ return self.cmd.run(self.get_svc_command(service, 'stop'))[0]
+
+ def restart_service(self, service):
+ self.logger.debug('Restarting service %s' % service.get('name'))
+ restart_target = service.get('target', 'restart')
+ return self.cmd.run(self.get_svc_command(service, restart_target))[0]
+
+ def check_service(self, service):
+ # not supported for this driver
+ return 0
+
+ def Remove(self, services):
+ """ Dummy implementation of service removal method """
+ if self.setup['servicemode'] != 'disabled':
+ for entry in services:
+ entry.set("status", "off")
+ self.InstallService(entry)
+
+ def BundleUpdated(self, bundle, states):
+ """The Bundle has been updated."""
+ if self.setup['servicemode'] == 'disabled':
+ return
+
+ for entry in [ent for ent in bundle if self.handlesEntry(ent)]:
+ mode = entry.get('mode', 'default')
+ if (mode == 'manual' or
+ (mode == 'interactive_only' and
+ not self.setup['interactive'])):
+ continue
+ # need to handle servicemode = (build|default)
+ # need to handle mode = (default|supervised)
+ rc = None
+ if entry.get('status') == 'on':
+ if self.setup['servicemode'] == 'build':
+ rc = self.stop_service(entry)
+ elif entry.get('name') not in self.restarted:
+ if self.setup['interactive']:
+ prompt = ('Restart service %s?: (y/N): ' %
+ entry.get('name'))
+ # py3k compatibility
+ try:
+ ans = raw_input(prompt)
+ except NameError:
+ ans = input(prompt)
+ if ans not in ['y', 'Y']:
+ continue
+ rc = self.restart_service(entry)
+ if not rc:
+ self.restarted.append(entry.get('name'))
+ else:
+ rc = self.stop_service(entry)
+ if rc:
+ self.logger.error("Failed to manipulate service %s" %
+ (entry.get('name')))
diff --git a/src/lib/Bcfg2/Client/Tools/launchd.py b/src/lib/Bcfg2/Client/Tools/launchd.py
new file mode 100644
index 000000000..700234cc8
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/launchd.py
@@ -0,0 +1,135 @@
+"""launchd support for Bcfg2."""
+
+import os
+import popen2
+
+import Bcfg2.Client.Tools
+
+
+class launchd(Bcfg2.Client.Tools.Tool):
+ """Support for Mac OS X launchd services."""
+ __handles__ = [('Service', 'launchd')]
+ __execs__ = ['/bin/launchctl', '/usr/bin/defaults']
+ name = 'launchd'
+ __req__ = {'Service': ['name', 'status']}
+
+ '''
+ Currently requires the path to the plist to load/unload,
+    and Name is actually a reverse-fqdn (or the label).
+ '''
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+
+ '''Locate plist file that provides given reverse-fqdn name
+ /Library/LaunchAgents Per-user agents provided by the administrator.
+ /Library/LaunchDaemons System wide daemons provided by the administrator.
+ /System/Library/LaunchAgents Mac OS X Per-user agents.
+ /System/Library/LaunchDaemons Mac OS X System wide daemons.'''
+ plistLocations = ["/Library/LaunchDaemons", "/System/Library/LaunchDaemons"]
+ self.plistMapping = {}
+ for directory in plistLocations:
+ for daemon in os.listdir(directory):
+ try:
+ if daemon.endswith(".plist"):
+ d = daemon[:-6]
+ else:
+ d = daemon
+ (stdout, _) = popen2.popen2('defaults read %s/%s Label' % (directory, d))
+ label = stdout.read().strip()
+ self.plistMapping[label] = "%s/%s" % (directory, daemon)
+ except KeyError: #perhaps this could be more robust
+ pass
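+
+        # After this scan self.plistMapping maps launchd labels to plist paths,
+        # e.g. (hypothetical entry):
+        #     {'org.example.mydaemon':
+        #      '/Library/LaunchDaemons/org.example.mydaemon.plist'}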
+
+ def FindPlist(self, entry):
+ return self.plistMapping.get(entry.get('name'), None)
+
+ def os_version(self):
+ version = ""
+ try:
+ vers = self.cmd.run('sw_vers')[1]
+ except:
+ return version
+
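+        # sw_vers output includes a line like "ProductVersion:    10.6.8";
+        # only the version number from that line is kept.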
+ for line in vers:
+ if line.startswith("ProductVersion"):
+ version = line.split()[-1]
+ return version
+
+ def VerifyService(self, entry, _):
+ """Verify launchd service entry."""
+ try:
+ services = self.cmd.run("/bin/launchctl list")[1]
+        except IndexError:  # happens when no services are running (which should never be the case)
+ services = []
+ # launchctl output changed in 10.5
+        # It is now three columns, with the last column being the name of the service.
+ version = self.os_version()
+ if version.startswith('10.5') or version.startswith('10.6'):
+ services = [s.split()[-1] for s in services]
+        if entry.get('name') in services:  # doesn't check if non-spawning services are started
+ return entry.get('status') == 'on'
+ else:
+ self.logger.debug("Didn't find service Loaded (launchd running under same user as bcfg)")
+ return entry.get('status') == 'off'
+
+ try: #Perhaps add the "-w" flag to load and unload to modify the file itself!
+ self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry))
+ except IndexError:
+ return 'on'
+ return False
+
+ def InstallService(self, entry):
+ """Enable or disable launchd item."""
+ # don't take any actions for mode='manual'
+ if entry.get('mode', 'default') == 'manual':
+ self.logger.info("Service %s mode set to manual. Skipping "
+ "installation." % (entry.get('name')))
+ return False
+ name = entry.get('name')
+ if entry.get('status') == 'on':
+ self.logger.error("Installing service %s" % name)
+ cmdrc = self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry))
+ cmdrc = self.cmd.run("/bin/launchctl start %s" % name)
+ else:
+ self.logger.error("Uninstalling service %s" % name)
+ cmdrc = self.cmd.run("/bin/launchctl stop %s" % name)
+ cmdrc = self.cmd.run("/bin/launchctl unload -w %s" % self.FindPlist(entry))
+ return cmdrc[0] == 0
+
+ def Remove(self, svcs):
+ """Remove Extra launchd entries."""
+ pass
+
+ def FindExtra(self):
+ """Find Extra launchd services."""
+ try:
+ allsrv = self.cmd.run("/bin/launchctl list")[1]
+ except IndexError:
+ allsrv = []
+
+        for svc in [entry.get("name") for entry in self.getSupportedEntries()]:
+            if svc in allsrv:
+                allsrv.remove(svc)
+ return [Bcfg2.Client.XML.Element("Service",
+ type='launchd',
+ name=name,
+ status='on') for name in allsrv]
+
+ def BundleUpdated(self, bundle, states):
+ """Reload launchd plist."""
+ for entry in [entry for entry in bundle if self.handlesEntry(entry)]:
+ if not self.canInstall(entry):
+ self.logger.error("Insufficient information to restart service %s" % (entry.get('name')))
+ else:
+ name = entry.get('name')
+ if entry.get('status') == 'on' and self.FindPlist(entry):
+ self.logger.info("Reloading launchd service %s" % name)
+ #stop?
+ self.cmd.run("/bin/launchctl stop %s" % name)
+ self.cmd.run("/bin/launchctl unload -w %s" % (self.FindPlist(entry)))#what if it disappeared? how do we stop services that are currently running but the plist disappeared?!
+ self.cmd.run("/bin/launchctl load -w %s" % (self.FindPlist(entry)))
+ self.cmd.run("/bin/launchctl start %s" % name)
+ else:
+ #only if necessary....
+ self.cmd.run("/bin/launchctl stop %s" % name)
+ self.cmd.run("/bin/launchctl unload -w %s" % (self.FindPlist(entry)))
diff --git a/src/lib/Bcfg2/Client/Tools/rpmtools.py b/src/lib/Bcfg2/Client/Tools/rpmtools.py
new file mode 100755
index 000000000..7441b2c06
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/rpmtools.py
@@ -0,0 +1,1114 @@
+#!/usr/bin/env python
+"""
+ Module that uses rpm-python to implement the following rpm
+ functionality for the bcfg2 RPM and YUM client drivers:
+
+ rpm -qa
+ rpm --verify
+ rpm --erase
+
+ The code closely follows the rpm C code.
+
+ The code was written to be used in the bcfg2 RPM/YUM drivers.
+
+ Some command line options have been provided to assist with
+ testing and development, but the output isn't pretty and looks
+ nothing like rpm output.
+
+ Run 'rpmtools' -h for the options.
+
+"""
+
+import grp
+import optparse
+import os
+import pwd
+import rpm
+import stat
+import sys
+if sys.version_info >= (2, 5):
+ import hashlib
+ py24compat = False
+else:
+ # FIXME: Remove when client python dep is 2.5 or greater
+ py24compat = True
+ import md5
+
+# Determine what prelink tools we have available.
+# The isprelink module is a python extension that examines the ELF headers
+# to see if the file has been prelinked. If it is not present a lot of files
+# are unnecessarily run through the prelink command.
+try:
+ from isprelink import *
+ isprelink_imported = True
+except ImportError:
+ isprelink_imported = False
+ #print '*********************** isprelink not loaded ***********************'
+
+# If the prelink command is installed on the system then we need to do
+# prelink -y on files.
+if os.access('/usr/sbin/prelink', os.X_OK):
+ prelink_exists = True
+else:
+ prelink_exists = False
+
+# If we don't have isprelink then we will use the prelink configuration file to
+# filter what we have to put through prelink -y.
+import re
+blacklist = []
+whitelist = []
+try:
+ f = open('/etc/prelink.conf', mode='r')
+ for line in f:
+ if line.startswith('#'):
+ continue
+ option, pattern = line.split()
+ if pattern.startswith('*.'):
+ pattern = pattern.replace('*.', '\.')
+ pattern += '$'
+ elif pattern.startswith('/'):
+ pattern = '^' + pattern
+ if option == '-b':
+ blacklist.append(pattern)
+ elif option == '-l':
+ whitelist.append(pattern)
+ f.close()
+except IOError:
+ pass
+
+blacklist_re = re.compile('|'.join(blacklist))
+whitelist_re = re.compile('|'.join(whitelist))
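+
+# For illustration (assuming typical prelink.conf contents): a "-b *.la" line
+# becomes the blacklist pattern r"\.la$", while "-l /usr/bin" becomes the
+# whitelist pattern "^/usr/bin".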
+
+# Flags that are not defined in rpm-python.
+# They are defined in lib/rpmcli.h
+# Bit(s) for verifyFile() attributes.
+#
+RPMVERIFY_NONE = 0 # /*!< */
+RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */
+RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */
+RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */
+RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */
+RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */
+RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */
+RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */
+RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */
+RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */
+RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */
+RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */
+RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */
+RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */
+
+RPMVERIFY_FAILURES = \
+ (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \
+ RPMVERIFY_LGETFILECONFAIL)
+
+# Bit(s) to control rpm_verify() operation.
+#
+VERIFY_DEFAULT = 0 # /*!< */
+VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
+VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
+VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
+VERIFY_USER = 1 << 3 # /*!< from --nouser */
+VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
+VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
+VERIFY_MODE = 1 << 6 # /*!< from --nomode */
+VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
+# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
+VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
+VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
+VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
+VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
+VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
+VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
+VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
+VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
+VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
+VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
+VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
+VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
+VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
+# /* bits 28-31 used in rpmVerifyAttrs */
+
+# Comes from C source: lib/rpmcli.h
+VERIFY_ATTRS = \
+ (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
+ VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
+
+VERIFY_ALL = \
+ (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
+ VERIFY_SIGNATURE | VERIFY_HDRCHK)
+
+
+# Some masks for what checks to NOT do on these file types.
+# The C code actually resets these for every file.
+DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+ RPMVERIFY_LINKTO)
+
+# These file types all have the same mask, but hopefully this will make the
+# code more readable.
+FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
+
+LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+ RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
+
+REG_FLAGS = ~(RPMVERIFY_LINKTO)
+
+
+def s_isdev(mode):
+ """
+ Check to see if a file is a device.
+
+ """
+ return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
+
+def rpmpackagelist(rts):
+ """
+ Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
+ Requires rpmtransactionset() to be run first to get a ts.
+ Returns a list of pkgspec dicts.
+
+ e.g. [ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' },
+ {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ]
+
+ """
+ return [{'name':header[rpm.RPMTAG_NAME],
+ 'epoch':header[rpm.RPMTAG_EPOCH],
+ 'version':header[rpm.RPMTAG_VERSION],
+ 'release':header[rpm.RPMTAG_RELEASE],
+ 'arch':header[rpm.RPMTAG_ARCH],
+ 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
+ for header in rts.dbMatch()]
+
+def getindexbykeyword(index_ts, **kwargs):
+ """
+    Return a list of indexes from the rpmdb matching keywords
+    ex: getindexbykeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+    keywords as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ index_mi = index_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys()) \
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in index_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(index_mi.instance())
+ del index_mi
+ return lst
+
+def getheadersbykeyword(header_ts, **kwargs):
+ """
+    Borrowed parts of this from Yum. Need to fix it though.
+ Epoch is not handled right.
+
+ Return list of headers from the rpmdb matching keywords
+ ex: getHeadersByKeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+    keywords as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ header_mi = header_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys()) \
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in header_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(hdr)
+ del header_mi
+ return lst
+
+def prelink_md5_check(filename):
+ """
+    Checks if a file is prelinked. If it is, run it through prelink -y
+ to get the unprelinked md5 and file size.
+
+    Always returns the md5; also returns the unprelinked file size, or 0 if
+    the file was not prelinked.
+
+ """
+ prelink = False
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False, 0
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+ elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+
+ fsize = 0
+ if py24compat:
+ chksum = md5.new()
+ else:
+ chksum = hashlib.md5()
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+ chksum.update(data)
+ plf.close()
+ file_md5 = chksum.hexdigest()
+ if prelink:
+ return file_md5, fsize
+ else:
+ return file_md5, 0
+
+def prelink_size_check(filename):
+ """
+    This check is only done if prelink_md5_check() was not done first.
+
+    Checks if a file is prelinked. If it is, run it through prelink -y
+ to get the unprelinked file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+
+ """
+ fsize = 0
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ # print "***** Warning isprelink extension failed to import ******"
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ plf.close()
+
+ return fsize
+
+def debug_verify_flags(vflags):
+ """
+ Decodes the verify flags bits.
+ """
+ if vflags & RPMVERIFY_MD5:
+ print('RPMVERIFY_MD5')
+ if vflags & RPMVERIFY_FILESIZE:
+ print('RPMVERIFY_FILESIZE')
+ if vflags & RPMVERIFY_LINKTO:
+ print('RPMVERIFY_LINKTO')
+ if vflags & RPMVERIFY_USER:
+ print('RPMVERIFY_USER')
+ if vflags & RPMVERIFY_GROUP:
+ print('RPMVERIFY_GROUP')
+ if vflags & RPMVERIFY_MTIME:
+ print('RPMVERIFY_MTIME')
+ if vflags & RPMVERIFY_MODE:
+ print('RPMVERIFY_MODE')
+ if vflags & RPMVERIFY_RDEV:
+ print('RPMVERIFY_RDEV')
+ if vflags & RPMVERIFY_CONTEXTS:
+ print('RPMVERIFY_CONTEXTS')
+ if vflags & RPMVERIFY_READLINKFAIL:
+ print('RPMVERIFY_READLINKFAIL')
+ if vflags & RPMVERIFY_READFAIL:
+ print('RPMVERIFY_READFAIL')
+ if vflags & RPMVERIFY_LSTATFAIL:
+ print('RPMVERIFY_LSTATFAIL')
+ if vflags & RPMVERIFY_LGETFILECONFAIL:
+ print('RPMVERIFY_LGETFILECONFAIL')
+
+def debug_file_flags(fflags):
+ """
+ Decodes the file flags bits.
+ """
+ if fflags & rpm.RPMFILE_CONFIG:
+ print('rpm.RPMFILE_CONFIG')
+
+ if fflags & rpm.RPMFILE_DOC:
+ print('rpm.RPMFILE_DOC')
+
+ if fflags & rpm.RPMFILE_ICON:
+ print('rpm.RPMFILE_ICON')
+
+ if fflags & rpm.RPMFILE_MISSINGOK:
+ print('rpm.RPMFILE_MISSINGOK')
+
+ if fflags & rpm.RPMFILE_NOREPLACE:
+ print('rpm.RPMFILE_NOREPLACE')
+
+ if fflags & rpm.RPMFILE_GHOST:
+ print('rpm.RPMFILE_GHOST')
+
+ if fflags & rpm.RPMFILE_LICENSE:
+ print('rpm.RPMFILE_LICENSE')
+
+ if fflags & rpm.RPMFILE_README:
+ print('rpm.RPMFILE_README')
+
+ if fflags & rpm.RPMFILE_EXCLUDE:
+        print('rpm.RPMFILE_EXCLUDE')
+
+ if fflags & rpm.RPMFILE_UNPATCHED:
+ print('rpm.RPMFILE_UNPATCHED')
+
+ if fflags & rpm.RPMFILE_PUBKEY:
+ print('rpm.RPMFILE_PUBKEY')
+
+def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
+ """
+    Verify a single file from a package's file info.
+
+ Returns a list of error flags, the file type and file name. The list
+ entries are strings that are the same as the labels for the bitwise
+ flags used in the C code.
+
+ """
+ (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
+ vflags, fuser, fgroup, fmd5) = fileinfo
+
+    # 1. rpmtsRootDir stuff. What does it do and where do I get it from?
+
+ file_results = []
+ flags = vflags
+
+ # Check to see if the file was installed - if not pretend all is ok.
+ # This is what the rpm C code does!
+ if fstate != rpm.RPMFILE_STATE_NORMAL:
+ return file_results
+
+ # Get the installed files stats
+ try:
+ lstat = os.lstat(fname)
+ except OSError:
+ if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)):
+ file_results.append('RPMVERIFY_LSTATFAIL')
+ #file_results.append(fname)
+ return file_results
+
+ # 5. Contexts? SELinux stuff?
+
+ # Setup what checks to do. This is straight out of the C code.
+ if stat.S_ISDIR(lstat.st_mode):
+ flags &= DIR_FLAGS
+ elif stat.S_ISLNK(lstat.st_mode):
+ flags &= LINK_FLAGS
+ elif stat.S_ISFIFO(lstat.st_mode):
+ flags &= FIFO_FLAGS
+ elif stat.S_ISCHR(lstat.st_mode):
+ flags &= CHR_FLAGS
+ elif stat.S_ISBLK(lstat.st_mode):
+ flags &= BLK_FLAGS
+ else:
+ flags &= REG_FLAGS
+
+ if (fflags & rpm.RPMFILE_GHOST):
+ flags &= GHOST_FLAGS
+
+ flags &= ~(omitmask | RPMVERIFY_FAILURES)
+
+ # 8. SELinux stuff.
+
+ prelink_size = 0
+ if flags & RPMVERIFY_MD5:
+ prelink_md5, prelink_size = prelink_md5_check(fname)
+ if prelink_md5 == False:
+ file_results.append('RPMVERIFY_MD5')
+ file_results.append('RPMVERIFY_READFAIL')
+ elif prelink_md5 != fmd5:
+ file_results.append('RPMVERIFY_MD5')
+
+ if flags & RPMVERIFY_LINKTO:
+ linkto = os.readlink(fname)
+ if not linkto:
+ file_results.append('RPMVERIFY_READLINKFAIL')
+ file_results.append('RPMVERIFY_LINKTO')
+ else:
+ if len(rpmlinktos) == 0 or linkto != rpmlinktos:
+ file_results.append('RPMVERIFY_LINKTO')
+
+ if flags & RPMVERIFY_FILESIZE:
+ if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
+ prelink_size = prelink_size_check(fname)
+ if (prelink_size != 0): # This is a prelinked file.
+ if (prelink_size != fsize):
+ file_results.append('RPMVERIFY_FILESIZE')
+ elif lstat.st_size != fsize: # It wasn't a prelinked file.
+ file_results.append('RPMVERIFY_FILESIZE')
+
+ if flags & RPMVERIFY_MODE:
+ metamode = fmode
+ filemode = lstat.st_mode
+
+ # Comparing the type of %ghost files is meaningless, but perms are ok.
+ if fflags & rpm.RPMFILE_GHOST:
+ metamode &= ~0xf000
+ filemode &= ~0xf000
+
+ if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
+ (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
+ file_results.append('RPMVERIFY_MODE')
+
+ if flags & RPMVERIFY_RDEV:
+ if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
+ stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
+ file_results.append('RPMVERIFY_RDEV')
+ elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
+ st_rdev = lstat.st_rdev
+ if frdev != st_rdev:
+ file_results.append('RPMVERIFY_RDEV')
+
+ if flags & RPMVERIFY_MTIME:
+ if lstat.st_mtime != fmtime:
+ file_results.append('RPMVERIFY_MTIME')
+
+ if flags & RPMVERIFY_USER:
+ try:
+ user = pwd.getpwuid(lstat.st_uid)[0]
+ except KeyError:
+ user = None
+ if not user or not fuser or (user != fuser):
+ file_results.append('RPMVERIFY_USER')
+
+ if flags & RPMVERIFY_GROUP:
+ try:
+ group = grp.getgrgid(lstat.st_gid)[0]
+ except KeyError:
+ group = None
+ if not group or not fgroup or (group != fgroup):
+ file_results.append('RPMVERIFY_GROUP')
+
+ return file_results
+
+def rpm_verify_dependencies(header):
+ """
+ Check package dependencies. Header is an rpm.hdr.
+
+ Don't like opening another ts to do this, but
+ it was the only way I could find of clearing the ts
+ out.
+
+ Have asked on the rpm-maint list on how to do
+ this the right way (28 Feb 2007).
+
+ ts.check() returns:
+
+ ((name, version, release), (reqname, reqversion), \
+ flags, suggest, sense)
+
+ """
+ _ts1 = rpmtransactionset()
+ _ts1.addInstall(header, 'Dep Check', 'i')
+ dep_errors = _ts1.check()
+ _ts1.closeDB()
+ return dep_errors
+
+def rpm_verify_package(vp_ts, header, verify_options):
+ """
+ Verify a single package specified by header. Header is an rpm.hdr.
+
+ If errors are found it returns a dictionary of errors.
+
+ """
+ # Set some transaction level flags.
+ vsflags = 0
+ if 'nodigest' in verify_options:
+ vsflags |= rpm._RPMVSF_NODIGESTS
+ if 'nosignature' in verify_options:
+ vsflags |= rpm._RPMVSF_NOSIGNATURES
+ ovsflags = vp_ts.setVSFlags(vsflags)
+
+ # Map from the Python options to the rpm bitwise flags.
+ omitmask = 0
+
+ if 'nolinkto' in verify_options:
+ omitmask |= VERIFY_LINKTO
+ if 'nomd5' in verify_options:
+ omitmask |= VERIFY_MD5
+ if 'nosize' in verify_options:
+ omitmask |= VERIFY_SIZE
+ if 'nouser' in verify_options:
+ omitmask |= VERIFY_USER
+ if 'nogroup' in verify_options:
+ omitmask |= VERIFY_GROUP
+ if 'nomtime' in verify_options:
+ omitmask |= VERIFY_MTIME
+ if 'nomode' in verify_options:
+ omitmask |= VERIFY_MODE
+ if 'nordev' in verify_options:
+ omitmask |= VERIFY_RDEV
+
+ omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
+ #print 'omitmask =', omitmask
+
+ package_results = {}
+
+ # Check Signatures and Digests.
+ # No idea what this might return. Need to break something to see.
+ # Setting the vsflags above determines what gets checked in the header.
+ hdr_stat = vp_ts.hdrCheck(header.unload())
+ if hdr_stat:
+ package_results['hdr'] = hdr_stat
+
+    # Check Package Dependencies.
+ if 'nodeps' not in verify_options:
+ dep_stat = rpm_verify_dependencies(header)
+ if dep_stat:
+ package_results['deps'] = dep_stat
+
+ # Check all the package files.
+ if 'nofiles' not in verify_options:
+ vp_fi = header.fiFromHeader()
+ for fileinfo in vp_fi:
+ # Do not bother doing anything with ghost files.
+ # This is what RPM does.
+ if fileinfo[4] & rpm.RPMFILE_GHOST:
+ continue
+
+ # This is only needed because of an inconsistency in the
+ # rpm.fi interface.
+ linktos = vp_fi.FLink()
+
+ file_stat = rpm_verify_file(fileinfo, linktos, omitmask)
+
+ #if len(file_stat) > 0 or options.verbose:
+ if len(file_stat) > 0:
+ fflags = fileinfo[4]
+ if fflags & rpm.RPMFILE_CONFIG:
+ file_stat.append('c')
+ elif fflags & rpm.RPMFILE_DOC:
+ file_stat.append('d')
+ elif fflags & rpm.RPMFILE_GHOST:
+ file_stat.append('g')
+ elif fflags & rpm.RPMFILE_LICENSE:
+ file_stat.append('l')
+ elif fflags & rpm.RPMFILE_PUBKEY:
+ file_stat.append('P')
+ elif fflags & rpm.RPMFILE_README:
+ file_stat.append('r')
+ else:
+ file_stat.append(' ')
+
+ file_stat.append(fileinfo[0]) # The filename.
+ package_results.setdefault('files', []).append(file_stat)
+
+ # Run the verify script if there is one.
+ # Do we want this?
+ #if 'noscripts' not in verify_options:
+ # script_stat = rpmVerifyscript()
+ # if script_stat:
+ # package_results['script'] = script_stat
+
+ # If there have been any errors, add the package nevra to the result.
+ if len(package_results) > 0:
+ package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \
+ header[rpm.RPMTAG_EPOCH], \
+ header[rpm.RPMTAG_VERSION], \
+ header[rpm.RPMTAG_RELEASE], \
+ header[rpm.RPMTAG_ARCH]))
+ else:
+ package_results = None
+
+ # Put things back the way we found them.
+ vsflags = vp_ts.setVSFlags(ovsflags)
+
+ return package_results
+
+def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
+ """
+ Requires rpmtransactionset() to be run first to get a ts.
+
+ pkgspec is a dict specifying the package
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ For all packages
+ {}
+
+ Or any combination of keywords to select one or more packages to verify.
+
+ options is a list of 'rpm --verify' options. Default is to check everything.
+ e.g.:
+ [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
+ 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
+ 'nomode', 'nordev' ]
+
+ Returns a list. One list entry per package. Each list entry is a
+ dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'.
+ Entries only get added for the failures. If nothing failed, None is
+ returned.
+
+    It's all a bit messy and probably needs reviewing.
+
+    [ { 'hdr': [???],
+        'deps': [((name, version, release), (reqname, reqversion),
+                flags, suggest, sense), .... ]
+        'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+                   ['filename2', 'RPMVERIFY_LSTATFAIL']]
+        'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] }
+      { 'hdr': [???],
+        'deps': [((name, version, release), (reqname, reqversion),
+                flags, suggest, sense), .... ]
+        'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+                   ['filename2', 'RPMVERIFY_LSTATFAIL']]
+        'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
+
+ """
+ verify_results = []
+ headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
+ for header in headers:
+ result = rpm_verify_package(verify_ts, header, verify_options)
+ if result:
+ verify_results.append(result)
+
+ return verify_results
+
+def rpmtransactionset():
+ """
+    A simple wrapper for rpm.TransactionSet() to keep everything together.
+ Might use it to set some ts level flags later.
+
+ """
+ ts = rpm.TransactionSet()
+ return ts
+
+class Rpmtscallback(object):
+ """
+ Callback for ts.run(). Used for adding, upgrading and removing packages.
+ Starting with all possible reasons codes, but bcfg2 will probably only
+ make use of a few of them.
+
+ Mostly just printing stuff at the moment to understand how the callback
+ is used.
+
+ """
+ def __init__(self):
+ self.fdnos = {}
+
+ def callback(self, reason, amount, total, key, client_data):
+ """
+ Generic rpmts call back.
+ """
+ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+ pass
+ #print 'rpm.RPMCALLBACK_INST_OPEN_FILE'
+ elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+ pass
+ #print 'rpm.RPMCALLBACK_INST_CLOSE_FILE'
+ elif reason == rpm.RPMCALLBACK_INST_START:
+ pass
+ #print 'rpm.RPMCALLBACK_INST_START'
+ elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
+ reason == rpm.RPMCALLBACK_INST_PROGRESS:
+ pass
+ #print 'rpm.RPMCALLBACK_TRANS_PROGRESS or \
+ # rpm.RPMCALLBACK_INST_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_TRANS_START:
+ pass
+ #print 'rpm.RPMCALLBACK_TRANS_START'
+ elif reason == rpm.RPMCALLBACK_TRANS_STOP:
+ pass
+ #print 'rpm.RPMCALLBACK_TRANS_STOP'
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
+ pass
+ #print 'rpm.RPMCALLBACK_REPACKAGE_START'
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+ pass
+ #print 'rpm.RPMCALLBACK_REPACKAGE_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
+ pass
+ #print 'rpm.RPMCALLBACK_REPACKAGE_STOP'
+ elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
+ pass
+ #print 'rpm.RPMCALLBACK_UNINST_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_UNINST_START:
+ pass
+ #print 'rpm.RPMCALLBACK_UNINST_START'
+ elif reason == rpm.RPMCALLBACK_UNINST_STOP:
+ pass
+ #print 'rpm.RPMCALLBACK_UNINST_STOP'
+ #print '***Package ', key, ' deleted ***'
+ # How do we get at this?
+ # RPM.modified += key
+ elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
+ pass
+ #print 'rpm.RPMCALLBACK_UNPACK_ERROR'
+ elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
+ pass
+ #print 'rpm.RPMCALLBACK_CPIO_ERROR'
+ elif reason == rpm.RPMCALLBACK_UNKNOWN:
+ pass
+ #print 'rpm.RPMCALLBACK_UNKNOWN'
+ else:
+ print('ERROR - Fell through callBack')
+
+ #print reason, amount, total, key, client_data
+
+def rpm_erase(erase_pkgspecs, erase_flags):
+ """
+ pkgspecs is a list of pkgspec dicts specifying packages
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ """
+ erase_ts_flags = 0
+ if 'noscripts' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS
+ if 'notriggers' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS
+ if 'repackage' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE
+
+ erase_ts = rpmtransactionset()
+ erase_ts.setFlags(erase_ts_flags)
+
+ for pkgspec in erase_pkgspecs:
+ idx_list = getindexbykeyword(erase_ts, **pkgspec)
+ if len(idx_list) > 1 and not 'allmatches' in erase_flags:
+ #pass
+ print('ERROR - Multiple package match for erase', pkgspec)
+ else:
+ for idx in idx_list:
+ erase_ts.addErase(idx)
+
+ #for te in erase_ts:
+ # print "%s %s:%s-%s.%s" % (te.N(), te.E(), te.V(), te.R(), te.A())
+
+ erase_problems = []
+ if 'nodeps' not in erase_flags:
+ erase_problems = erase_ts.check()
+
+ if erase_problems == []:
+ erase_ts.order()
+ erase_callback = Rpmtscallback()
+ erase_ts.run(erase_callback.callback, 'Erase')
+ #else:
+ # print 'ERROR - Dependency failures on package erase'
+ # print erase_problems
+
+ erase_ts.closeDB()
+ del erase_ts
+ return erase_problems
+
+def display_verify_file(file_results):
+ '''
+ Display file results similar to rpm --verify.
+ '''
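+    # For example, a config file failing the size, md5 and mtime checks prints
+    # roughly (hypothetical path):
+    #     S.5....T c /etc/hypothetical.conf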
+ filename = file_results[-1]
+ filetype = file_results[-2]
+
+ result_string = ''
+
+ if 'RPMVERIFY_LSTATFAIL' in file_results:
+ result_string = 'missing '
+ else:
+ if 'RPMVERIFY_FILESIZE' in file_results:
+ result_string = result_string + 'S'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MODE' in file_results:
+ result_string = result_string + 'M'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MD5' in file_results:
+ if 'RPMVERIFY_READFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + '5'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_RDEV' in file_results:
+ result_string = result_string + 'D'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_LINKTO' in file_results:
+ if 'RPMVERIFY_READLINKFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + 'L'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_USER' in file_results:
+ result_string = result_string + 'U'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_GROUP' in file_results:
+ result_string = result_string + 'G'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MTIME' in file_results:
+ result_string = result_string + 'T'
+ else:
+ result_string = result_string + '.'
+
+ print(result_string + ' ' + filetype + ' ' + filename)
+ sys.stdout.flush()
+
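+# Illustrative example (commented out) of the input display_verify_file()
+# expects: a list of RPMVERIFY_* failure strings with the file type and
+# file name as the last two items.  The values below are made up.
+#
+#     display_verify_file(['RPMVERIFY_FILESIZE', 'RPMVERIFY_MTIME',
+#                          'c', '/etc/example.conf'])
+#
+# would print something like "S......T c /etc/example.conf".
+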
+#===============================================================================
+# Some options and output to assist with development and testing.
+# These are not intended for normal use.
+if __name__ == "__main__":
+
+ p = optparse.OptionParser()
+
+ p.add_option('--name', action='store', \
+ default=None, \
+ help='''Package name to verify.
+
+ ******************************************
+ NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
+ ******************************************
+
+ The specified operation will be carried out on all
+ instances of packages that match the package specification
+ (name, epoch, version, release, arch).''')
+
+ p.add_option('--epoch', action='store', \
+ default=None, \
+ help='''Package epoch.''')
+
+ p.add_option('--version', action='store', \
+ default=None, \
+ help='''Package version.''')
+
+ p.add_option('--release', action='store', \
+ default=None, \
+ help='''Package release.''')
+
+ p.add_option('--arch', action='store', \
+ default=None, \
+ help='''Package arch.''')
+
+ p.add_option('--erase', '-e', action='store_true', \
+ default=None, \
+ help='''****************************************************
+ REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
+ PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
+ GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
+ INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
+ DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
+ ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
+ ****************************************************''')
+
+ p.add_option('--list', '-l', action='store_true', \
+                 help='''List package identity info. Roughly equivalent to
+                      rpm -qa; intended for use in RefreshPackages().''')
+
+ p.add_option('--verify', action='store_true', \
+                 help='''Verify Package(s). Output is only produced after all
+                      packages have been verified. Be patient.''')
+
+ p.add_option('--verbose', '-v', action='store_true', \
+ help='''Verbose output for --verify option. Output is the
+ same as rpm -v --verify.''')
+
+ p.add_option('--nodeps', action='store_true', \
+ default=False, \
+ help='Do not do dependency testing.')
+
+ p.add_option('--nodigest', action='store_true', \
+ help='Do not check package digests.')
+
+ p.add_option('--nofiles', action='store_true', \
+ help='Do not do file checks.')
+
+ p.add_option('--noscripts', action='store_true', \
+ help='Do not run verification scripts.')
+
+ p.add_option('--nosignature', action='store_true', \
+ help='Do not do package signature verification.')
+
+ p.add_option('--nolinkto', action='store_true', \
+ help='Do not do symlink tests.')
+
+ p.add_option('--nomd5', action='store_true', \
+ help='''Do not do MD5 checksums on files. Note that this does
+ not work for prelink files yet.''')
+
+ p.add_option('--nosize', action='store_true', \
+ help='''Do not do file size tests. Note that this does not work
+ for prelink files yet.''')
+
+ p.add_option('--nouser', action='store_true', \
+ help='Do not check file user ownership.')
+
+ p.add_option('--nogroup', action='store_true', \
+ help='Do not check file group ownership.')
+
+ p.add_option('--nomtime', action='store_true', \
+ help='Do not check file modification times.')
+
+ p.add_option('--nomode', action='store_true', \
+ help='Do not check file modes (permissions).')
+
+ p.add_option('--nordev', action='store_true', \
+ help='Do not check device node.')
+
+ p.add_option('--notriggers', action='store_true', \
+                 help='Do not generate triggers on erase.')
+
+ p.add_option('--repackage', action='store_true', \
+                 help='''Repackage packages on erase. Packages are put
+                      in /var/spool/repackage.''')
+
+ p.add_option('--allmatches', action='store_true', \
+ help='''Remove all package instances that match the
+ pkgspec.
+
+ ***************************************************
+ NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
+ THAT MEANS ALL PACKAGES!!!!
+ ***************************************************''')
+
+ options, arguments = p.parse_args()
+
+ pkgspec = {}
+ rpm_options = []
+
+ if options.nodeps:
+ rpm_options.append('nodeps')
+
+ if options.nodigest:
+ rpm_options.append('nodigest')
+
+ if options.nofiles:
+ rpm_options.append('nofiles')
+
+ if options.noscripts:
+ rpm_options.append('noscripts')
+
+ if options.nosignature:
+ rpm_options.append('nosignature')
+
+ if options.nolinkto:
+ rpm_options.append('nolinkto')
+
+ if options.nomd5:
+ rpm_options.append('nomd5')
+
+ if options.nosize:
+ rpm_options.append('nosize')
+
+ if options.nouser:
+ rpm_options.append('nouser')
+
+ if options.nogroup:
+ rpm_options.append('nogroup')
+
+ if options.nomtime:
+ rpm_options.append('nomtime')
+
+ if options.nomode:
+ rpm_options.append('nomode')
+
+ if options.nordev:
+ rpm_options.append('nordev')
+
+ if options.repackage:
+ rpm_options.append('repackage')
+
+ if options.allmatches:
+ rpm_options.append('allmatches')
+
+ main_ts = rpmtransactionset()
+
+ cmdline_pkgspec = {}
+ if options.name != 'all':
+ if options.name:
+ cmdline_pkgspec['name'] = str(options.name)
+ if options.epoch:
+ cmdline_pkgspec['epoch'] = str(options.epoch)
+ if options.version:
+ cmdline_pkgspec['version'] = str(options.version)
+ if options.release:
+ cmdline_pkgspec['release'] = str(options.release)
+ if options.arch:
+ cmdline_pkgspec['arch'] = str(options.arch)
+
+ if options.verify:
+ results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
+ for r in results:
+ files = r.get('files', '')
+ for f in files:
+ display_verify_file(f)
+
+ elif options.list:
+ for p in rpmpackagelist(main_ts):
+ print(p)
+
+ elif options.erase:
+ if options.name:
+ rpm_erase([cmdline_pkgspec], rpm_options)
+ else:
+ print('You must specify the "--name" option')
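+
+# Example invocations when running this module directly (development and
+# testing only; the package names are placeholders):
+#
+#     python rpmtools.py --list
+#     python rpmtools.py --verify --name bash
+#     python rpmtools.py --erase --name foo --nodeps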
diff --git a/src/lib/Bcfg2/Client/XML.py b/src/lib/Bcfg2/Client/XML.py
new file mode 100644
index 000000000..858479611
--- /dev/null
+++ b/src/lib/Bcfg2/Client/XML.py
@@ -0,0 +1,36 @@
+'''XML lib compatibility layer for the Bcfg2 client'''
+
+# This library will use lxml if available, then the builtin xml.etree,
+# and finally the standalone elementtree package.
+
+try:
+ from lxml.etree import Element, SubElement, XML, tostring
+ from lxml.etree import XMLSyntaxError as ParseError
+ driver = 'lxml'
+except ImportError:
+ # lxml not available
+ from xml.parsers.expat import ExpatError as ParseError
+ try:
+ import xml.etree.ElementTree
+ Element = xml.etree.ElementTree.Element
+ SubElement = xml.etree.ElementTree.SubElement
+ XML = xml.etree.ElementTree.XML
+ def tostring(e, encoding=None, xml_declaration=None):
+ return xml.etree.ElementTree.tostring(e, encoding=encoding)
+ driver = 'etree-py'
+ except ImportError:
+ try:
+            import elementtree.ElementTree
+            Element = elementtree.ElementTree.Element
+            SubElement = elementtree.ElementTree.SubElement
+            XML = elementtree.ElementTree.XML
+            def tostring(e, encoding=None, xml_declaration=None):
+                return elementtree.ElementTree.tostring(e)
+            driver = 'etree'
+
+ except ImportError:
+ print("Failed to load lxml, xml.etree and elementtree.ElementTree")
+ print("Cannot continue")
+ raise SystemExit(1)
+
+# Reference the imported names once so static checkers don't flag them as
+# unused in whichever branch was taken above.
+len([Element, SubElement, XML, tostring, ParseError])
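+
+# Illustrative usage of this compatibility layer (commented out so importing
+# the module stays side-effect free); the element and attribute names are
+# arbitrary examples:
+#
+#     from Bcfg2.Client import XML
+#     root = XML.Element('Statistics')
+#     XML.SubElement(root, 'Flags', revision='unknown')
+#     print("using %s: %s" % (XML.driver, XML.tostring(root)))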
diff --git a/src/lib/Bcfg2/Client/__init__.py b/src/lib/Bcfg2/Client/__init__.py
new file mode 100644
index 000000000..6ed37b257
--- /dev/null
+++ b/src/lib/Bcfg2/Client/__init__.py
@@ -0,0 +1,3 @@
+"""This contains all Bcfg2 Client modules"""
+
+__all__ = ["Frame", "Tools", "XML"]