Diffstat (limited to 'src/lib/Bcfg2/Client')
-rw-r--r--  src/lib/Bcfg2/Client/Client.py                  |  340
-rw-r--r--  src/lib/Bcfg2/Client/Frame.py                   |  536
-rw-r--r--  src/lib/Bcfg2/Client/Proxy.py                   |  395
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APK.py               |    7
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APT.py               |  157
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Action.py            |   60
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Blast.py             |    4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Chkconfig.py         |   13
-rw-r--r--  src/lib/Bcfg2/Client/Tools/DebInit.py           |    9
-rw-r--r--  src/lib/Bcfg2/Client/Tools/FreeBSDInit.py       |    4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/IPS.py               |    5
-rw-r--r--  src/lib/Bcfg2/Client/Tools/MacPorts.py          |    9
-rw-r--r--  src/lib/Bcfg2/Client/Tools/OpenCSW.py           |    4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Device.py      |    2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/File.py        |   16
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py |    4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/__init__.py    |   53
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/base.py        |   39
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIXUsers.py        |   62
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Pacman.py            |    9
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Portage.py           |   22
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RPM.py               | 1951
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RPMng.py             |    9
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RcUpdate.py          |    4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SELinux.py           |   41
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SYSV.py              |    8
-rw-r--r--  src/lib/Bcfg2/Client/Tools/VCS.py               |   21
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUM.py               |  131
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUM24.py             |  399
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUMng.py             |    9
-rw-r--r--  src/lib/Bcfg2/Client/Tools/__init__.py          |  138
-rw-r--r--  src/lib/Bcfg2/Client/Tools/launchd.py           |   10
-rwxr-xr-x  src/lib/Bcfg2/Client/Tools/rpmtools.py          | 1091
-rw-r--r--  src/lib/Bcfg2/Client/__init__.py                |  872
34 files changed, 3299 insertions, 3135 deletions
diff --git a/src/lib/Bcfg2/Client/Client.py b/src/lib/Bcfg2/Client/Client.py
deleted file mode 100644
index 1676ee717..000000000
--- a/src/lib/Bcfg2/Client/Client.py
+++ /dev/null
@@ -1,340 +0,0 @@
-""" The main Bcfg2 client class """
-
-import os
-import sys
-import stat
-import time
-import fcntl
-import socket
-import logging
-import tempfile
-import Bcfg2.Proxy
-import Bcfg2.Logger
-import Bcfg2.Options
-import Bcfg2.Client.XML
-import Bcfg2.Client.Frame
-import Bcfg2.Client.Tools
-from Bcfg2.Utils import locked, Executor
-from Bcfg2.Compat import xmlrpclib
-from Bcfg2.version import __version__
-
-
-class Client(object):
- """ The main Bcfg2 client class """
-
- def __init__(self, setup):
- self.toolset = None
- self.tools = None
- self.config = None
- self._proxy = None
- self.setup = setup
-
- if self.setup['debug']:
- level = logging.DEBUG
- elif self.setup['verbose']:
- level = logging.INFO
- else:
- level = logging.WARNING
- Bcfg2.Logger.setup_logging('bcfg2',
- to_syslog=self.setup['syslog'],
- level=level,
- to_file=self.setup['logging'])
- self.logger = logging.getLogger('bcfg2')
- self.logger.debug(self.setup)
-
- self.cmd = Executor(self.setup['command_timeout'])
-
- if self.setup['bundle_quick']:
- if not self.setup['bundle'] and not self.setup['skipbundle']:
- self.logger.error("-Q option requires -b or -B")
- raise SystemExit(1)
- elif self.setup['remove']:
- self.logger.error("-Q option incompatible with -r")
- raise SystemExit(1)
- if 'drivers' in self.setup and self.setup['drivers'] == 'help':
- self.logger.info("The following drivers are available:")
- self.logger.info(Bcfg2.Client.Tools.drivers)
- raise SystemExit(0)
- if self.setup['remove'] and 'services' in self.setup['remove'].lower():
- self.logger.error("Service removal is nonsensical; "
- "removed services will only be disabled")
- if (self.setup['remove'] and
- self.setup['remove'].lower() not in ['all', 'services', 'packages',
- 'users']):
- self.logger.error("Got unknown argument %s for -r" %
- self.setup['remove'])
- if self.setup["file"] and self.setup["cache"]:
- print("cannot use -f and -c together")
- raise SystemExit(1)
- if not self.setup['server'].startswith('https://'):
- self.setup['server'] = 'https://' + self.setup['server']
-
- def _probe_failure(self, probename, msg):
- """ handle failure of a probe in the way the user wants us to
- (exit or continue) """
- message = "Failed to execute probe %s: %s" % (probename, msg)
- if self.setup['probe_exit']:
- self.fatal_error(message)
- else:
- self.logger.error(message)
-
- def run_probe(self, probe):
- """Execute probe."""
- name = probe.get('name')
- self.logger.info("Running probe %s" % name)
- ret = Bcfg2.Client.XML.Element("probe-data",
- name=name,
- source=probe.get('source'))
- try:
- scripthandle, scriptname = tempfile.mkstemp()
- script = os.fdopen(scripthandle, 'w')
- try:
- script.write("#!%s\n" %
- (probe.attrib.get('interpreter', '/bin/sh')))
- if sys.hexversion >= 0x03000000:
- script.write(probe.text)
- else:
- script.write(probe.text.encode('utf-8'))
- script.close()
- os.chmod(scriptname,
- stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
- stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
- stat.S_IWUSR) # 0755
- rv = self.cmd.run(scriptname, timeout=self.setup['timeout'])
- if rv.stderr:
- self.logger.warning("Probe %s has error output: %s" %
- (name, rv.stderr))
- if not rv.success:
- self._probe_failure(name, "Return value %s" % rv)
- self.logger.info("Probe %s has result:" % name)
- self.logger.info(rv.stdout)
- if sys.hexversion >= 0x03000000:
- ret.text = rv.stdout
- else:
- ret.text = rv.stdout.decode('utf-8')
- finally:
- os.unlink(scriptname)
- except SystemExit:
- raise
- except:
- self._probe_failure(name, sys.exc_info()[1])
- return ret
-
- def fatal_error(self, message):
- """Signal a fatal error."""
- self.logger.error("Fatal error: %s" % (message))
- raise SystemExit(1)
-
- @property
- def proxy(self):
- """ get an XML-RPC proxy to the server """
- if self._proxy is None:
- self._proxy = Bcfg2.Proxy.ComponentProxy(
- self.setup['server'],
- self.setup['user'],
- self.setup['password'],
- key=self.setup['key'],
- cert=self.setup['certificate'],
- ca=self.setup['ca'],
- allowedServerCNs=self.setup['serverCN'],
- timeout=self.setup['timeout'],
- retries=int(self.setup['retries']),
- delay=int(self.setup['retry_delay']))
- return self._proxy
-
- def run_probes(self, times=None):
- """ run probes and upload probe data """
- if times is None:
- times = dict()
-
- try:
- probes = Bcfg2.Client.XML.XML(str(self.proxy.GetProbes()))
- except (Bcfg2.Proxy.ProxyError,
- Bcfg2.Proxy.CertificateError,
- socket.gaierror,
- socket.error):
- err = sys.exc_info()[1]
- self.fatal_error("Failed to download probes from bcfg2: %s" % err)
- except Bcfg2.Client.XML.ParseError:
- err = sys.exc_info()[1]
- self.fatal_error("Server returned invalid probe requests: %s" %
- err)
-
- times['probe_download'] = time.time()
-
- # execute probes
- probedata = Bcfg2.Client.XML.Element("ProbeData")
- for probe in probes.findall(".//probe"):
- probedata.append(self.run_probe(probe))
-
- if len(probes.findall(".//probe")) > 0:
- try:
- # upload probe responses
- self.proxy.RecvProbeData(
- Bcfg2.Client.XML.tostring(
- probedata,
- xml_declaration=False).decode('utf-8'))
- except Bcfg2.Proxy.ProxyError:
- err = sys.exc_info()[1]
- self.fatal_error("Failed to upload probe data: %s" % err)
-
- times['probe_upload'] = time.time()
-
- def get_config(self, times=None):
- """ load the configuration, either from the cached
- configuration file (-f), or from the server """
- if times is None:
- times = dict()
-
- if self.setup['file']:
- # read config from file
- try:
- self.logger.debug("Reading cached configuration from %s" %
- self.setup['file'])
- return open(self.setup['file'], 'r').read()
- except IOError:
- self.fatal_error("Failed to read cached configuration from: %s"
- % (self.setup['file']))
- else:
- # retrieve config from server
- if self.setup['profile']:
- try:
- self.proxy.AssertProfile(self.setup['profile'])
- except Bcfg2.Proxy.ProxyError:
- err = sys.exc_info()[1]
- self.fatal_error("Failed to set client profile: %s" % err)
-
- try:
- self.proxy.DeclareVersion(__version__)
- except xmlrpclib.Fault:
- err = sys.exc_info()[1]
- if (err.faultCode == xmlrpclib.METHOD_NOT_FOUND or
- (err.faultCode == 7 and
- err.faultString.startswith("Unknown method"))):
- self.logger.debug("Server does not support declaring "
- "client version")
- else:
- self.logger.error("Failed to declare version: %s" % err)
- except (Bcfg2.Proxy.ProxyError,
- Bcfg2.Proxy.CertificateError,
- socket.gaierror,
- socket.error):
- err = sys.exc_info()[1]
- self.logger.error("Failed to declare version: %s" % err)
-
- self.run_probes(times=times)
-
- if self.setup['decision'] in ['whitelist', 'blacklist']:
- try:
- self.setup['decision_list'] = \
- self.proxy.GetDecisionList(self.setup['decision'])
- self.logger.info("Got decision list from server:")
- self.logger.info(self.setup['decision_list'])
- except Bcfg2.Proxy.ProxyError:
- err = sys.exc_info()[1]
- self.fatal_error("Failed to get decision list: %s" % err)
-
- try:
- rawconfig = self.proxy.GetConfig().encode('utf-8')
- except Bcfg2.Proxy.ProxyError:
- err = sys.exc_info()[1]
- self.fatal_error("Failed to download configuration from "
- "Bcfg2: %s" % err)
-
- times['config_download'] = time.time()
- return rawconfig
-
- def run(self):
- """Perform client execution phase."""
- times = {}
-
- # begin configuration
- times['start'] = time.time()
-
- self.logger.info("Starting Bcfg2 client run at %s" % times['start'])
-
- rawconfig = self.get_config(times=times).decode('utf-8')
-
- if self.setup['cache']:
- try:
- open(self.setup['cache'], 'w').write(rawconfig)
- os.chmod(self.setup['cache'], 33152)
- except IOError:
- self.logger.warning("Failed to write config cache file %s" %
- (self.setup['cache']))
- times['caching'] = time.time()
-
- try:
- self.config = Bcfg2.Client.XML.XML(rawconfig)
- except Bcfg2.Client.XML.ParseError:
- syntax_error = sys.exc_info()[1]
- self.fatal_error("The configuration could not be parsed: %s" %
- syntax_error)
-
- times['config_parse'] = time.time()
-
- if self.config.tag == 'error':
- self.fatal_error("Server error: %s" % (self.config.text))
- return(1)
-
- if self.setup['bundle_quick']:
- newconfig = Bcfg2.Client.XML.XML('<Configuration/>')
- for bundle in self.config.getchildren():
- if (bundle.tag == 'Bundle' and
- ((self.setup['bundle'] and
- bundle.get('name') in self.setup['bundle']) or
- (self.setup['skipbundle'] and
- bundle.get('name') not in self.setup['skipbundle']))):
- newconfig.append(bundle)
- self.config = newconfig
-
- self.tools = Bcfg2.Client.Frame.Frame(self.config,
- self.setup,
- times, self.setup['drivers'],
- self.setup['dryrun'])
-
- if not self.setup['omit_lock_check']:
- #check lock here
- try:
- lockfile = open(self.setup['lockfile'], 'w')
- if locked(lockfile.fileno()):
- self.fatal_error("Another instance of Bcfg2 is running. "
- "If you want to bypass the check, run "
- "with the %s option" %
- Bcfg2.Options.OMIT_LOCK_CHECK.cmd)
- except SystemExit:
- raise
- except:
- lockfile = None
- self.logger.error("Failed to open lockfile %s: %s" %
- (self.setup['lockfile'], sys.exc_info()[1]))
-
- # execute the configuration
- self.tools.Execute()
-
- if not self.setup['omit_lock_check']:
- # unlock here
- if lockfile:
- try:
- fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
- os.remove(self.setup['lockfile'])
- except OSError:
- self.logger.error("Failed to unlock lockfile %s" %
- lockfile.name)
-
- if not self.setup['file'] and not self.setup['bundle_quick']:
- # upload statistics
- feedback = self.tools.GenerateStats()
-
- try:
- self.proxy.RecvStats(
- Bcfg2.Client.XML.tostring(
- feedback,
- xml_declaration=False).decode('utf-8'))
- except Bcfg2.Proxy.ProxyError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to upload configuration statistics: "
- "%s" % err)
- raise SystemExit(2)
-
- self.logger.info("Finished Bcfg2 client run at %s" % time.time())
diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py
deleted file mode 100644
index ad718749e..000000000
--- a/src/lib/Bcfg2/Client/Frame.py
+++ /dev/null
@@ -1,536 +0,0 @@
-""" Frame is the Client Framework that verifies and installs entries,
-and generates statistics. """
-
-import copy
-import time
-import fnmatch
-import logging
-import Bcfg2.Client.Tools
-from Bcfg2.Client import prompt
-from Bcfg2.Compat import any, all # pylint: disable=W0622
-
-
-def matches_entry(entryspec, entry):
- """ Determine if the Decisions-style entry specification matches
- the entry. Both are tuples of (tag, name). The entryspec can
- handle the wildcard * in either position. """
- if entryspec == entry:
- return True
- return all(fnmatch.fnmatch(entry[i], entryspec[i]) for i in [0, 1])
-
-
-def matches_white_list(entry, whitelist):
- """ Return True if (<entry tag>, <entry name>) is in the given
- whitelist. """
- return any(matches_entry(we, (entry.tag, entry.get('name')))
- for we in whitelist)
-
-
-def passes_black_list(entry, blacklist):
- """ Return True if (<entry tag>, <entry name>) is not in the given
- blacklist. """
- return not any(matches_entry(be, (entry.tag, entry.get('name')))
- for be in blacklist)
-
-
-# pylint: disable=W0702
-# in frame we frequently want to catch all exceptions, regardless of
-# type, so disable the pylint rule that catches that.
-
-
-class Frame(object):
- """Frame is the container for all Tool objects and state information."""
-
- def __init__(self, config, setup, times, drivers, dryrun):
- self.config = config
- self.times = times
- self.dryrun = dryrun
- self.times['initialization'] = time.time()
- self.setup = setup
- self.tools = []
- self.states = {}
- self.whitelist = []
- self.blacklist = []
- self.removal = []
- self.logger = logging.getLogger(__name__)
- for driver in drivers[:]:
- if (driver not in Bcfg2.Client.Tools.drivers and
- isinstance(driver, str)):
- self.logger.error("Tool driver %s is not available" % driver)
- drivers.remove(driver)
-
- tclass = {}
- for tool in drivers:
- if not isinstance(tool, str):
- tclass[time.time()] = tool
- tool_class = "Bcfg2.Client.Tools.%s" % tool
- try:
- tclass[tool] = getattr(__import__(tool_class, globals(),
- locals(), ['*']),
- tool)
- except ImportError:
- continue
- except:
- self.logger.error("Tool %s unexpectedly failed to load" % tool,
- exc_info=1)
-
- for tool in list(tclass.values()):
- try:
- self.tools.append(tool(self.logger, setup, config))
- except Bcfg2.Client.Tools.ToolInstantiationError:
- continue
- except:
- self.logger.error("Failed to instantiate tool %s" % tool,
- exc_info=1)
-
- for tool in self.tools[:]:
- for conflict in getattr(tool, 'conflicts', []):
- for item in self.tools:
- if item.name == conflict:
- self.tools.remove(item)
-
- self.logger.info("Loaded tool drivers:")
- self.logger.info([tool.name for tool in self.tools])
-
- deprecated = [tool.name for tool in self.tools if tool.deprecated]
- if deprecated:
- self.logger.warning("Loaded deprecated tool drivers:")
- self.logger.warning(deprecated)
- experimental = [tool.name for tool in self.tools if tool.experimental]
- if experimental:
- self.logger.info("Loaded experimental tool drivers:")
- self.logger.info(experimental)
-
- # find entries not handled by any tools
- self.unhandled = [entry for struct in config
- for entry in struct
- if entry not in self.handled]
-
- if self.unhandled:
- self.logger.error("The following entries are not handled by any "
- "tool:")
- for entry in self.unhandled:
- self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
- entry.get('name')))
-
- self.find_dups(config)
-
- pkgs = [(entry.get('name'), entry.get('origin'))
- for struct in config
- for entry in struct
- if entry.tag == 'Package']
- if pkgs:
- self.logger.debug("The following packages are specified in bcfg2:")
- self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None])
- self.logger.debug("The following packages are prereqs added by "
- "Packages:")
- self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])
-
- def find_dups(self, config):
- """ Find duplicate entries and warn about them """
- entries = dict()
- for struct in config:
- for entry in struct:
- for tool in self.tools:
- if tool.handlesEntry(entry):
- pkey = tool.primarykey(entry)
- if pkey in entries:
- entries[pkey] += 1
- else:
- entries[pkey] = 1
- multi = [e for e, c in entries.items() if c > 1]
- if multi:
- self.logger.debug("The following entries are included multiple "
- "times:")
- for entry in multi:
- self.logger.debug(entry)
-
- def promptFilter(self, msg, entries):
- """Filter a supplied list based on user input."""
- ret = []
- entries.sort(key=lambda e: e.tag + ":" + e.get('name'))
- for entry in entries[:]:
- if entry in self.unhandled:
- # don't prompt for entries that can't be installed
- continue
- if 'qtext' in entry.attrib:
- iprompt = entry.get('qtext')
- else:
- iprompt = msg % (entry.tag, entry.get('name'))
- if prompt(iprompt):
- ret.append(entry)
- return ret
-
- def __getattr__(self, name):
- if name in ['extra', 'handled', 'modified', '__important__']:
- ret = []
- for tool in self.tools:
- ret += getattr(tool, name)
- return ret
- elif name in self.__dict__:
- return self.__dict__[name]
- raise AttributeError(name)
-
- def InstallImportant(self):
- """Install important entries
-
- We also process the decision mode stuff here because we want to prevent
- non-whitelisted/blacklisted 'important' entries from being installed
- prior to determining the decision mode on the client.
- """
- # Need to process decision stuff early so that dryrun mode
- # works with it
- self.whitelist = [entry for entry in self.states
- if not self.states[entry]]
- if not self.setup['file']:
- if self.setup['decision'] == 'whitelist':
- dwl = self.setup['decision_list']
- w_to_rem = [e for e in self.whitelist
- if not matches_white_list(e, dwl)]
- if w_to_rem:
- self.logger.info("In whitelist mode: "
- "suppressing installation of:")
- self.logger.info(["%s:%s" % (e.tag, e.get('name'))
- for e in w_to_rem])
- self.whitelist = [x for x in self.whitelist
- if x not in w_to_rem]
- elif self.setup['decision'] == 'blacklist':
- b_to_rem = \
- [e for e in self.whitelist
- if not passes_black_list(e, self.setup['decision_list'])]
- if b_to_rem:
- self.logger.info("In blacklist mode: "
- "suppressing installation of:")
- self.logger.info(["%s:%s" % (e.tag, e.get('name'))
- for e in b_to_rem])
- self.whitelist = [x for x in self.whitelist
- if x not in b_to_rem]
-
- # take care of important entries first
- if not self.dryrun:
- parent_map = dict((c, p)
- for p in self.config.getiterator()
- for c in p)
- for cfile in self.config.findall(".//Path"):
- if (cfile.get('name') not in self.__important__ or
- cfile.get('type') != 'file' or
- cfile not in self.whitelist):
- continue
- parent = parent_map[cfile]
- if ((parent.tag == "Bundle" and
- ((self.setup['bundle'] and
- parent.get("name") not in self.setup['bundle']) or
- (self.setup['skipbundle'] and
- parent.get("name") in self.setup['skipbundle']))) or
- (parent.tag == "Independent" and
- (self.setup['bundle'] or self.setup['skipindep']))):
- continue
- tools = [t for t in self.tools
- if t.handlesEntry(cfile) and t.canVerify(cfile)]
- if tools:
- if (self.setup['interactive'] and not
- self.promptFilter("Install %s: %s? (y/N):", [cfile])):
- self.whitelist.remove(cfile)
- continue
- try:
- self.states[cfile] = tools[0].InstallPath(cfile)
- if self.states[cfile]:
- tools[0].modified.append(cfile)
- except:
- self.logger.error("Unexpected tool failure",
- exc_info=1)
- cfile.set('qtext', '')
- if tools[0].VerifyPath(cfile, []):
- self.whitelist.remove(cfile)
-
- def Inventory(self):
- """
- Verify all entries,
- find extra entries,
- and build up workqueues
-
- """
- # initialize all states
- for struct in self.config.getchildren():
- for entry in struct.getchildren():
- self.states[entry] = False
- for tool in self.tools:
- try:
- tool.Inventory(self.states)
- except:
- self.logger.error("%s.Inventory() call failed:" % tool.name,
- exc_info=1)
-
- def Decide(self): # pylint: disable=R0912
- """Set self.whitelist based on user interaction."""
- iprompt = "Install %s: %s? (y/N): "
- rprompt = "Remove %s: %s? (y/N): "
- if self.setup['remove']:
- if self.setup['remove'] == 'all':
- self.removal = self.extra
- elif self.setup['remove'].lower() == 'services':
- self.removal = [entry for entry in self.extra
- if entry.tag == 'Service']
- elif self.setup['remove'].lower() == 'packages':
- self.removal = [entry for entry in self.extra
- if entry.tag == 'Package']
- elif self.setup['remove'].lower() == 'users':
- self.removal = [entry for entry in self.extra
- if entry.tag in ['POSIXUser', 'POSIXGroup']]
-
- candidates = [entry for entry in self.states
- if not self.states[entry]]
-
- if self.dryrun:
- if self.whitelist:
- self.logger.info("In dryrun mode: "
- "suppressing entry installation for:")
- self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
- for entry in self.whitelist])
- self.whitelist = []
- if self.removal:
- self.logger.info("In dryrun mode: "
- "suppressing entry removal for:")
- self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
- for entry in self.removal])
- self.removal = []
-
- # Here is where most of the work goes
- # first perform bundle filtering
- all_bundle_names = [b.get('name')
- for b in self.config.findall('./Bundle')]
- bundles = self.config.getchildren()
- if self.setup['bundle']:
- # warn if non-existent bundle given
- for bundle in self.setup['bundle']:
- if bundle not in all_bundle_names:
- self.logger.info("Warning: Bundle %s not found" % bundle)
- bundles = [b for b in bundles
- if b.get('name') in self.setup['bundle']]
- elif self.setup['indep']:
- bundles = [b for b in bundles if b.tag != 'Bundle']
- if self.setup['skipbundle']:
- # warn if non-existent bundle given
- if not self.setup['bundle_quick']:
- for bundle in self.setup['skipbundle']:
- if bundle not in all_bundle_names:
- self.logger.info("Warning: Bundle %s not found" %
- bundle)
- bundles = [b for b in bundles
- if b.get('name') not in self.setup['skipbundle']]
- if self.setup['skipindep']:
- bundles = [b for b in bundles if b.tag == 'Bundle']
-
- self.whitelist = [e for e in self.whitelist
- if any(e in b for b in bundles)]
-
- # first process prereq actions
- for bundle in bundles[:]:
- if bundle.tag != 'Bundle':
- continue
- bmodified = len([item for item in bundle
- if item in self.whitelist or
- item in self.modified])
- actions = [a for a in bundle.findall('./Action')
- if (a.get('timing') != 'post' and
- (bmodified or a.get('when') == 'always'))]
- # now we process all "pre" and "both" actions that are either
- # always or the bundle has been modified
- if self.setup['interactive']:
- self.promptFilter(iprompt, actions)
- self.DispatchInstallCalls(actions)
-
- # need to test to fail entries in whitelist
- if False in [self.states[a] for a in actions]:
- # then display bundles forced off with entries
- self.logger.info("Bundle %s failed prerequisite action" %
- (bundle.get('name')))
- bundles.remove(bundle)
- b_to_remv = [ent for ent in self.whitelist if ent in bundle]
- if b_to_remv:
- self.logger.info("Not installing entries from Bundle %s" %
- (bundle.get('name')))
- self.logger.info(["%s:%s" % (e.tag, e.get('name'))
- for e in b_to_remv])
- for ent in b_to_remv:
- self.whitelist.remove(ent)
-
- self.logger.debug("Installing entries in the following bundle(s):")
- self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles
- if b.get("name")))
-
- if self.setup['interactive']:
- self.whitelist = self.promptFilter(iprompt, self.whitelist)
- self.removal = self.promptFilter(rprompt, self.removal)
-
- for entry in candidates:
- if entry not in self.whitelist:
- self.blacklist.append(entry)
-
- def DispatchInstallCalls(self, entries):
- """Dispatch install calls to underlying tools."""
- for tool in self.tools:
- handled = [entry for entry in entries if tool.canInstall(entry)]
- if not handled:
- continue
- try:
- tool.Install(handled, self.states)
- except:
- self.logger.error("%s.Install() call failed:" % tool.name,
- exc_info=1)
-
- def Install(self):
- """Install all entries."""
- self.DispatchInstallCalls(self.whitelist)
- mods = self.modified
- mbundles = [struct for struct in self.config.findall('Bundle')
- if any(True for mod in mods if mod in struct)]
-
- if self.modified:
- # Handle Bundle interdeps
- if mbundles:
- self.logger.info("The Following Bundles have been modified:")
- self.logger.info([mbun.get('name') for mbun in mbundles])
- tbm = [(t, b) for t in self.tools for b in mbundles]
- for tool, bundle in tbm:
- try:
- tool.Inventory(self.states, [bundle])
- except:
- self.logger.error("%s.Inventory() call failed:" %
- tool.name,
- exc_info=1)
- clobbered = [entry for bundle in mbundles for entry in bundle
- if (not self.states[entry] and
- entry not in self.blacklist)]
- if clobbered:
- self.logger.debug("Found clobbered entries:")
- self.logger.debug(["%s:%s" % (entry.tag, entry.get('name'))
- for entry in clobbered])
- if not self.setup['interactive']:
- self.DispatchInstallCalls(clobbered)
-
- for bundle in self.config.findall('.//Bundle'):
- if (self.setup['bundle'] and
- bundle.get('name') not in self.setup['bundle']):
- # prune out unspecified bundles when running with -b
- continue
- if bundle in mbundles:
- self.logger.debug("Bundle %s was modified" %
- bundle.get('name'))
- func = "BundleUpdated"
- else:
- self.logger.debug("Bundle %s was not modified" %
- bundle.get('name'))
- func = "BundleNotUpdated"
- for tool in self.tools:
- try:
- getattr(tool, func)(bundle, self.states)
- except:
- self.logger.error("%s.%s() call failed:" %
- (tool.name, func), exc_info=1)
-
- def Remove(self):
- """Remove extra entries."""
- for tool in self.tools:
- extras = [entry for entry in self.removal
- if tool.handlesEntry(entry)]
- if extras:
- try:
- tool.Remove(extras)
- except:
- self.logger.error("%s.Remove() failed" % tool.name,
- exc_info=1)
-
- def CondDisplayState(self, phase):
- """Conditionally print tracing information."""
- self.logger.info('Phase: %s' % phase)
- self.logger.info('Correct entries: %d' %
- list(self.states.values()).count(True))
- self.logger.info('Incorrect entries: %d' %
- list(self.states.values()).count(False))
- if phase == 'final' and list(self.states.values()).count(False):
- for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" +
- e.get('name')):
- if not self.states[entry]:
- etype = entry.get('type')
- if etype:
- self.logger.info("%s:%s:%s" % (entry.tag, etype,
- entry.get('name')))
- else:
- self.logger.info("%s:%s" % (entry.tag,
- entry.get('name')))
- self.logger.info('Total managed entries: %d' %
- len(list(self.states.values())))
- self.logger.info('Unmanaged entries: %d' % len(self.extra))
- if phase == 'final' and self.setup['extra']:
- for entry in sorted(self.extra, key=lambda e: e.tag + ":" +
- e.get('name')):
- etype = entry.get('type')
- if etype:
- self.logger.info("%s:%s:%s" % (entry.tag, etype,
- entry.get('name')))
- else:
- self.logger.info("%s:%s" % (entry.tag,
- entry.get('name')))
-
- if ((list(self.states.values()).count(False) == 0) and not self.extra):
- self.logger.info('All entries correct.')
-
- def ReInventory(self):
- """Recheck everything."""
- if not self.dryrun and self.setup['kevlar']:
- self.logger.info("Rechecking system inventory")
- self.Inventory()
-
- def Execute(self):
- """Run all methods."""
- self.Inventory()
- self.times['inventory'] = time.time()
- self.CondDisplayState('initial')
- self.InstallImportant()
- self.Decide()
- self.Install()
- self.times['install'] = time.time()
- self.Remove()
- self.times['remove'] = time.time()
- if self.modified:
- self.ReInventory()
- self.times['reinventory'] = time.time()
- self.times['finished'] = time.time()
- self.CondDisplayState('final')
-
- def GenerateStats(self):
- """Generate XML summary of execution statistics."""
- feedback = Bcfg2.Client.XML.Element("upload-statistics")
- stats = Bcfg2.Client.XML.SubElement(
- feedback,
- 'Statistics',
- total=str(len(self.states)),
- version='2.0',
- revision=self.config.get('revision', '-1'))
- good_entries = [key for key, val in list(self.states.items()) if val]
- good = len(good_entries)
- stats.set('good', str(good))
- if any(not val for val in list(self.states.values())):
- stats.set('state', 'dirty')
- else:
- stats.set('state', 'clean')
-
- # List bad elements of the configuration
- for (data, ename) in [(self.modified, 'Modified'),
- (self.extra, "Extra"),
- (good_entries, "Good"),
- ([entry for entry in self.states
- if not self.states[entry]], "Bad")]:
- container = Bcfg2.Client.XML.SubElement(stats, ename)
- for item in data:
- item.set('qtext', '')
- container.append(copy.deepcopy(item))
- item.text = None
-
- timeinfo = Bcfg2.Client.XML.Element("OpStamps")
- feedback.append(stats)
- for (event, timestamp) in list(self.times.items()):
- timeinfo.set(event, str(timestamp))
- stats.append(timeinfo)
- return feedback
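As an illustration of the Decisions-style wildcard matching that Frame's matches_entry()/matches_white_list() helpers above implement (and that Action.py below now imports from Bcfg2.Client), here is the same fnmatch logic as a self-contained snippet with assumed example entries:

    import fnmatch

    def matches_entry(entryspec, entry):
        # Both arguments are (tag, name) tuples; the spec may use '*'
        # in either position.
        if entryspec == entry:
            return True
        return all(fnmatch.fnmatch(entry[i], entryspec[i]) for i in (0, 1))

    print(matches_entry(('Path', '/etc/*'), ('Path', '/etc/motd')))   # True
    print(matches_entry(('Service', '*'), ('Package', 'openssh')))    # False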
diff --git a/src/lib/Bcfg2/Client/Proxy.py b/src/lib/Bcfg2/Client/Proxy.py
new file mode 100644
index 000000000..a464d6a40
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Proxy.py
@@ -0,0 +1,395 @@
+import re
+import sys
+import time
+import socket
+import logging
+import Bcfg2.Options
+from Bcfg2.Compat import httplib, xmlrpclib, urlparse, quote_plus
+
+# The ssl module is provided by either Python 2.6 or a separate ssl
+# package that works on older versions of Python (see
+# http://pypi.python.org/pypi/ssl). If neither can be found, look for
+# M2Crypto instead.
+try:
+ import ssl
+ SSL_LIB = 'py26_ssl'
+ SSL_ERROR = ssl.SSLError
+except ImportError:
+ from M2Crypto import SSL
+ import M2Crypto.SSL.Checker
+ SSL_LIB = 'm2crypto'
+ SSL_ERROR = SSL.SSLError
+
+
+version = sys.version_info[:2]
+has_py26 = version >= (2, 6)
+has_py32 = version >= (3, 2)
+
+__all__ = ["ComponentProxy",
+ "RetryMethod",
+ "SSLHTTPConnection",
+ "XMLRPCTransport"]
+
+
+class ProxyError(Exception):
+ """ ProxyError provides a consistent reporting interface to
+ the various xmlrpclib errors that might arise (mainly
+ ProtocolError and Fault) """
+ def __init__(self, err):
+ msg = None
+ if isinstance(err, xmlrpclib.ProtocolError):
+ # cut out the password in the URL
+ url = re.sub(r'([^:]+):(.*?)@([^@]+:\d+/)', r'\1:******@\3',
+ err.url)
+ msg = "XML-RPC Protocol Error for %s: %s (%s)" % (url,
+ err.errmsg,
+ err.errcode)
+ elif isinstance(err, xmlrpclib.Fault):
+ msg = "XML-RPC Fault: %s (%s)" % (err.faultString,
+ err.faultCode)
+ else:
+ msg = str(err)
+ Exception.__init__(self, msg)
+
+
+class CertificateError(Exception):
+ def __init__(self, commonName):
+ self.commonName = commonName
+
+ def __str__(self):
+ return ("Got unallowed commonName %s from server"
+ % self.commonName)
+
+
+_orig_Method = xmlrpclib._Method
+
+
+class RetryMethod(xmlrpclib._Method):
+ """Method with error handling and retries built in."""
+ log = logging.getLogger('xmlrpc')
+ max_retries = 3
+ retry_delay = 1
+
+ def __call__(self, *args):
+ for retry in range(self.max_retries):
+ if retry >= self.max_retries - 1:
+ final = True
+ else:
+ final = False
+ msg = None
+ try:
+ return _orig_Method.__call__(self, *args)
+ except xmlrpclib.ProtocolError:
+ err = sys.exc_info()[1]
+ msg = "Server failure: Protocol Error: %s %s" % \
+ (err.errcode, err.errmsg)
+ except xmlrpclib.Fault:
+ msg = sys.exc_info()[1]
+ except socket.error:
+ err = sys.exc_info()[1]
+ if hasattr(err, 'errno') and err.errno == 336265218:
+ msg = "SSL Key error: %s" % err
+ elif hasattr(err, 'errno') and err.errno == 185090050:
+ msg = "SSL CA error: %s" % err
+ elif final:
+ msg = "Server failure: %s" % err
+ except CertificateError:
+ err = sys.exc_info()[1]
+ msg = "Got unallowed commonName %s from server" % \
+ err.commonName
+ except KeyError:
+ err = sys.exc_info()[1]
+ msg = "Server disallowed connection: %s" % err
+ except ProxyError:
+ err = sys.exc_info()[1]
+ msg = err
+ except:
+ etype, err = sys.exc_info()[:2]
+ msg = "Unknown failure: %s (%s)" % (err, etype.__name__)
+ if msg:
+ if final:
+ self.log.error(msg)
+ raise ProxyError(msg)
+ else:
+ self.log.info(msg)
+ time.sleep(self.retry_delay)
+
+xmlrpclib._Method = RetryMethod
+
+
+class SSLHTTPConnection(httplib.HTTPConnection):
+ """Extension of HTTPConnection that
+ implements SSL and related behaviors.
+ """
+
+ def __init__(self, host, port=None, strict=None, timeout=90, key=None,
+ cert=None, ca=None, scns=None, protocol='xmlrpc/ssl'):
+ """Initializes the `httplib.HTTPConnection` object and stores security
+ parameters
+
+ Parameters
+ ----------
+ host : string
+ Name of host to contact
+ port : int, optional
+ Port on which to contact the host. If none is specified,
+ the default port of 80 will be used unless the `host`
+ string has a port embedded in the form host:port.
+ strict : Boolean, optional
+ Passed to the `httplib.HTTPConnection` constructor and if
+ True, causes the `BadStatusLine` exception to be raised if
+ the status line cannot be parsed as a valid HTTP 1.0 or
+ 1.1 status.
+ timeout : int, optional
+ Causes blocking operations to timeout after `timeout`
+ seconds.
+ key : string, optional
+ The file system path to the local endpoint's SSL key. May
+ specify the same file as `cert` if using a file that
+ contains both. See
+ http://docs.python.org/library/ssl.html#ssl-certificates
+ for details. Required if using xmlrpc/ssl with client
+ certificate authentication.
+ cert : string, optional
+ The file system path to the local endpoint's SSL
+ certificate. May specify the same file as `cert` if using
+ a file that contains both. See
+ http://docs.python.org/library/ssl.html#ssl-certificates
+ for details. Required if using xmlrpc/ssl with client
+ certificate authentication.
+ ca : string, optional
+ The file system path to a set of concatenated certificate
+ authority certs, which are used to validate certificates
+ passed from the other end of the connection.
+ scns : array-like, optional
+ List of acceptable server commonNames. The peer cert's
+ common name must appear in this list, otherwise the
+ connect() call will throw a `CertificateError`.
+ protocol : {'xmlrpc/ssl', 'xmlrpc/tlsv1'}, optional
+ Communication protocol to use.
+
+ """
+ if not has_py26:
+ httplib.HTTPConnection.__init__(self, host, port, strict)
+ elif not has_py32:
+ httplib.HTTPConnection.__init__(self, host, port, strict, timeout)
+ else:
+ # the strict parameter is deprecated.
+ # HTTP 0.9-style "Simple Responses" are not supported anymore.
+ httplib.HTTPConnection.__init__(self, host, port, timeout=timeout)
+ self.logger = logging.getLogger("%s.%s" % (self.__class__.__module__,
+ self.__class__.__name__))
+ self.key = key
+ self.cert = cert
+ self.ca = ca
+ self.scns = scns
+ self.protocol = protocol
+ self.timeout = timeout
+
+ def connect(self):
+ """Initiates a connection using previously set attributes."""
+ if SSL_LIB == 'py26_ssl':
+ self._connect_py26ssl()
+ elif SSL_LIB == 'm2crypto':
+ self._connect_m2crypto()
+ else:
+ raise Exception("No SSL module support")
+
+ def _connect_py26ssl(self):
+ """Initiates a connection using the ssl module."""
+ # check for IPv6
+ hostip = socket.getaddrinfo(self.host,
+ self.port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0][4][0]
+ if ':' in hostip:
+ rawsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if self.protocol == 'xmlrpc/ssl':
+ ssl_protocol_ver = ssl.PROTOCOL_SSLv23
+ elif self.protocol == 'xmlrpc/tlsv1':
+ ssl_protocol_ver = ssl.PROTOCOL_TLSv1
+ else:
+ self.logger.error("Unknown protocol %s" % (self.protocol))
+ raise Exception("unknown protocol %s" % self.protocol)
+ if self.ca:
+ other_side_required = ssl.CERT_REQUIRED
+ else:
+ other_side_required = ssl.CERT_NONE
+ self.logger.warning("No ca is specified. Cannot authenticate the "
+ "server with SSL.")
+ if self.cert and not self.key:
+ self.logger.warning("SSL cert specfied, but no key. Cannot "
+ "authenticate this client with SSL.")
+ self.cert = None
+ if self.key and not self.cert:
+ self.logger.warning("SSL key specfied, but no cert. Cannot "
+ "authenticate this client with SSL.")
+ self.key = None
+
+ rawsock.settimeout(self.timeout)
+ self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required,
+ ca_certs=self.ca, suppress_ragged_eofs=True,
+ keyfile=self.key, certfile=self.cert,
+ ssl_version=ssl_protocol_ver)
+ self.sock.connect((self.host, self.port))
+ peer_cert = self.sock.getpeercert()
+ if peer_cert and self.scns:
+ scn = [x[0][1] for x in peer_cert['subject']
+ if x[0][0] == 'commonName'][0]
+ if scn not in self.scns:
+ raise CertificateError(scn)
+ self.sock.closeSocket = True
+
+ def _connect_m2crypto(self):
+ """Initiates a connection using the M2Crypto module."""
+
+ if self.protocol == 'xmlrpc/ssl':
+ ctx = SSL.Context('sslv23')
+ elif self.protocol == 'xmlrpc/tlsv1':
+ ctx = SSL.Context('tlsv1')
+ else:
+ self.logger.error("Unknown protocol %s" % (self.protocol))
+ raise Exception("unknown protocol %s" % self.protocol)
+
+ if self.ca:
+ # Use the certificate authority to validate the cert
+ # presented by the server
+ ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,
+ depth=9)
+ if ctx.load_verify_locations(self.ca) != 1:
+ raise Exception('No CA certs')
+ else:
+ self.logger.warning("No ca is specified. Cannot authenticate the "
+ "server with SSL.")
+
+ if self.cert and self.key:
+ # A cert/key is defined, use them to support client
+ # authentication to the server
+ ctx.load_cert(self.cert, self.key)
+ elif self.cert:
+ self.logger.warning("SSL cert specfied, but no key. Cannot "
+ "authenticate this client with SSL.")
+ elif self.key:
+ self.logger.warning("SSL key specfied, but no cert. Cannot "
+ "authenticate this client with SSL.")
+
+ self.sock = SSL.Connection(ctx)
+ if re.match('\\d+\\.\\d+\\.\\d+\\.\\d+', self.host):
+ # host is ip address
+ try:
+ hostname = socket.gethostbyaddr(self.host)[0]
+ except:
+ # fall back to ip address
+ hostname = self.host
+ else:
+ hostname = self.host
+ try:
+ self.sock.connect((hostname, self.port))
+ # automatically checks cert matches host
+ except M2Crypto.SSL.Checker.WrongHost:
+ wr = sys.exc_info()[1]
+ raise CertificateError(wr)
+
+
+class XMLRPCTransport(xmlrpclib.Transport):
+ def __init__(self, key=None, cert=None, ca=None,
+ scns=None, use_datetime=0, timeout=90):
+ if hasattr(xmlrpclib.Transport, '__init__'):
+ xmlrpclib.Transport.__init__(self, use_datetime)
+ self.key = key
+ self.cert = cert
+ self.ca = ca
+ self.scns = scns
+ self.timeout = timeout
+
+ def make_connection(self, host):
+ host, self._extra_headers = self.get_host_info(host)[0:2]
+ return SSLHTTPConnection(host,
+ key=self.key,
+ cert=self.cert,
+ ca=self.ca,
+ scns=self.scns,
+ timeout=self.timeout)
+
+ def request(self, host, handler, request_body, verbose=0):
+ """Send request to server and return response."""
+ try:
+ conn = self.send_request(host, handler, request_body, False)
+ response = conn.getresponse()
+ errcode = response.status
+ errmsg = response.reason
+ headers = response.msg
+ except (socket.error, SSL_ERROR, httplib.BadStatusLine):
+ err = sys.exc_info()[1]
+ raise ProxyError(xmlrpclib.ProtocolError(host + handler,
+ 408,
+ str(err),
+ self._extra_headers))
+
+ if errcode != 200:
+ raise ProxyError(xmlrpclib.ProtocolError(host + handler,
+ errcode,
+ errmsg,
+ headers))
+
+ self.verbose = verbose
+ return self.parse_response(response)
+
+ if sys.hexversion < 0x03000000:
+ # pylint: disable=E1101
+ def send_request(self, host, handler, request_body, debug):
+ """ send_request() changed significantly in py3k."""
+ conn = self.make_connection(host)
+ xmlrpclib.Transport.send_request(self, conn, handler, request_body)
+ self.send_host(conn, host)
+ self.send_user_agent(conn)
+ self.send_content(conn, request_body)
+ return conn
+ # pylint: enable=E1101
+
+
+class ComponentProxy(xmlrpclib.ServerProxy):
+ """Constructs proxies to components. """
+
+ options = [
+ Bcfg2.Options.Common.location, Bcfg2.Options.Common.ssl_key,
+ Bcfg2.Options.Common.ssl_cert, Bcfg2.Options.Common.ssl_ca,
+ Bcfg2.Options.Common.password, Bcfg2.Options.Common.client_timeout,
+ Bcfg2.Options.Option(
+ "-u", "--user", default="root", cf=('communication', 'user'),
+ help='The user to provide for authentication'),
+ Bcfg2.Options.Option(
+ "-R", "--retries", type=int, default=3,
+ cf=('communication', 'retries'),
+ help='The number of times to retry network communication'),
+ Bcfg2.Options.Option(
+ "-y", "--retry-delay", type=int, default=1,
+ cf=('communication', 'retry_delay'),
+ help='The time in seconds to wait between retries'),
+ Bcfg2.Options.Option(
+ '--ssl-cns', cf=('communication', 'serverCommonNames'),
+ type=Bcfg2.Options.Types.colon_list,
+ help='List of server commonNames')]
+
+ def __init__(self):
+ RetryMethod.max_retries = Bcfg2.Options.setup.retries
+ RetryMethod.retry_delay = Bcfg2.Options.setup.retry_delay
+
+ if Bcfg2.Options.setup.user and Bcfg2.Options.setup.password:
+ method, path = urlparse(Bcfg2.Options.setup.server)[:2]
+ url = "%s://%s:%s@%s" % (
+ method,
+ quote_plus(Bcfg2.Options.setup.user, ''),
+ quote_plus(Bcfg2.Options.setup.password, ''),
+ path)
+ else:
+ url = Bcfg2.Options.setup.server
+ ssl_trans = XMLRPCTransport(Bcfg2.Options.setup.key,
+ Bcfg2.Options.setup.cert,
+ Bcfg2.Options.setup.ca,
+ Bcfg2.Options.setup.ssl_cns,
+ Bcfg2.Options.setup.client_timeout)
+ xmlrpclib.ServerProxy.__init__(self, url,
+ allow_none=True, transport=ssl_trans)
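The new Proxy.py's RetryMethod above wraps every XML-RPC call in a retry loop: it attempts the call up to max_retries times (default 3), sleeps retry_delay seconds (default 1) between attempts, and only surfaces the error on the final try. A minimal sketch of that retry pattern in isolation (generic function names, not the Bcfg2 API, and re-raising instead of wrapping in ProxyError):

    import time

    def call_with_retries(func, *args, max_retries=3, retry_delay=1):
        """Call func(*args), retrying on any exception up to max_retries times."""
        for attempt in range(max_retries):
            try:
                return func(*args)
            except Exception:
                if attempt == max_retries - 1:
                    raise          # final attempt: let the error propagate
                time.sleep(retry_delay)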
diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py
index 58641ed37..457197c28 100644
--- a/src/lib/Bcfg2/Client/Tools/APK.py
+++ b/src/lib/Bcfg2/Client/Tools/APK.py
@@ -12,11 +12,6 @@ class APK(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'apk'
pkgtool = ("/sbin/apk add %s", ("%s", ["name"]))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
- self.installed = {}
- self.RefreshPackages()
-
def RefreshPackages(self):
"""Refresh memory hashes of packages."""
names = self.cmd.run("/sbin/apk info").stdout.splitlines()
@@ -38,8 +33,6 @@ class APK(Bcfg2.Client.Tools.PkgTool):
if entry.attrib['name'] in self.installed:
if entry.attrib['version'] in \
['auto', self.installed[entry.attrib['name']]]:
- #if not self.setup['quick'] and \
- # entry.get('verify', 'true') == 'true':
#FIXME: Does APK have any sort of verification mechanism?
return True
else:
diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py
index 39816403a..cf4e7c7ea 100644
--- a/src/lib/Bcfg2/Client/Tools/APT.py
+++ b/src/lib/Bcfg2/Client/Tools/APT.py
@@ -4,29 +4,38 @@
import warnings
warnings.filterwarnings("ignore", "apt API not stable yet",
FutureWarning)
-import apt.cache
import os
+import apt.cache
+import Bcfg2.Options
import Bcfg2.Client.Tools
+
class APT(Bcfg2.Client.Tools.Tool):
- """The Debian toolset implements package and service operations and inherits
- the rest from Toolset.Toolset.
+ """The Debian toolset implements package and service operations
+ and inherits the rest from Tools.Tool. """
+
+ options = Bcfg2.Client.Tools.Tool.options + [
+ Bcfg2.Options.PathOption(
+ cf=('APT', 'install_path'),
+ default='/usr', dest='apt_install_path',
+ help='Apt tools install path'),
+ Bcfg2.Options.PathOption(
+ cf=('APT', 'var_path'), default='/var', dest='apt_var_path',
+ help='Apt tools var path'),
+ Bcfg2.Options.PathOption(
+ cf=('APT', 'etc_path'), default='/etc', dest='apt_etc_path',
+ help='System etc path')]
- """
- name = 'APT'
__execs__ = []
__handles__ = [('Package', 'deb'), ('Path', 'ignore')]
__req__ = {'Package': ['name', 'version'], 'Path': ['type']}
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
- self.install_path = setup.get('apt_install_path', '/usr')
- self.var_path = setup.get('apt_var_path', '/var')
- self.etc_path = setup.get('apt_etc_path', '/etc')
- self.debsums = '%s/bin/debsums' % self.install_path
- self.aptget = '%s/bin/apt-get' % self.install_path
- self.dpkg = '%s/bin/dpkg' % self.install_path
+ self.debsums = '%s/bin/debsums' % Bcfg2.Options.setup.apt_install_path
+ self.aptget = '%s/bin/apt-get' % Bcfg2.Options.setup.apt_install_path
+ self.dpkg = '%s/bin/dpkg' % Bcfg2.Options.setup.apt_install_path
self.__execs__ = [self.debsums, self.aptget, self.dpkg]
path_entries = os.environ['PATH'].split(':')
@@ -38,27 +47,32 @@ class APT(Bcfg2.Client.Tools.Tool):
'-o DPkg::Options::=--force-confmiss ' + \
'--reinstall ' + \
'--force-yes '
- if not self.setup['debug']:
+ if not Bcfg2.Options.setup.debug:
self.pkgcmd += '-q=2 '
self.pkgcmd += '-y install %s'
- self.ignores = [entry.get('name') for struct in config \
- for entry in struct \
- if entry.tag == 'Path' and \
+ self.ignores = [entry.get('name') for struct in config
+ for entry in struct
+ if entry.tag == 'Path' and
entry.get('type') == 'ignore']
- self.__important__ = self.__important__ + \
- ["%s/cache/debconf/config.dat" % self.var_path,
- "%s/cache/debconf/templates.dat" % self.var_path,
- '/etc/passwd', '/etc/group',
- '%s/apt/apt.conf' % self.etc_path,
- '%s/dpkg/dpkg.cfg' % self.etc_path] + \
- [entry.get('name') for struct in config for entry in struct \
- if entry.tag == 'Path' and \
- entry.get('name').startswith('%s/apt/sources.list' % self.etc_path)]
- self.nonexistent = [entry.get('name') for struct in config for entry in struct \
- if entry.tag == 'Path' and entry.get('type') == 'nonexistent']
+ self.__important__ = self.__important__ + [
+ "%s/cache/debconf/config.dat" % Bcfg2.Options.setup.apt_var_path,
+ "%s/cache/debconf/templates.dat" %
+ Bcfg2.Options.setup.apt_var_path,
+ '/etc/passwd', '/etc/group',
+ '%s/apt/apt.conf' % Bcfg2.Options.setup.apt_etc_path,
+ '%s/dpkg/dpkg.cfg' % Bcfg2.Options.setup.apt_etc_path] + \
+ [entry.get('name') for struct in config
+ for entry in struct
+ if (entry.tag == 'Path' and
+ entry.get('name').startswith(
+ '%s/apt/sources.list' %
+ Bcfg2.Options.setup.apt_etc_path))]
+ self.nonexistent = [entry.get('name') for struct in config
+ for entry in struct if entry.tag == 'Path'
+ and entry.get('type') == 'nonexistent']
os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
self.actions = {}
- if self.setup['kevlar'] and not self.setup['dryrun']:
+ if Bcfg2.Options.setup.kevlar and not Bcfg2.Options.setup.dry_run:
self.cmd.run("%s --force-confold --configure --pending" %
self.dpkg)
self.cmd.run("%s clean" % self.aptget)
@@ -84,16 +98,16 @@ class APT(Bcfg2.Client.Tools.Tool):
else:
extras = [(p.name, p.installedVersion) for p in self.pkg_cache
if p.isInstalled and p.name not in packages]
- return [Bcfg2.Client.XML.Element('Package', name=name, \
- type='deb', version=version) \
- for (name, version) in extras]
+ return [Bcfg2.Client.XML.Element('Package', name=name,
+ type='deb', version=version)
+ for (name, version) in extras]
def VerifyDebsums(self, entry, modlist):
output = \
self.cmd.run("%s -as %s" %
(self.debsums, entry.get('name'))).stdout.splitlines()
if len(output) == 1 and "no md5sums for" in output[0]:
- self.logger.info("Package %s has no md5sums. Cannot verify" % \
+ self.logger.info("Package %s has no md5sums. Cannot verify" %
entry.get('name'))
entry.set('qtext',
"Reinstall Package %s-%s to setup md5sums? (y/N) " %
@@ -113,11 +127,11 @@ class APT(Bcfg2.Client.Tools.Tool):
# these files should not exist
continue
elif "is not installed" in item or "missing file" in item:
- self.logger.error("Package %s is not fully installed" \
- % entry.get('name'))
+ self.logger.error("Package %s is not fully installed" %
+ entry.get('name'))
else:
- self.logger.error("Got Unsupported pattern %s from debsums" \
- % item)
+ self.logger.error("Got Unsupported pattern %s from debsums" %
+ item)
files.append(item)
files = list(set(files) - set(self.ignores))
# We check if there is file in the checksum to do
@@ -127,15 +141,15 @@ class APT(Bcfg2.Client.Tools.Tool):
modlist = [os.path.realpath(filename) for filename in modlist]
bad = [filename for filename in files if filename not in modlist]
if bad:
- self.logger.debug("It is suggested that you either manage these "
- "files, revert the changes, or ignore false "
- "failures:")
- self.logger.info("Package %s failed validation. Bad files are:" % \
- entry.get('name'))
+ self.logger.debug("It is suggested that you either manage "
+ "these files, revert the changes, or ignore "
+ "false failures:")
+ self.logger.info("Package %s failed validation. Bad files "
+ "are:" % entry.get('name'))
self.logger.info(bad)
entry.set('qtext',
- "Reinstall Package %s-%s to fix failing files? (y/N) " % \
- (entry.get('name'), entry.get('version')))
+ "Reinstall Package %s-%s to fix failing files? "
+ "(y/N) " % (entry.get('name'), entry.get('version')))
return False
return True
@@ -146,12 +160,12 @@ class APT(Bcfg2.Client.Tools.Tool):
(entry.attrib['name']))
return False
pkgname = entry.get('name')
- if self.pkg_cache.has_key(pkgname):
+ if self.pkg_cache.has_key(pkgname): # nopep8
if self._newapi:
is_installed = self.pkg_cache[pkgname].is_installed
else:
is_installed = self.pkg_cache[pkgname].isInstalled
- if not self.pkg_cache.has_key(pkgname) or not is_installed:
+ if not self.pkg_cache.has_key(pkgname) or not is_installed: # nopep8
self.logger.info("Package %s not installed" % (entry.get('name')))
entry.set('current_exists', 'false')
return False
@@ -165,9 +179,11 @@ class APT(Bcfg2.Client.Tools.Tool):
candidate_version = pkg.candidateVersion
if entry.get('version') == 'auto':
if self._newapi:
- is_upgradable = self.pkg_cache._depcache.is_upgradable(pkg._pkg)
+ is_upgradable = \
+ self.pkg_cache._depcache.is_upgradable(pkg._pkg)
else:
- is_upgradable = self.pkg_cache._depcache.IsUpgradable(pkg._pkg)
+ is_upgradable = \
+ self.pkg_cache._depcache.IsUpgradable(pkg._pkg)
if is_upgradable:
desiredVersion = candidate_version
else:
@@ -178,14 +194,15 @@ class APT(Bcfg2.Client.Tools.Tool):
desiredVersion = entry.get('version')
if desiredVersion != installed_version:
entry.set('current_version', installed_version)
- entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % \
+ entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " %
(entry.get('name'), entry.get('current_version'),
desiredVersion))
return False
else:
# version matches
- if not self.setup['quick'] and entry.get('verify', 'true') == 'true' \
- and checksums:
+ if (not Bcfg2.Options.setup.quick and
+ entry.get('verify', 'true') == 'true'
+ and checksums):
pkgsums = self.VerifyDebsums(entry, modlist)
return pkgsums
return True
@@ -217,41 +234,45 @@ class APT(Bcfg2.Client.Tools.Tool):
self.modified += packages
self.extra = self.FindExtra()
- def Install(self, packages, states):
+ def Install(self, packages):
# it looks like you can't install arbitrary versions of software
# out of the pkg cache, we will still need to call apt-get
ipkgs = []
bad_pkgs = []
for pkg in packages:
- if not self.pkg_cache.has_key(pkg.get('name')):
- self.logger.error("APT has no information about package %s" % (pkg.get('name')))
+ if not self.pkg_cache.has_key(pkg.get('name')): # nopep8
+ self.logger.error("APT has no information about package %s" %
+ (pkg.get('name')))
continue
if pkg.get('version') in ['auto', 'any']:
if self._newapi:
try:
- ipkgs.append("%s=%s" % (pkg.get('name'),
- self.pkg_cache[pkg.get('name')].candidate.version))
+ cversion = \
+ self.pkg_cache[pkg.get('name')].candidate.version
+ ipkgs.append("%s=%s" % (pkg.get('name'), cversion))
except AttributeError:
- self.logger.error("Failed to find %s in apt package cache" %
- pkg.get('name'))
+ self.logger.error("Failed to find %s in apt package "
+ "cache" % pkg.get('name'))
continue
else:
- ipkgs.append("%s=%s" % (pkg.get('name'),
- self.pkg_cache[pkg.get('name')].candidateVersion))
+ cversion = self.pkg_cache[pkg.get('name')].candidateVersion
+ ipkgs.append("%s=%s" % (pkg.get('name'), cversion))
continue
if self._newapi:
- avail_vers = [x.ver_str for x in \
- self.pkg_cache[pkg.get('name')]._pkg.version_list]
+ avail_vers = [
+ x.ver_str for x in
+ self.pkg_cache[pkg.get('name')]._pkg.version_list]
else:
- avail_vers = [x.VerStr for x in \
- self.pkg_cache[pkg.get('name')]._pkg.VersionList]
+ avail_vers = [
+ x.VerStr for x in
+ self.pkg_cache[pkg.get('name')]._pkg.VersionList]
if pkg.get('version') in avail_vers:
ipkgs.append("%s=%s" % (pkg.get('name'), pkg.get('version')))
continue
else:
- self.logger.error("Package %s: desired version %s not in %s" \
- % (pkg.get('name'), pkg.get('version'),
- avail_vers))
+ self.logger.error("Package %s: desired version %s not in %s" %
+ (pkg.get('name'), pkg.get('version'),
+ avail_vers))
bad_pkgs.append(pkg.get('name'))
if bad_pkgs:
self.logger.error("Cannot find correct versions of packages:")
@@ -262,10 +283,12 @@ class APT(Bcfg2.Client.Tools.Tool):
self.logger.error("APT command failed")
self.pkg_cache = apt.cache.Cache()
self.extra = self.FindExtra()
+ states = dict()
for package in packages:
states[package] = self.VerifyPackage(package, [], checksums=False)
if states[package]:
self.modified.append(package)
+ return states
def VerifyPath(self, entry, _):
"""Do nothing here since we only verify Path type=ignore."""
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
index 0166e4c00..5549b1717 100644
--- a/src/lib/Bcfg2/Client/Tools/Action.py
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -1,30 +1,28 @@
"""Action driver"""
-import os
-import sys
-import select
import Bcfg2.Client.Tools
-from Bcfg2.Client.Frame import matches_white_list, passes_black_list
-from Bcfg2.Compat import input # pylint: disable=W0622
+from Bcfg2.Utils import safe_input
+from Bcfg2.Client import matches_white_list, passes_black_list
class Action(Bcfg2.Client.Tools.Tool):
"""Implement Actions"""
name = 'Action'
- __handles__ = [('PostInstall', None), ('Action', None)]
- __req__ = {'PostInstall': ['name'],
- 'Action': ['name', 'timing', 'when', 'command', 'status']}
+ __handles__ = [('Action', None)]
+ __req__ = {'Action': ['name', 'timing', 'when', 'command', 'status']}
def _action_allowed(self, action):
""" Return true if the given action is allowed to be run by
the whitelist or blacklist """
- if self.setup['decision'] == 'whitelist' and \
- not matches_white_list(action, self.setup['decision_list']):
+ if (Bcfg2.Options.setup.decision == 'whitelist' and
+ not matches_white_list(action,
+ Bcfg2.Options.setup.decision_list)):
self.logger.info("In whitelist mode: suppressing Action: %s" %
action.get('name'))
return False
- if self.setup['decision'] == 'blacklist' and \
- not passes_black_list(action, self.setup['decision_list']):
+ if (Bcfg2.Options.setup.decision == 'blacklist' and
+ not passes_black_list(action,
+ Bcfg2.Options.setup.decision_list)):
self.logger.info("In blacklist mode: suppressing Action: %s" %
action.get('name'))
return False
@@ -38,19 +36,15 @@ class Action(Bcfg2.Client.Tools.Tool):
shell = True
shell_string = '(in shell) '
- if not self.setup['dryrun']:
- if self.setup['interactive']:
+ if not Bcfg2.Options.setup.dryrun:
+ if Bcfg2.Options.setup.interactive:
prompt = ('Run Action %s%s, %s: (y/N): ' %
(shell_string, entry.get('name'),
entry.get('command')))
- # flush input buffer
- while len(select.select([sys.stdin.fileno()], [], [],
- 0.0)[0]) > 0:
- os.read(sys.stdin.fileno(), 4096)
- ans = input(prompt)
+ ans = safe_input(prompt)
if ans not in ['y', 'Y']:
return False
- if self.setup['servicemode'] == 'build':
+ if Bcfg2.Options.setup.service_mode == 'build':
if entry.get('build', 'true') == 'false':
self.logger.debug("Action: Deferring execution of %s due "
"to build mode" % entry.get('command'))
@@ -71,39 +65,29 @@ class Action(Bcfg2.Client.Tools.Tool):
"""Actions always verify true."""
return True
- def VerifyPostInstall(self, dummy, _):
- """Actions always verify true."""
- return True
-
def InstallAction(self, entry):
"""Run actions as pre-checks for bundle installation."""
if entry.get('timing') != 'post':
return self.RunAction(entry)
return True
- def InstallPostInstall(self, entry):
- """ Install a deprecated PostInstall entry """
- self.logger.warning("Installing deprecated PostInstall entry %s" %
- entry.get("name"))
- return self.InstallAction(entry)
-
- def BundleUpdated(self, bundle, states):
+ def BundleUpdated(self, bundle):
"""Run postinstalls when bundles have been updated."""
- for postinst in bundle.findall("PostInstall"):
- if not self._action_allowed(postinst):
- continue
- self.cmd.run(postinst.get('name'))
+ states = dict()
for action in bundle.findall("Action"):
if action.get('timing') in ['post', 'both']:
if not self._action_allowed(action):
continue
states[action] = self.RunAction(action)
+ return states
- def BundleNotUpdated(self, bundle, states):
+ def BundleNotUpdated(self, bundle):
"""Run Actions when bundles have not been updated."""
+ states = dict()
for action in bundle.findall("Action"):
- if action.get('timing') in ['post', 'both'] and \
- action.get('when') != 'modified':
+ if (action.get('timing') in ['post', 'both'] and
+ action.get('when') != 'modified'):
if not self._action_allowed(action):
continue
states[action] = self.RunAction(action)
+ return states
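The Action.py hunk above changes the bundle hooks so each tool now builds and returns its own states dictionary instead of filling in one passed by the caller. A minimal standalone sketch of that contract follows; ToyTool, run_action and the plain-dict actions are hypothetical stand-ins, and only the return-a-dict shape mirrors the diff.

    # Hypothetical tool showing the new hook contract: build and return states.
    class ToyTool(object):
        def run_action(self, action):
            # Pretend every action succeeds.
            return True

        def BundleUpdated(self, actions):
            states = dict()
            for action in actions:
                if action.get('timing') in ('post', 'both'):
                    states[action['name']] = self.run_action(action)
            return states

    if __name__ == '__main__':
        tool = ToyTool()
        print(tool.BundleUpdated([{'name': 'restart-web', 'timing': 'post'}]))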
diff --git a/src/lib/Bcfg2/Client/Tools/Blast.py b/src/lib/Bcfg2/Client/Tools/Blast.py
index 2627c42fe..fd594b4f4 100644
--- a/src/lib/Bcfg2/Client/Tools/Blast.py
+++ b/src/lib/Bcfg2/Client/Tools/Blast.py
@@ -13,9 +13,9 @@ class Blast(Bcfg2.Client.Tools.SYSV.SYSV):
__handles__ = [('Package', 'blast')]
__req__ = {'Package': ['name', 'version', 'bname']}
- def __init__(self, logger, setup, config):
+ def __init__(self, config):
# don't use the sysv constructor
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
noaskfile = tempfile.NamedTemporaryFile()
self.noaskname = noaskfile.name
try:
diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
index 4833f3f68..fab142a7c 100644
--- a/src/lib/Bcfg2/Client/Tools/Chkconfig.py
+++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -3,7 +3,6 @@
"""This is chkconfig support."""
import os
-
import Bcfg2.Client.Tools
import Bcfg2.Client.XML
@@ -96,15 +95,15 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool):
bootcmd = '/sbin/chkconfig %s %s' % (entry.get('name'),
bootstatus)
bootcmdrv = self.cmd.run(bootcmd).success
- if self.setup['servicemode'] == 'disabled':
+ if Bcfg2.Options.setup.servicemode == 'disabled':
# 'disabled' means we don't attempt to modify running svcs
return bootcmdrv
- buildmode = self.setup['servicemode'] == 'build'
- if (entry.get('status') == 'on' and not buildmode) and \
- entry.get('current_status') == 'off':
+ buildmode = Bcfg2.Options.setup.servicemode == 'build'
+ if ((entry.get('status') == 'on' and not buildmode) and
+ entry.get('current_status') == 'off'):
svccmdrv = self.start_service(entry)
- elif (entry.get('status') == 'off' or buildmode) and \
- entry.get('current_status') == 'on':
+ elif ((entry.get('status') == 'off' or buildmode) and
+ entry.get('current_status') == 'on'):
svccmdrv = self.stop_service(entry)
else:
svccmdrv = True # ignore status attribute
diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py
index b544e44d4..53e5e7ec6 100644
--- a/src/lib/Bcfg2/Client/Tools/DebInit.py
+++ b/src/lib/Bcfg2/Client/Tools/DebInit.py
@@ -3,6 +3,7 @@
import glob
import os
import re
+import Bcfg2.Options
import Bcfg2.Client.Tools
# Debian squeeze and beyond use a dependency-based boot sequence
@@ -33,8 +34,8 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
if entry.get('sequence'):
if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or
- deb_version.startswith('5') or
- os.path.exists('/etc/init.d/.legacy-bootordering')):
+ deb_version.startswith('5') or
+ os.path.exists('/etc/init.d/.legacy-bootordering')):
start_sequence = int(entry.get('sequence'))
kill_sequence = 100 - start_sequence
else:
@@ -137,10 +138,10 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
bootcmd = '/usr/sbin/update-rc.d -f %s remove' % \
entry.get('name')
bootcmdrv = self.cmd.run(bootcmd)
- if self.setup['servicemode'] == 'disabled':
+ if Bcfg2.Options.setup.service_mode == 'disabled':
# 'disabled' means we don't attempt to modify running svcs
return bootcmdrv and seqcmdrv
- buildmode = self.setup['servicemode'] == 'build'
+ buildmode = Bcfg2.Options.setup.service_mode == 'build'
if (entry.get('status') == 'on' and not buildmode) and \
entry.get('current_status') == 'off':
svccmdrv = self.start_service(entry)
diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
index 8ff26d8f3..2ab64f86d 100644
--- a/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
+++ b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
@@ -15,8 +15,8 @@ class FreeBSDInit(Bcfg2.Client.Tools.SvcTool):
__handles__ = [('Service', 'freebsd')]
__req__ = {'Service': ['name', 'status']}
- def __init__(self, logger, cfg, setup):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.SvcTool.__init__(self, config)
if os.uname()[0] != 'FreeBSD':
raise Bcfg2.Client.Tools.ToolInstantiationError
diff --git a/src/lib/Bcfg2/Client/Tools/IPS.py b/src/lib/Bcfg2/Client/Tools/IPS.py
index aff276c3a..c998ff083 100644
--- a/src/lib/Bcfg2/Client/Tools/IPS.py
+++ b/src/lib/Bcfg2/Client/Tools/IPS.py
@@ -15,14 +15,13 @@ class IPS(Bcfg2.Client.Tools.PkgTool):
__req__ = {'Package': ['name', 'version']}
pkgtool = ('pkg install --no-refresh %s', ('%s', ['name']))
- def __init__(self, logger, setup, cfg):
+ def __init__(self, config):
self.installed = {}
self.pending_upgrades = set()
self.image = image.Image()
self.image.find_root('/', False)
self.image.load_config()
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, cfg)
- self.cfg = cfg
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
def RefreshPackages(self):
self.installed = dict()
diff --git a/src/lib/Bcfg2/Client/Tools/MacPorts.py b/src/lib/Bcfg2/Client/Tools/MacPorts.py
index bd9d24df3..265171a5a 100644
--- a/src/lib/Bcfg2/Client/Tools/MacPorts.py
+++ b/src/lib/Bcfg2/Client/Tools/MacPorts.py
@@ -12,11 +12,6 @@ class MacPorts(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'macport'
pkgtool = ('/opt/local/bin/port install %s', ('%s', ['name']))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
- self.installed = {}
- self.RefreshPackages()
-
def RefreshPackages(self):
"""Refresh memory hashes of packages."""
pkgcache = self.cmd.run(["/opt/local/bin/port",
@@ -43,9 +38,7 @@ class MacPorts(Bcfg2.Client.Tools.PkgTool):
if entry.attrib['name'] in self.installed:
if (self.installed[entry.attrib['name']] == entry.attrib['version']
- or entry.attrib['version'] == 'any'):
- #if not self.setup['quick'] and \
- # entry.get('verify', 'true') == 'true':
+ or entry.attrib['version'] == 'any'):
#FIXME: We should be able to check this once
# http://trac.macports.org/ticket/15709 is implemented
return True
diff --git a/src/lib/Bcfg2/Client/Tools/OpenCSW.py b/src/lib/Bcfg2/Client/Tools/OpenCSW.py
index 60e362e64..3ea9d835e 100644
--- a/src/lib/Bcfg2/Client/Tools/OpenCSW.py
+++ b/src/lib/Bcfg2/Client/Tools/OpenCSW.py
@@ -14,9 +14,9 @@ class OpenCSW(Bcfg2.Client.Tools.SYSV.SYSV):
__handles__ = [('Package', 'opencsw')]
__req__ = {'Package': ['name', 'version', 'bname']}
- def __init__(self, logger, setup, config):
+ def __init__(self, config):
# don't use the sysv constructor
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
noaskfile = tempfile.NamedTemporaryFile()
self.noaskname = noaskfile.name
try:
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
index 9b84adad0..6237ccce2 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
@@ -13,7 +13,7 @@ class POSIXDevice(POSIXTool):
if entry.get('dev_type') in ['block', 'char']:
# check if major/minor are properly specified
if (entry.get('major') is None or
- entry.get('minor') is None):
+ entry.get('minor') is None):
return False
return True
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/File.py b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
index b1bde1057..d7a70e202 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/File.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
@@ -6,6 +6,7 @@ import stat
import time
import difflib
import tempfile
+import Bcfg2.Options
from Bcfg2.Client.Tools.POSIX.base import POSIXTool
from Bcfg2.Compat import unicode, b64encode, b64decode # pylint: disable=W0622
@@ -43,7 +44,7 @@ class POSIXFile(POSIXTool):
tempdata = entry.text
if isinstance(tempdata, unicode) and unicode != str:
try:
- tempdata = tempdata.encode(self.setup['encoding'])
+ tempdata = tempdata.encode(Bcfg2.Options.setup.encoding)
except UnicodeEncodeError:
err = sys.exc_info()[1]
self.logger.error("POSIX: Error encoding file %s: %s" %
@@ -56,7 +57,7 @@ class POSIXFile(POSIXTool):
if isinstance(tempdata, str) and str != unicode:
tempdatasize = len(tempdata)
else:
- tempdatasize = len(tempdata.encode(self.setup['encoding']))
+ tempdatasize = len(tempdata.encode(Bcfg2.Options.setup.encoding))
different = False
content = None
@@ -78,7 +79,7 @@ class POSIXFile(POSIXTool):
content = open(entry.get('name')).read()
except UnicodeDecodeError:
content = open(entry.get('name'),
- encoding=self.setup['encoding']).read()
+ encoding=Bcfg2.Options.setup.encoding).read()
except IOError:
self.logger.error("POSIX: Failed to read %s: %s" %
(entry.get("name"), sys.exc_info()[1]))
@@ -89,7 +90,7 @@ class POSIXFile(POSIXTool):
self.logger.debug("POSIX: %s has incorrect contents" %
entry.get("name"))
self._get_diffs(
- entry, interactive=self.setup['interactive'],
+ entry, interactive=Bcfg2.Options.setup.interactive,
sensitive=entry.get('sensitive', 'false').lower() == 'true',
is_binary=is_binary, content=content)
return POSIXTool.verify(self, entry, modlist) and not different
@@ -116,7 +117,7 @@ class POSIXFile(POSIXTool):
os.fdopen(newfd, 'w').write(filedata)
else:
os.fdopen(newfd, 'wb').write(
- filedata.encode(self.setup['encoding']))
+ filedata.encode(Bcfg2.Options.setup.encoding))
except (OSError, IOError):
err = sys.exc_info()[1]
self.logger.error("POSIX: Failed to open temp file %s for writing "
@@ -181,7 +182,8 @@ class POSIXFile(POSIXTool):
(entry.get("name"), sys.exc_info()[1]))
return False
if not is_binary:
- is_binary |= not self._is_string(content, self.setup['encoding'])
+ is_binary |= not self._is_string(content,
+ Bcfg2.Options.setup.encoding)
if is_binary:
# don't compute diffs if the file is binary
prompt.append('Binary file, no printable diff')
@@ -194,7 +196,7 @@ class POSIXFile(POSIXTool):
if diff:
udiff = '\n'.join(l.rstrip('\n') for l in diff)
if hasattr(udiff, "decode"):
- udiff = udiff.decode(self.setup['encoding'])
+ udiff = udiff.decode(Bcfg2.Options.setup.encoding)
try:
prompt.append(udiff)
except UnicodeEncodeError:
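The File.py hunks switch every encoding decision over to the globally parsed option (Bcfg2.Options.setup.encoding). A rough standalone sketch of the two places that value is used, with a hard-coded ENCODING constant standing in for the configured option:

    import difflib

    ENCODING = 'utf-8'  # assumed stand-in for the configured client encoding

    def content_size(text):
        """Byte length of the content once written with the configured encoding."""
        return len(text.encode(ENCODING))

    def printable_diff(old, new):
        """Build a unified diff and decode it on Python 2, as the hunk above does."""
        diff = difflib.unified_diff(old.splitlines(), new.splitlines(), lineterm='')
        udiff = '\n'.join(diff)
        if hasattr(udiff, 'decode'):  # only true for Python 2 byte strings
            udiff = udiff.decode(ENCODING)
        return udiff

    if __name__ == '__main__':
        print(content_size(u'caf\xe9\n'))
        print(printable_diff(u'a\nb\n', u'a\nc\n'))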
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
index f7251ca50..d67a68c8b 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
@@ -24,8 +24,8 @@ class POSIXNonexistent(POSIXTool):
for struct in self.config.getchildren():
for el in struct.getchildren():
if (el.tag == 'Path' and
- el.get('type') != 'nonexistent' and
- el.get('name').startswith(ename)):
+ el.get('type') != 'nonexistent' and
+ el.get('name').startswith(ename)):
self.logger.error('POSIX: Not removing %s. One or '
'more files in this directory are '
'specified in your configuration.' %
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
index 7708c4f72..13b45a759 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
@@ -4,20 +4,31 @@ import os
import re
import sys
import shutil
-from datetime import datetime
+import Bcfg2.Options
import Bcfg2.Client.Tools
+from datetime import datetime
from Bcfg2.Compat import walk_packages
from Bcfg2.Client.Tools.POSIX.base import POSIXTool
class POSIX(Bcfg2.Client.Tools.Tool):
"""POSIX File support code."""
- name = 'POSIX'
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
- self.ppath = setup['ppath']
- self.max_copies = setup['max_copies']
+ options = Bcfg2.Client.Tools.Tool.options + [
+ Bcfg2.Options.PathOption(
+ cf=('paranoid', 'path'), default='/var/cache/bcfg2',
+ dest='paranoid_path',
+ help='Specify path for paranoid file backups'),
+ Bcfg2.Options.Option(
+ cf=('paranoid', 'max_copies'), default=1, type=int,
+ dest='paranoid_copies',
+ help='Specify the number of paranoid copies you want'),
+ Bcfg2.Options.BooleanOption(
+ '-P', '--paranoid', cf=('client', 'paranoid'),
+ help='Make automatic backups of config files')]
+
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
self._handlers = self._load_handlers()
self.logger.debug("POSIX: Handlers loaded: %s" %
(", ".join(self._handlers.keys())))
@@ -53,7 +64,7 @@ class POSIX(Bcfg2.Client.Tools.Tool):
if POSIXTool in hdlr.__mro__:
# figure out what entry type this handler handles
etype = hdlr.__name__[5:].lower()
- rv[etype] = hdlr(self.logger, self.setup, self.config)
+ rv[etype] = hdlr(self.config)
return rv
def canVerify(self, entry):
@@ -89,7 +100,7 @@ class POSIX(Bcfg2.Client.Tools.Tool):
self.logger.debug("POSIX: Verifying entry %s:%s:%s" %
(entry.tag, entry.get("type"), entry.get("name")))
ret = self._handlers[entry.get("type")].verify(entry, modlist)
- if self.setup['interactive'] and not ret:
+ if Bcfg2.Options.setup.interactive and not ret:
entry.set('qtext',
'%s\nInstall %s %s: (y/N) ' %
(entry.get('qtext', ''),
@@ -103,35 +114,39 @@ class POSIX(Bcfg2.Client.Tools.Tool):
bkupnam + r'_\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}$')
# current list of backups for this file
try:
- bkuplist = [f for f in os.listdir(self.ppath) if
- bkup_re.match(f)]
+ bkuplist = [f
+ for f in os.listdir(Bcfg2.Options.setup.paranoid_path)
+ if bkup_re.match(f)]
except OSError:
err = sys.exc_info()[1]
self.logger.error("POSIX: Failed to create backup list in %s: %s" %
- (self.ppath, err))
+ (Bcfg2.Options.setup.paranoid_path, err))
return
bkuplist.sort()
- while len(bkuplist) >= int(self.max_copies):
+ while len(bkuplist) >= int(Bcfg2.Options.setup.paranoid_copies):
# remove the oldest backup available
oldest = bkuplist.pop(0)
self.logger.info("POSIX: Removing old backup %s" % oldest)
try:
- os.remove(os.path.join(self.ppath, oldest))
+ os.remove(os.path.join(Bcfg2.Options.setup.paranoid_path,
+ oldest))
except OSError:
err = sys.exc_info()[1]
- self.logger.error("POSIX: Failed to remove old backup %s: %s" %
- (os.path.join(self.ppath, oldest), err))
+ self.logger.error(
+ "POSIX: Failed to remove old backup %s: %s" %
+ (os.path.join(Bcfg2.Options.setup.paranoid_path, oldest),
+ err))
def _paranoid_backup(self, entry):
""" Take a backup of the specified entry for paranoid mode """
if (entry.get("paranoid", 'false').lower() == 'true' and
- self.setup.get("paranoid", False) and
- entry.get('current_exists', 'true') == 'true' and
- not os.path.isdir(entry.get("name"))):
+ Bcfg2.Options.setup.paranoid and
+ entry.get('current_exists', 'true') == 'true' and
+ not os.path.isdir(entry.get("name"))):
self._prune_old_backups(entry)
bkupnam = "%s_%s" % (entry.get('name').replace('/', '_'),
datetime.isoformat(datetime.now()))
- bfile = os.path.join(self.ppath, bkupnam)
+ bfile = os.path.join(Bcfg2.Options.setup.paranoid_path, bkupnam)
try:
shutil.copy(entry.get('name'), bfile)
self.logger.info("POSIX: Backup of %s saved to %s" %
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
index 1e73d4f11..bce7ba0ca 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/base.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
@@ -105,23 +105,23 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
path = entry.get("name")
rv = True
- if entry.get("owner") and entry.get("group"):
- try:
- self.logger.debug("POSIX: Setting ownership of %s to %s:%s" %
- (path,
- self._norm_entry_uid(entry),
- self._norm_entry_gid(entry)))
- os.chown(path, self._norm_entry_uid(entry),
- self._norm_entry_gid(entry))
- except KeyError:
- self.logger.error('POSIX: Failed to change ownership of %s' %
- path)
- rv = False
- os.chown(path, 0, 0)
- except OSError:
- self.logger.error('POSIX: Failed to change ownership of %s' %
- path)
- rv = False
+ if os.geteuid() == 0:
+ if entry.get("owner") and entry.get("group"):
+ try:
+ self.logger.debug("POSIX: Setting ownership of %s to %s:%s"
+ % (path,
+ self._norm_entry_uid(entry),
+ self._norm_entry_gid(entry)))
+ os.chown(path, self._norm_entry_uid(entry),
+ self._norm_entry_gid(entry))
+ except (OSError, KeyError):
+ self.logger.error('POSIX: Failed to change ownership of %s'
+ % path)
+ rv = False
+ if sys.exc_info()[0] == KeyError:
+ os.chown(path, 0, 0)
+ else:
+ self.logger.debug("POSIX: Run as non-root, not setting ownership")
if entry.get("mode"):
wanted_mode = int(entry.get('mode'), 8)
@@ -508,7 +508,8 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
(path, attrib['current_group'], entry.get('group')))
if (wanted_mode and
- oct_mode(int(attrib['current_mode'], 8)) != oct_mode(wanted_mode)):
+ oct_mode(int(attrib['current_mode'], 8)) !=
+ oct_mode(wanted_mode)):
errors.append("Permissions for path %s are incorrect. "
"Current permissions are %s but should be %s" %
(path, attrib['current_mode'], entry.get('mode')))
@@ -533,7 +534,7 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
else:
wanted_secontext = entry.get("secontext")
if (wanted_secontext and
- attrib['current_secontext'] != wanted_secontext):
+ attrib['current_secontext'] != wanted_secontext):
errors.append("SELinux context for path %s is incorrect. "
"Current context is %s but should be %s" %
(path, attrib['current_secontext'],
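The base.py hunk guards ownership changes behind an effective-uid check and folds the OSError/KeyError handling into one except clause, falling back to root:root only when the uid/gid lookup failed. A standalone sketch of that control flow; set_ownership and the getpwnam/getgrnam lookups are illustrative, while the real code resolves ids via _norm_entry_uid/_norm_entry_gid on the entry:

    import os
    import sys
    import pwd
    import grp

    def set_ownership(path, owner, group):
        """Only chown when running as root; a KeyError from the name lookup
        falls back to root:root, a plain OSError just reports failure."""
        if os.geteuid() != 0:
            print("sketch: non-root run, not setting ownership of %s" % path)
            return True
        try:
            uid = pwd.getpwnam(owner).pw_uid    # may raise KeyError
            gid = grp.getgrnam(group).gr_gid    # may raise KeyError
            os.chown(path, uid, gid)            # may raise OSError
            return True
        except (OSError, KeyError):
            print("sketch: failed to change ownership of %s" % path)
            if sys.exc_info()[0] is KeyError:
                os.chown(path, 0, 0)
            return False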
diff --git a/src/lib/Bcfg2/Client/Tools/POSIXUsers.py b/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
index bf23aca6b..58a3bbdfc 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
@@ -3,14 +3,38 @@ and groupadd/mod/del """
import pwd
import grp
+import Bcfg2.Options
import Bcfg2.Client.XML
import Bcfg2.Client.Tools
from Bcfg2.Utils import PackedDigitRange
+def uid_range_type(val):
+ """ Option type to unpack a list of numerical ranges """
+ return PackedDigitRange(*Bcfg2.Options.Types.comma_list(val))
+
+
class POSIXUsers(Bcfg2.Client.Tools.Tool):
""" A tool to handle creating users and groups with
useradd/mod/del and groupadd/mod/del """
+ options = Bcfg2.Client.Tools.Tool.options + [
+ Bcfg2.Options.Option(
+ cf=('POSIXUsers', 'uid_whitelist'), default=[],
+ type=uid_range_type,
+ help="UID ranges the POSIXUsers tool will manage"),
+ Bcfg2.Options.Option(
+ cf=('POSIXUsers', 'gid_whitelist'), default=[],
+ type=uid_range_type,
+ help="GID ranges the POSIXUsers tool will manage"),
+ Bcfg2.Options.Option(
+ cf=('POSIXUsers', 'uid_blacklist'), default=[],
+ type=uid_range_type,
+ help="UID ranges the POSIXUsers tool will not manage"),
+ Bcfg2.Options.Option(
+ cf=('POSIXUsers', 'gid_blacklist'), default=[],
+ type=uid_range_type,
+ help="GID ranges the POSIXUsers tool will not manage")]
+
__execs__ = ['/usr/sbin/useradd', '/usr/sbin/usermod', '/usr/sbin/userdel',
'/usr/sbin/groupadd', '/usr/sbin/groupmod',
'/usr/sbin/groupdel']
@@ -18,7 +42,6 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
('POSIXGroup', None)]
__req__ = dict(POSIXUser=['name'],
POSIXGroup=['name'])
- experimental = True
#: A mapping of XML entry attributes to the indexes of
#: corresponding values in the get{pw|gr}all data structures
@@ -30,25 +53,15 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
#: user or group
id_mapping = dict(POSIXUser="uid", POSIXGroup="gid")
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
self.set_defaults = dict(POSIXUser=self.populate_user_entry,
POSIXGroup=lambda g: g)
self._existing = None
- self._whitelist = dict(POSIXUser=None, POSIXGroup=None)
- self._blacklist = dict(POSIXUser=None, POSIXGroup=None)
- if self.setup['posix_uid_whitelist']:
- self._whitelist['POSIXUser'] = \
- PackedDigitRange(*self.setup['posix_uid_whitelist'])
- else:
- self._blacklist['POSIXUser'] = \
- PackedDigitRange(*self.setup['posix_uid_blacklist'])
- if self.setup['posix_gid_whitelist']:
- self._whitelist['POSIXGroup'] = \
- PackedDigitRange(*self.setup['posix_gid_whitelist'])
- else:
- self._blacklist['POSIXGroup'] = \
- PackedDigitRange(*self.setup['posix_gid_blacklist'])
+ self._whitelist = dict(POSIXUser=Bcfg2.Options.setup.uid_whitelist,
+ POSIXGroup=Bcfg2.Options.setup.gid_whitelist)
+ self._blacklist = dict(POSIXUser=Bcfg2.Options.setup.uid_blacklist,
+ POSIXGroup=Bcfg2.Options.setup.gid_blacklist)
@property
def existing(self):
@@ -87,7 +100,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
return False
return True
- def Inventory(self, states, structures=None):
+ def Inventory(self, structures=None):
if not structures:
structures = self.config.getchildren()
# we calculate a list of all POSIXUser and POSIXGroup entries,
@@ -107,7 +120,8 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
(group, entry.get("name")))
struct.append(Bcfg2.Client.XML.Element("POSIXGroup",
name=group))
- return Bcfg2.Client.Tools.Tool.Inventory(self, states, structures)
+ return Bcfg2.Client.Tools.Tool.Inventory(self, structures)
+ Inventory.__doc__ = Bcfg2.Client.Tools.Tool.Inventory.__doc__
def FindExtra(self):
extra = []
@@ -164,7 +178,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
% (entry.tag, entry.get("name"),
actual, expected)]))
rv = False
- if self.setup['interactive'] and not rv:
+ if Bcfg2.Options.setup.interactive and not rv:
entry.set('qtext',
'%s\nInstall %s %s: (y/N) ' %
(entry.get('qtext', ''), entry.tag, entry.get('name')))
@@ -173,7 +187,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
def VerifyPOSIXGroup(self, entry, _):
""" Verify a POSIXGroup entry """
rv = self._verify(entry)
- if self.setup['interactive'] and not rv:
+ if Bcfg2.Options.setup.interactive and not rv:
entry.set('qtext',
'%s\nInstall %s %s: (y/N) ' %
(entry.get('qtext', ''), entry.tag, entry.get('name')))
@@ -190,7 +204,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
for attr, idx in self.attr_mapping[entry.tag].items():
val = str(self.existing[entry.tag][entry.get("name")][idx])
entry.set("current_%s" %
- attr, val.decode(self.setup['encoding']))
+ attr, val.decode(Bcfg2.Options.setup.encoding))
if attr in ["uid", "gid"]:
if entry.get(attr) is None:
# no uid/gid specified, so we let the tool
@@ -212,7 +226,8 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors))
return len(errors) == 0
- def Install(self, entries, states):
+ def Install(self, entries):
+ states = dict()
for entry in entries:
# install groups first, so that all groups exist for
# users that might need them
@@ -222,6 +237,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
if entry.tag == 'POSIXUser':
states[entry] = self._install(entry)
self._existing = None
+ return states
def _install(self, entry):
""" add or modify a user or group using the appropriate command """
diff --git a/src/lib/Bcfg2/Client/Tools/Pacman.py b/src/lib/Bcfg2/Client/Tools/Pacman.py
index a4cfd3315..2ab9b7403 100644
--- a/src/lib/Bcfg2/Client/Tools/Pacman.py
+++ b/src/lib/Bcfg2/Client/Tools/Pacman.py
@@ -13,11 +13,6 @@ class Pacman(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'pacman'
pkgtool = ("/usr/bin/pacman --needed --noconfirm --noprogressbar")
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
- self.installed = {}
- self.RefreshPackages()
-
def RefreshPackages(self):
'''Refresh memory hashes of packages'''
self.installed = {}
@@ -43,8 +38,6 @@ class Pacman(Bcfg2.Client.Tools.PkgTool):
return True
elif self.installed[entry.attrib['name']] == \
entry.attrib['version']:
- #if not self.setup['quick'] and \
- # entry.get('verify', 'true') == 'true':
#FIXME: need to figure out if pacman
# allows you to verify packages
return True
@@ -66,7 +59,7 @@ class Pacman(Bcfg2.Client.Tools.PkgTool):
self.RefreshPackages()
self.extra = self.FindExtra()
- def Install(self, packages, states):
+ def Install(self, packages):
'''
Pacman Install
'''
diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py
index 2d8b66ce5..a61ede820 100644
--- a/src/lib/Bcfg2/Client/Tools/Portage.py
+++ b/src/lib/Bcfg2/Client/Tools/Portage.py
@@ -5,9 +5,13 @@ import Bcfg2.Client.Tools
class Portage(Bcfg2.Client.Tools.PkgTool):
- """The Gentoo toolset implements package and service operations and
- inherits the rest from Toolset.Toolset."""
- name = 'Portage'
+ """The Gentoo toolset implements package and service operations
+ and inherits the rest from Tools.Tool."""
+
+ options = Bcfg2.Client.Tools.PkgTool.options + [
+ Bcfg2.Options.BooleanOption(
+ cf=('Portage', 'binpkgonly'), help='Portage binary packages only')]
+
__execs__ = ['/usr/bin/emerge', '/usr/bin/equery']
__handles__ = [('Package', 'ebuild')]
__req__ = {'Package': ['name', 'version']}
@@ -17,17 +21,15 @@ class Portage(Bcfg2.Client.Tools.PkgTool):
'version']))
pkgtool = ('emerge %s', ('=%s-%s', ['name', 'version']))
- def __init__(self, logger, cfg, setup):
+ def __init__(self, config):
self._initialised = False
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, cfg, setup)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
self._initialised = True
self.__important__ = self.__important__ + ['/etc/make.conf']
self._pkg_pattern = re.compile(r'(.*)-(\d.*)')
self._ebuild_pattern = re.compile('(ebuild|binary)')
- self.cfg = cfg
self.installed = {}
- self._binpkgonly = self.setup.get('portage_binpkgonly', False)
- if self._binpkgonly:
+ if Bcfg2.Options.setup.binpkgonly:
self.pkgtool = self._binpkgtool
self.RefreshPackages()
@@ -62,9 +64,9 @@ class Portage(Bcfg2.Client.Tools.PkgTool):
version = self.installed[entry.get('name')]
entry.set('current_version', version)
- if not self.setup['quick']:
+ if not Bcfg2.Options.setup.quick:
if ('verify' not in entry.attrib or
- entry.get('verify').lower() == 'true'):
+ entry.get('verify').lower() == 'true'):
# Check the package if:
# - Not running in quick mode
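The Portage hunk replaces the portage_binpkgonly setup key with a BooleanOption bound to [Portage]/binpkgonly in the config file. A standalone sketch of reading such a boolean with configparser; this illustrates the shape of the setting, not how Bcfg2.Options actually parses bcfg2.conf:

    import configparser

    SAMPLE = "[Portage]\nbinpkgonly = yes\n"

    def binpkgonly_from_config(text):
        """Return the [Portage]/binpkgonly boolean, defaulting to False."""
        parser = configparser.ConfigParser()
        parser.read_string(text)
        return parser.getboolean('Portage', 'binpkgonly', fallback=False)

    if __name__ == '__main__':
        print(binpkgonly_from_config(SAMPLE))  # True -> use the binary pkgtool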
diff --git a/src/lib/Bcfg2/Client/Tools/RPM.py b/src/lib/Bcfg2/Client/Tools/RPM.py
index a4dd2b730..173623f61 100644
--- a/src/lib/Bcfg2/Client/Tools/RPM.py
+++ b/src/lib/Bcfg2/Client/Tools/RPM.py
@@ -1,12 +1,1139 @@
"""Bcfg2 Support for RPMS"""
-import os.path
+import os
import rpm
-import rpmtools
import Bcfg2.Client.Tools
+import grp
+import optparse
+import pwd
+import stat
+import sys
+try:
+ import hashlib
+ py24compat = False
+except ImportError:
+ # FIXME: Remove when client python dep is 2.5 or greater
+ py24compat = True
+ import md5
+
+# Determine what prelink tools we have available.
+# The isprelink module is a python extension that examines the ELF headers
+# to see if the file has been prelinked. If it is not present a lot of files
+# are unnecessarily run through the prelink command.
+try:
+ from isprelink import *
+ isprelink_imported = True
+except ImportError:
+ isprelink_imported = False
+
+# If the prelink command is installed on the system then we need to do
+# prelink -y on files.
+if os.access('/usr/sbin/prelink', os.X_OK):
+ prelink_exists = True
+else:
+ prelink_exists = False
+
+# If we don't have isprelink then we will use the prelink configuration file to
+# filter what we have to put through prelink -y.
+import re
+blacklist = []
+whitelist = []
+try:
+ f = open('/etc/prelink.conf', mode='r')
+ for line in f:
+ if line.startswith('#'):
+ continue
+ option, pattern = line.split()
+ if pattern.startswith('*.'):
+ pattern = pattern.replace('*.', '\.')
+ pattern += '$'
+ elif pattern.startswith('/'):
+ pattern = '^' + pattern
+ if option == '-b':
+ blacklist.append(pattern)
+ elif option == '-l':
+ whitelist.append(pattern)
+ f.close()
+except IOError:
+ pass
+
+blacklist_re = re.compile('|'.join(blacklist))
+whitelist_re = re.compile('|'.join(whitelist))
+
+# Flags that are not defined in rpm-python.
+# They are defined in lib/rpmcli.h
+# Bit(s) for verifyFile() attributes.
+#
+RPMVERIFY_NONE = 0
+RPMVERIFY_MD5 = 1 # 1 << 0 # from %verify(md5)
+RPMVERIFY_FILESIZE = 2 # 1 << 1 # from %verify(size)
+RPMVERIFY_LINKTO = 4 # 1 << 2 # from %verify(link)
+RPMVERIFY_USER = 8 # 1 << 3 # from %verify(user)
+RPMVERIFY_GROUP = 16 # 1 << 4 # from %verify(group)
+RPMVERIFY_MTIME = 32 # 1 << 5 # from %verify(mtime)
+RPMVERIFY_MODE = 64 # 1 << 6 # from %verify(mode)
+RPMVERIFY_RDEV = 128 # 1 << 7 # from %verify(rdev)
+RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # from --nocontexts
+RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # readlink failed
+RPMVERIFY_READFAIL = 536870912 # (1 << 29) # file read failed
+RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # lstat failed
+RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # lgetfilecon failed
+
+RPMVERIFY_FAILURES = \
+ (RPMVERIFY_LSTATFAIL | RPMVERIFY_READFAIL |
+ RPMVERIFY_READLINKFAIL | RPMVERIFY_LGETFILECONFAIL)
+
+# Bit(s) to control rpm_verify() operation.
+#
+VERIFY_DEFAULT = 0 # /*!< */
+VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
+VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
+VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
+VERIFY_USER = 1 << 3 # /*!< from --nouser */
+VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
+VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
+VERIFY_MODE = 1 << 6 # /*!< from --nomode */
+VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
+# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
+VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
+VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
+VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
+VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
+VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
+VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
+VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
+VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
+VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
+VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
+VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
+VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
+VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
+# /* bits 28-31 used in rpmVerifyAttrs */
+
+# Comes from C source: lib/rpmcli.h
+VERIFY_ATTRS = \
+ (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP |
+ VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
+
+VERIFY_ALL = \
+ (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT |
+ VERIFY_DIGEST | VERIFY_SIGNATURE | VERIFY_HDRCHK)
+
+
+# Some masks for what checks to NOT do on these file types.
+# The C code actually sets these up for every file.
+DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME |
+ RPMVERIFY_LINKTO)
+
+# These file types all have the same mask, but hopefully this will make the
+# code more readable.
+FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
+
+LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME |
+ RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
+
+REG_FLAGS = ~(RPMVERIFY_LINKTO)
+
+
+def s_isdev(mode):
+ """
+ Check to see if a file is a device.
+
+ """
+ return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
+
+
+def rpmpackagelist(rts):
+ """
+ Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
+ Requires rpmtransactionset() to be run first to get a ts.
+ Returns a list of pkgspec dicts.
+
+ e.g. [{'name':'foo', 'epoch':'20', 'version':'1.2',
+ 'release':'5', 'arch':'x86_64' },
+ {'name':'bar', 'epoch':'10', 'version':'5.2',
+ 'release':'2', 'arch':'x86_64' }]
+
+ """
+ return [
+ {'name': header[rpm.RPMTAG_NAME],
+ 'epoch': header[rpm.RPMTAG_EPOCH],
+ 'version': header[rpm.RPMTAG_VERSION],
+ 'release': header[rpm.RPMTAG_RELEASE],
+ 'arch': header[rpm.RPMTAG_ARCH],
+ 'gpgkeyid':
+ header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
+ for header in rts.dbMatch()]
+
+
+def getindexbykeyword(index_ts, **kwargs):
+ """
+ Return list of indexes from the rpmdb matching keywords
+ ex: getindexbykeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+ keywords as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ index_mi = index_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] is not None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys())
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in index_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(index_mi.instance())
+ del index_mi
+ return lst
+
+
+def getheadersbykeyword(header_ts, **kwargs):
+ """
+ Borrowed parts of this from Yum. Need to fix it, though.
+ Epoch is not handled right.
+
+ Return list of headers from the rpmdb matching keywords
+ ex: getHeadersByKeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+ keywords as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ header_mi = header_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] is not None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys())
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in header_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(hdr)
+ del header_mi
+ return lst
+
+
+def prelink_md5_check(filename):
+ """
+ Checks if a file is prelinked. If it is, run it through prelink -y
+ to get the unprelinked md5 and file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+ Always return the md5.
+
+ """
+ prelink = False
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False, 0
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+ elif (whitelist_re.search(filename) and not
+ blacklist_re.search(filename)):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+
+ fsize = 0
+ if py24compat:
+ chksum = md5.new()
+ else:
+ chksum = hashlib.md5()
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+ chksum.update(data)
+ plf.close()
+ file_md5 = chksum.hexdigest()
+ if prelink:
+ return file_md5, fsize
+ else:
+ return file_md5, 0
+
+
+def prelink_size_check(filename):
+ """
+ This check is only done if the prelink_md5_check() is not done first.
+
+ Checks if a file is prelinked. If it is, run it through prelink -y
+ to get the unprelinked file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+
+ """
+ fsize = 0
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ elif (whitelist_re.search(filename) and not
+ blacklist_re.search(filename)):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ plf.close()
+
+ return fsize
+
+
+def debug_verify_flags(vflags):
+ """
+ Decodes the verify flags bits.
+ """
+ if vflags & RPMVERIFY_MD5:
+ print('RPMVERIFY_MD5')
+ if vflags & RPMVERIFY_FILESIZE:
+ print('RPMVERIFY_FILESIZE')
+ if vflags & RPMVERIFY_LINKTO:
+ print('RPMVERIFY_LINKTO')
+ if vflags & RPMVERIFY_USER:
+ print('RPMVERIFY_USER')
+ if vflags & RPMVERIFY_GROUP:
+ print('RPMVERIFY_GROUP')
+ if vflags & RPMVERIFY_MTIME:
+ print('RPMVERIFY_MTIME')
+ if vflags & RPMVERIFY_MODE:
+ print('RPMVERIFY_MODE')
+ if vflags & RPMVERIFY_RDEV:
+ print('RPMVERIFY_RDEV')
+ if vflags & RPMVERIFY_CONTEXTS:
+ print('RPMVERIFY_CONTEXTS')
+ if vflags & RPMVERIFY_READLINKFAIL:
+ print('RPMVERIFY_READLINKFAIL')
+ if vflags & RPMVERIFY_READFAIL:
+ print('RPMVERIFY_READFAIL')
+ if vflags & RPMVERIFY_LSTATFAIL:
+ print('RPMVERIFY_LSTATFAIL')
+ if vflags & RPMVERIFY_LGETFILECONFAIL:
+ print('RPMVERIFY_LGETFILECONFAIL')
+
+
+def debug_file_flags(fflags):
+ """
+ Decodes the file flags bits.
+ """
+ if fflags & rpm.RPMFILE_CONFIG:
+ print('rpm.RPMFILE_CONFIG')
+
+ if fflags & rpm.RPMFILE_DOC:
+ print('rpm.RPMFILE_DOC')
+
+ if fflags & rpm.RPMFILE_ICON:
+ print('rpm.RPMFILE_ICON')
+
+ if fflags & rpm.RPMFILE_MISSINGOK:
+ print('rpm.RPMFILE_MISSINGOK')
+
+ if fflags & rpm.RPMFILE_NOREPLACE:
+ print('rpm.RPMFILE_NOREPLACE')
+
+ if fflags & rpm.RPMFILE_GHOST:
+ print('rpm.RPMFILE_GHOST')
+
+ if fflags & rpm.RPMFILE_LICENSE:
+ print('rpm.RPMFILE_LICENSE')
+
+ if fflags & rpm.RPMFILE_README:
+ print('rpm.RPMFILE_README')
+
+ if fflags & rpm.RPMFILE_EXCLUDE:
+ print('rpm.RPMFILE_EXCLUDE')
+
+ if fflags & rpm.RPMFILE_UNPATCHED:
+ print('rpm.RPMFILE_UNPATCHED')
+
+ if fflags & rpm.RPMFILE_PUBKEY:
+ print('rpm.RPMFILE_PUBKEY')
+
+
+def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
+ """
+ Verify all the files in a package.
+
+ Returns a list of error flags, the file type and file name. The list
+ entries are strings that are the same as the labels for the bitwise
+ flags used in the C code.
+
+ """
+ (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate,
+ vflags, fuser, fgroup, fmd5) = fileinfo
+
+ # 1. rpmtsRootDir stuff. What does it do and where do I get it from?
+
+ file_results = []
+ flags = vflags
+
+ # Check to see if the file was installed - if not pretend all is ok.
+ # This is what the rpm C code does!
+ if fstate != rpm.RPMFILE_STATE_NORMAL:
+ return file_results
+
+ # Get the installed files stats
+ try:
+ lstat = os.lstat(fname)
+ except OSError:
+ if not (fflags & (rpm.RPMFILE_MISSINGOK | rpm.RPMFILE_GHOST)):
+ file_results.append('RPMVERIFY_LSTATFAIL')
+ #file_results.append(fname)
+ return file_results
+
+ # 5. Contexts? SELinux stuff?
+
+ # Setup what checks to do. This is straight out of the C code.
+ if stat.S_ISDIR(lstat.st_mode):
+ flags &= DIR_FLAGS
+ elif stat.S_ISLNK(lstat.st_mode):
+ flags &= LINK_FLAGS
+ elif stat.S_ISFIFO(lstat.st_mode):
+ flags &= FIFO_FLAGS
+ elif stat.S_ISCHR(lstat.st_mode):
+ flags &= CHR_FLAGS
+ elif stat.S_ISBLK(lstat.st_mode):
+ flags &= BLK_FLAGS
+ else:
+ flags &= REG_FLAGS
+
+ if (fflags & rpm.RPMFILE_GHOST):
+ flags &= GHOST_FLAGS
+
+ flags &= ~(omitmask | RPMVERIFY_FAILURES)
+
+ # 8. SELinux stuff.
+
+ prelink_size = 0
+ if flags & RPMVERIFY_MD5:
+ prelink_md5, prelink_size = prelink_md5_check(fname)
+ if prelink_md5 is False:
+ file_results.append('RPMVERIFY_MD5')
+ file_results.append('RPMVERIFY_READFAIL')
+ elif prelink_md5 != fmd5:
+ file_results.append('RPMVERIFY_MD5')
+
+ if flags & RPMVERIFY_LINKTO:
+ linkto = os.readlink(fname)
+ if not linkto:
+ file_results.append('RPMVERIFY_READLINKFAIL')
+ file_results.append('RPMVERIFY_LINKTO')
+ else:
+ if len(rpmlinktos) == 0 or linkto != rpmlinktos:
+ file_results.append('RPMVERIFY_LINKTO')
+
+ if flags & RPMVERIFY_FILESIZE:
+ if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
+ prelink_size = prelink_size_check(fname)
+ if (prelink_size != 0): # This is a prelinked file.
+ if (prelink_size != fsize):
+ file_results.append('RPMVERIFY_FILESIZE')
+ elif lstat.st_size != fsize: # It wasn't a prelinked file.
+ file_results.append('RPMVERIFY_FILESIZE')
+
+ if flags & RPMVERIFY_MODE:
+ metamode = fmode
+ filemode = lstat.st_mode
+
+ # Comparing the type of %ghost files is meaningless, but perms are ok.
+ if fflags & rpm.RPMFILE_GHOST:
+ metamode &= ~0xf000
+ filemode &= ~0xf000
+
+ if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
+ (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
+ file_results.append('RPMVERIFY_MODE')
+
+ if flags & RPMVERIFY_RDEV:
+ if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
+ stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
+ file_results.append('RPMVERIFY_RDEV')
+ elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
+ st_rdev = lstat.st_rdev
+ if frdev != st_rdev:
+ file_results.append('RPMVERIFY_RDEV')
+
+ if flags & RPMVERIFY_MTIME:
+ if lstat.st_mtime != fmtime:
+ file_results.append('RPMVERIFY_MTIME')
+
+ if flags & RPMVERIFY_USER:
+ try:
+ user = pwd.getpwuid(lstat.st_uid)[0]
+ except KeyError:
+ user = None
+ if not user or not fuser or (user != fuser):
+ file_results.append('RPMVERIFY_USER')
+
+ if flags & RPMVERIFY_GROUP:
+ try:
+ group = grp.getgrgid(lstat.st_gid)[0]
+ except KeyError:
+ group = None
+ if not group or not fgroup or (group != fgroup):
+ file_results.append('RPMVERIFY_GROUP')
+
+ return file_results
+
+
+def rpm_verify_dependencies(header):
+ """
+ Check package dependencies. Header is an rpm.hdr.
+
+ Don't like opening another ts to do this, but
+ it was the only way I could find of clearing the ts
+ out.
+
+ Have asked on the rpm-maint list on how to do
+ this the right way (28 Feb 2007).
+
+ ts.check() returns:
+
+ ((name, version, release), (reqname, reqversion), \
+ flags, suggest, sense)
+
+ """
+ _ts1 = rpmtransactionset()
+ _ts1.addInstall(header, 'Dep Check', 'i')
+ dep_errors = _ts1.check()
+ _ts1.closeDB()
+ return dep_errors
+
+
+def rpm_verify_package(vp_ts, header, verify_options):
+ """
+ Verify a single package specified by header. Header is an rpm.hdr.
+
+ If errors are found it returns a dictionary of errors.
+
+ """
+ # Set some transaction level flags.
+ vsflags = 0
+ if 'nodigest' in verify_options:
+ vsflags |= rpm._RPMVSF_NODIGESTS
+ if 'nosignature' in verify_options:
+ vsflags |= rpm._RPMVSF_NOSIGNATURES
+ ovsflags = vp_ts.setVSFlags(vsflags)
+
+ # Map from the Python options to the rpm bitwise flags.
+ omitmask = 0
+
+ if 'nolinkto' in verify_options:
+ omitmask |= VERIFY_LINKTO
+ if 'nomd5' in verify_options:
+ omitmask |= VERIFY_MD5
+ if 'nosize' in verify_options:
+ omitmask |= VERIFY_SIZE
+ if 'nouser' in verify_options:
+ omitmask |= VERIFY_USER
+ if 'nogroup' in verify_options:
+ omitmask |= VERIFY_GROUP
+ if 'nomtime' in verify_options:
+ omitmask |= VERIFY_MTIME
+ if 'nomode' in verify_options:
+ omitmask |= VERIFY_MODE
+ if 'nordev' in verify_options:
+ omitmask |= VERIFY_RDEV
+
+ omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
+
+ package_results = {}
+
+ # Check Signatures and Digests.
+ # No idea what this might return. Need to break something to see.
+ # Setting the vsflags above determines what gets checked in the header.
+ hdr_stat = vp_ts.hdrCheck(header.unload())
+ if hdr_stat:
+ package_results['hdr'] = hdr_stat
+
+ # Check Package Dependencies.
+ if 'nodeps' not in verify_options:
+ dep_stat = rpm_verify_dependencies(header)
+ if dep_stat:
+ package_results['deps'] = dep_stat
+
+ # Check all the package files.
+ if 'nofiles' not in verify_options:
+ vp_fi = header.fiFromHeader()
+ for fileinfo in vp_fi:
+ # Do not bother doing anything with ghost files.
+ # This is what RPM does.
+ if fileinfo[4] & rpm.RPMFILE_GHOST:
+ continue
+
+ # This is only needed because of an inconsistency in the
+ # rpm.fi interface.
+ linktos = vp_fi.FLink()
+
+ file_stat = rpm_verify_file(fileinfo, linktos, omitmask)
+
+ #if len(file_stat) > 0 or options.verbose:
+ if len(file_stat) > 0:
+ fflags = fileinfo[4]
+ if fflags & rpm.RPMFILE_CONFIG:
+ file_stat.append('c')
+ elif fflags & rpm.RPMFILE_DOC:
+ file_stat.append('d')
+ elif fflags & rpm.RPMFILE_GHOST:
+ file_stat.append('g')
+ elif fflags & rpm.RPMFILE_LICENSE:
+ file_stat.append('l')
+ elif fflags & rpm.RPMFILE_PUBKEY:
+ file_stat.append('P')
+ elif fflags & rpm.RPMFILE_README:
+ file_stat.append('r')
+ else:
+ file_stat.append(' ')
+
+ file_stat.append(fileinfo[0]) # The filename.
+ package_results.setdefault('files', []).append(file_stat)
+
+ # Run the verify script if there is one.
+ # Do we want this?
+ #if 'noscripts' not in verify_options:
+ # script_stat = rpmVerifyscript()
+ # if script_stat:
+ # package_results['script'] = script_stat
+
+ # If there have been any errors, add the package nevra to the result.
+ if len(package_results) > 0:
+ package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME],
+ header[rpm.RPMTAG_EPOCH],
+ header[rpm.RPMTAG_VERSION],
+ header[rpm.RPMTAG_RELEASE],
+ header[rpm.RPMTAG_ARCH]))
+ else:
+ package_results = None
+
+ # Put things back the way we found them.
+ vsflags = vp_ts.setVSFlags(ovsflags)
+
+ return package_results
+
+
+def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
+ """
+ Requires rpmtransactionset() to be run first to get a ts.
+
+ pkgspec is a dict specifying the package
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ For all packages
+ {}
+
+ Or any combination of keywords to select one or more packages to verify.
+
+ options is a list of 'rpm --verify' options.
+ Default is to check everything.
+ e.g.:
+ [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
+ 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
+ 'nomode', 'nordev' ]
+
+ Returns a list. One list entry per package. Each list entry is a
+ dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'.
+ Entries only get added for the failures. If nothing failed, None is
+ returned.
+
+ It's all a bit messy and probably needs reviewing.
+
+ [ { 'hdr': [???],
+ 'deps': [((name, version, release), (reqname, reqversion),
+ flags, suggest, sense), .... ]
+ 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+ ['filename2', 'RPMVERIFY_LSTATFAIL']]
+ 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] }
+ { 'hdr': [???],
+ 'deps': [((name, version, release), (reqname, reqversion),
+ flags, suggest, sense), .... ]
+ 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+ ['filename2', 'RPMVERIFY_LSTATFAIL']]
+ 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
+
+ """
+ verify_results = []
+ headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
+ for header in headers:
+ result = rpm_verify_package(verify_ts, header, verify_options)
+ if result:
+ verify_results.append(result)
+
+ return verify_results
+
+
+def rpmtransactionset():
+ """
+ A simple wrapper for rpm.TransactionSet() to keep everything together.
+ Might use it to set some ts level flags later.
+
+ """
+ ts = rpm.TransactionSet()
+ return ts
+
+
+class Rpmtscallback(object):
+ """
+ Callback for ts.run(). Used for adding, upgrading and removing packages.
+ Starting with all possible reasons codes, but bcfg2 will probably only
+ make use of a few of them.
+
+ Mostly just printing stuff at the moment to understand how the callback
+ is used.
+ """
+ def __init__(self):
+ self.fdnos = {}
+
+ def callback(self, reason, amount, total, key, client_data):
+ """
+ Generic rpmts call back.
+ """
+ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+ pass
+ elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+ pass
+ elif reason == rpm.RPMCALLBACK_INST_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
+ reason == rpm.RPMCALLBACK_INST_PROGRESS:
+ pass
+ # rpm.RPMCALLBACK_INST_PROGRESS
+ elif reason == rpm.RPMCALLBACK_TRANS_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_TRANS_STOP:
+ pass
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+ pass
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNINST_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNINST_STOP:
+ pass
+ # How do we get at this?
+ # RPM.modified += key
+ elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
+ pass
+ elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNKNOWN:
+ pass
+ else:
+ print('ERROR - Fell through callBack')
+
+
+def rpm_erase(erase_pkgspecs, erase_flags):
+ """
+ pkgspecs is a list of pkgspec dicts specifying packages
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ """
+ erase_ts_flags = 0
+ if 'noscripts' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS
+ if 'notriggers' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS
+ if 'repackage' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE
+
+ erase_ts = rpmtransactionset()
+ erase_ts.setFlags(erase_ts_flags)
+
+ for pkgspec in erase_pkgspecs:
+ idx_list = getindexbykeyword(erase_ts, **pkgspec)
+ if len(idx_list) > 1 and 'allmatches' not in erase_flags:
+ #pass
+ print('ERROR - Multiple package match for erase', pkgspec)
+ else:
+ for idx in idx_list:
+ erase_ts.addErase(idx)
+
+ #for te in erase_ts:
+
+ erase_problems = []
+ if 'nodeps' not in erase_flags:
+ erase_problems = erase_ts.check()
+
+ if erase_problems == []:
+ erase_ts.order()
+ erase_callback = Rpmtscallback()
+ erase_ts.run(erase_callback.callback, 'Erase')
+ #else:
+
+ erase_ts.closeDB()
+ del erase_ts
+ return erase_problems
+
+
+def display_verify_file(file_results):
+ '''
+ Display file results similar to rpm --verify.
+ '''
+ filename = file_results[-1]
+ filetype = file_results[-2]
+
+ result_string = ''
+
+ if 'RPMVERIFY_LSTATFAIL' in file_results:
+ result_string = 'missing '
+ else:
+ if 'RPMVERIFY_FILESIZE' in file_results:
+ result_string = result_string + 'S'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MODE' in file_results:
+ result_string = result_string + 'M'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MD5' in file_results:
+ if 'RPMVERIFY_READFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + '5'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_RDEV' in file_results:
+ result_string = result_string + 'D'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_LINKTO' in file_results:
+ if 'RPMVERIFY_READLINKFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + 'L'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_USER' in file_results:
+ result_string = result_string + 'U'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_GROUP' in file_results:
+ result_string = result_string + 'G'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MTIME' in file_results:
+ result_string = result_string + 'T'
+ else:
+ result_string = result_string + '.'
+
+ print(result_string + ' ' + filetype + ' ' + filename)
+ sys.stdout.flush()
+
+#=============================================================================
+# Some options and output to assist with development and testing.
+# These are not intended for normal use.
+if __name__ == "__main__":
+
+ p = optparse.OptionParser()
+
+ p.add_option('--name', action='store',
+ default=None,
+ help='''Package name to verify.
+
+ ******************************************
+ NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
+ ******************************************
+
+ The specified operation will be carried out on all
+ instances of packages that match the package
+ specification
+ (name, epoch, version, release, arch).''')
+
+ p.add_option('--epoch', action='store',
+ default=None,
+ help='''Package epoch.''')
+
+ p.add_option('--version', action='store',
+ default=None,
+ help='''Package version.''')
+
+ p.add_option('--release', action='store',
+ default=None,
+ help='''Package release.''')
+
+ p.add_option('--arch', action='store',
+ default=None,
+ help='''Package arch.''')
+
+ p.add_option('--erase', '-e', action='store_true',
+ default=None,
+ help=
+ '''****************************************************
+ REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
+ PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
+ GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
+ INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
+ DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
+ ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
+ ****************************************************''')
+
+ p.add_option('--list', '-l', action='store_true',
+ help='''List package identity info. rpm -qa-ish equivalent
+ intended for use in RefreshPackages().''')
+
+ p.add_option('--verify', action='store_true',
+ help='''Verify Package(s). Output is only produced after all
+ packages have been verified. Be patient.''')
+
+ p.add_option('--verbose', '-v', action='store_true',
+ help='''Verbose output for --verify option. Output is the
+ same as rpm -v --verify.''')
+
+ p.add_option('--nodeps', action='store_true',
+ default=False,
+ help='Do not do dependency testing.')
+
+ p.add_option('--nodigest', action='store_true',
+ help='Do not check package digests.')
+
+ p.add_option('--nofiles', action='store_true',
+ help='Do not do file checks.')
+
+ p.add_option('--noscripts', action='store_true',
+ help='Do not run verification scripts.')
+
+ p.add_option('--nosignature', action='store_true',
+ help='Do not do package signature verification.')
+
+ p.add_option('--nolinkto', action='store_true',
+ help='Do not do symlink tests.')
+
+ p.add_option('--nomd5', action='store_true',
+ help='''Do not do MD5 checksums on files. Note that this does
+ not work for prelink files yet.''')
+
+ p.add_option('--nosize', action='store_true',
+ help='''Do not do file size tests. Note that this does not
+ work for prelink files yet.''')
+
+ p.add_option('--nouser', action='store_true',
+ help='Do not check file user ownership.')
+
+ p.add_option('--nogroup', action='store_true',
+ help='Do not check file group ownership.')
+
+ p.add_option('--nomtime', action='store_true',
+ help='Do not check file modification times.')
+
+ p.add_option('--nomode', action='store_true',
+ help='Do not check file modes (permissions).')
+
+ p.add_option('--nordev', action='store_true',
+ help='Do not check device node.')
+
+ p.add_option('--notriggers', action='store_true',
+ help='Do not generate triggers on erase.')
+
+ p.add_option('--repackage', action='store_true',
+ help='''Do repackage on erase. Packages are put
+ in /var/spool/repackage.''')
+
+ p.add_option('--allmatches', action='store_true',
+ help=
+ '''Remove all package instances that match the
+ pkgspec.
+
+ ***************************************************
+ NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
+ THAT MEANS ALL PACKAGES!!!!
+ ***************************************************''')
+
+ options, arguments = p.parse_args()
+
+ pkgspec = {}
+ rpm_options = []
+
+ if options.nodeps:
+ rpm_options.append('nodeps')
+
+ if options.nodigest:
+ rpm_options.append('nodigest')
+
+ if options.nofiles:
+ rpm_options.append('nofiles')
+
+ if options.noscripts:
+ rpm_options.append('noscripts')
+
+ if options.nosignature:
+ rpm_options.append('nosignature')
+
+ if options.nolinkto:
+ rpm_options.append('nolinkto')
+
+ if options.nomd5:
+ rpm_options.append('nomd5')
+
+ if options.nosize:
+ rpm_options.append('nosize')
+
+ if options.nouser:
+ rpm_options.append('nouser')
+
+ if options.nogroup:
+ rpm_options.append('nogroup')
+
+ if options.nomtime:
+ rpm_options.append('nomtime')
+
+ if options.nomode:
+ rpm_options.append('nomode')
+
+ if options.nordev:
+ rpm_options.append('nordev')
+
+ if options.repackage:
+ rpm_options.append('repackage')
+
+ if options.allmatches:
+ rpm_options.append('allmatches')
+
+ main_ts = rpmtransactionset()
+
+ cmdline_pkgspec = {}
+ if options.name != 'all':
+ if options.name:
+ cmdline_pkgspec['name'] = str(options.name)
+ if options.epoch:
+ cmdline_pkgspec['epoch'] = str(options.epoch)
+ if options.version:
+ cmdline_pkgspec['version'] = str(options.version)
+ if options.release:
+ cmdline_pkgspec['release'] = str(options.release)
+ if options.arch:
+ cmdline_pkgspec['arch'] = str(options.arch)
+
+ if options.verify:
+ results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
+ for r in results:
+ files = r.get('files', '')
+ for f in files:
+ display_verify_file(f)
+
+ elif options.list:
+ for p in rpmpackagelist(main_ts):
+ print(p)
+
+ elif options.erase:
+ if options.name:
+ rpm_erase([cmdline_pkgspec], rpm_options)
+ else:
+ print('You must specify the "--name" option')
+
class RPM(Bcfg2.Client.Tools.PkgTool):
"""Support for RPM packages."""
+ options = Bcfg2.Client.Tools.PkgTool.options + [
+ Bcfg2.Options.Option(
+ cf=('RPM', 'installonlypackages'), dest="rpm_installonly",
+ type=Bcfg2.Options.Types.comma_list,
+ default=['kernel', 'kernel-bigmem', 'kernel-enterprise',
+ 'kernel-smp', 'kernel-modules', 'kernel-debug',
+ 'kernel-unsupported', 'kernel-devel', 'kernel-source',
+ 'kernel-default', 'kernel-largesmp-devel',
+ 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'],
+ help='RPM install-only packages'),
+ Bcfg2.Options.BooleanOption(
+ cf=('RPM', 'pkg_checks'), default=True, dest="rpm_pkg_checks",
+ help="Perform RPM package checks"),
+ Bcfg2.Options.BooleanOption(
+ cf=('RPM', 'pkg_verify'), default=True, dest="rpm_pkg_verify",
+ help="Perform RPM package verify"),
+ Bcfg2.Options.BooleanOption(
+ cf=('RPM', 'install_missing'), default=True,
+ dest="rpm_install_missing",
+ help="Install missing packages"),
+ Bcfg2.Options.Option(
+ cf=('RPM', 'erase_flags'), default=["allmatches"],
+ dest="rpm_erase_flags",
+ help="RPM erase flags"),
+ Bcfg2.Options.BooleanOption(
+ cf=('RPM', 'fix_version'), default=True,
+ dest="rpm_fix_version",
+ help="Fix (upgrade or downgrade) packages with the wrong version"),
+ Bcfg2.Options.BooleanOption(
+ cf=('RPM', 'reinstall_broken'), default=True,
+ dest="rpm_reinstall_broken",
+ help="Reinstall packages that fail to verify"),
+ Bcfg2.Options.Option(
+ cf=('RPM', 'verify_flags'), default=[], dest="rpm_verify_flags",
+ help="RPM verify flags")]
+
__execs__ = ['/bin/rpm', '/var/lib/rpm']
__handles__ = [('Package', 'rpm')]
@@ -15,7 +1142,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
__new_req__ = {'Package': ['name'],
'Instance': ['version', 'release', 'arch']}
- __new_ireq__ = {'Package': ['uri'], \
+ __new_ireq__ = {'Package': ['uri'],
'Instance': ['simplefile']}
__gpg_req__ = {'Package': ['name', 'version']}
@@ -26,60 +1153,51 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
__new_gpg_ireq__ = {'Package': ['name'],
'Instance': ['version', 'release']}
- conflicts = ['RPMng']
-
pkgtype = 'rpm'
pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"]))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
# create a global ignore list used when ignoring particular
# files during package verification
- self.ignores = [entry.get('name') for struct in config for entry in struct \
- if entry.get('type') == 'ignore']
+ self.ignores = [entry.get('name') for struct in config
+ for entry in struct if entry.get('type') == 'ignore']
self.instance_status = {}
self.extra_instances = []
self.modlists = {}
self.gpg_keyids = self.getinstalledgpg()
- opt_prefix = self.name.lower()
- self.installOnlyPkgs = self.setup["%s_installonly" % opt_prefix]
+ self.installOnlyPkgs = Bcfg2.Options.setup.rpm_installonly
if 'gpg-pubkey' not in self.installOnlyPkgs:
self.installOnlyPkgs.append('gpg-pubkey')
- self.erase_flags = self.setup['%s_erase_flags' % opt_prefix]
- self.pkg_checks = self.setup['%s_pkg_checks' % opt_prefix]
- self.pkg_verify = self.setup['%s_pkg_verify' % opt_prefix]
- self.installed_action = self.setup['%s_installed_action' % opt_prefix]
- self.version_fail_action = self.setup['%s_version_fail_action' %
- opt_prefix]
- self.verify_fail_action = self.setup['%s_verify_fail_action' %
- opt_prefix]
- self.verify_flags = self.setup['%s_verify_flags' % opt_prefix]
+ self.verify_flags = Bcfg2.Options.setup.rpm_verify_flags
if '' in self.verify_flags:
self.verify_flags.remove('')
self.logger.debug('%s: installOnlyPackages = %s' %
(self.name, self.installOnlyPkgs))
self.logger.debug('%s: erase_flags = %s' %
- (self.name, self.erase_flags))
+ (self.name, Bcfg2.Options.setup.rpm_erase_flags))
self.logger.debug('%s: pkg_checks = %s' %
- (self.name, self.pkg_checks))
+ (self.name, Bcfg2.Options.setup.rpm_pkg_checks))
self.logger.debug('%s: pkg_verify = %s' %
- (self.name, self.pkg_verify))
- self.logger.debug('%s: installed_action = %s' %
- (self.name, self.installed_action))
- self.logger.debug('%s: version_fail_action = %s' %
- (self.name, self.version_fail_action))
- self.logger.debug('%s: verify_fail_action = %s' %
- (self.name, self.verify_fail_action))
+ (self.name, Bcfg2.Options.setup.rpm_pkg_verify))
+ self.logger.debug('%s: install_missing = %s' %
+                          (self.name, Bcfg2.Options.setup.rpm_install_missing))
+ self.logger.debug('%s: fix_version = %s' %
+ (self.name, Bcfg2.Options.setup.rpm_fix_version))
+ self.logger.debug('%s: reinstall_broken = %s' %
+ (self.name,
+ Bcfg2.Options.setup.rpm_reinstall_broken))
self.logger.debug('%s: verify_flags = %s' %
(self.name, self.verify_flags))
# Force a re- prelink of all packages if prelink exists.
# Many, if not most package verifies can be caused by out of
# date prelinking.
- if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']:
+ if (os.path.isfile('/usr/sbin/prelink') and
+ not Bcfg2.Options.setup.dry_run):
rv = self.cmd.run('/usr/sbin/prelink -a -mR')
if rv.success:
self.logger.debug('Pre-emptive prelink succeeded')
@@ -104,18 +1222,18 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
'arch':'x86_64'} ]
"""
self.installed = {}
- refresh_ts = rpmtools.rpmtransactionset()
+ refresh_ts = rpmtransactionset()
# Don't bother with signature checks at this stage. The GPG keys might
# not be installed.
- refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
- for nevra in rpmtools.rpmpackagelist(refresh_ts):
+ refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
+ for nevra in rpmpackagelist(refresh_ts):
self.installed.setdefault(nevra['name'], []).append(nevra)
- if self.setup['debug']:
+ if Bcfg2.Options.setup.debug:
print("The following package instances are installed:")
for name, instances in list(self.installed.items()):
self.logger.debug(" " + name)
for inst in instances:
- self.logger.debug(" %s" %self.str_evra(inst))
+ self.logger.debug(" %s" % self.str_evra(inst))
refresh_ts.closeDB()
del refresh_ts
@@ -145,18 +1263,19 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
Constructs the text prompts for interactive mode.
"""
- instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ instances = [inst for inst in entry if inst.tag == 'Instance' or
+ inst.tag == 'Package']
if instances == []:
# We have an old style no Instance entry. Convert it to new style.
instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
for attrib in list(entry.attrib.keys()):
instance.attrib[attrib] = entry.attrib[attrib]
- if (self.pkg_checks and
- entry.get('pkg_checks', 'true').lower() == 'true'):
+ if (Bcfg2.Options.setup.rpm_pkg_checks and
+ entry.get('pkg_checks', 'true').lower() == 'true'):
if 'any' in [entry.get('version'), pinned_version]:
version, release = 'any', 'any'
elif entry.get('version') == 'auto':
- if pinned_version != None:
+ if pinned_version is not None:
version, release = pinned_version.split('-')
else:
return False
@@ -166,242 +1285,315 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
instance.set('release', release)
if entry.get('verify', 'true') == 'false':
instance.set('verify', 'false')
- instances = [ instance ]
+ instances = [instance]
- self.logger.debug("Verifying package instances for %s" % entry.get('name'))
+ self.logger.debug("Verifying package instances for %s" %
+ entry.get('name'))
package_fail = False
qtext_versions = ''
if entry.get('name') in self.installed:
# There is at least one instance installed.
- if (self.pkg_checks and
- entry.get('pkg_checks', 'true').lower() == 'true'):
+ if (Bcfg2.Options.setup.rpm_pkg_checks and
+ entry.get('pkg_checks', 'true').lower() == 'true'):
rpmTs = rpm.TransactionSet()
rpmHeader = None
for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')):
- if rpmHeader is None or rpm.versionCompare(h, rpmHeader) > 0:
+ if rpmHeader is None or \
+ rpm.versionCompare(h, rpmHeader) > 0:
rpmHeader = h
- rpmProvides = [ h['provides'] for h in \
- rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')) ]
+ rpmProvides = [h['provides'] for h in
+ rpmTs.dbMatch(rpm.RPMTAG_NAME,
+ entry.get('name'))]
rpmIntersection = set(rpmHeader['provides']) & \
- set(self.installOnlyPkgs)
+ set(self.installOnlyPkgs)
if len(rpmIntersection) > 0:
# Packages that should only be installed or removed.
# e.g. kernels.
self.logger.debug(" Install only package.")
for inst in instances:
- self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status.setdefault(inst, {})['installed']\
+ = False
self.instance_status[inst]['version_fail'] = False
- if inst.tag == 'Package' and len(self.installed[entry.get('name')]) > 1:
- self.logger.error("WARNING: Multiple instances of package %s are installed." % \
+ if inst.tag == 'Package' and \
+ len(self.installed[entry.get('name')]) > 1:
+ self.logger.error("WARNING: Multiple instances of "
+ "package %s are installed." %
(entry.get('name')))
for pkg in self.installed[entry.get('name')]:
- if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) \
- or self.inst_evra_equal(inst, pkg):
+ if inst.get('version') == 'any' or \
+ self.pkg_vr_equal(inst, pkg) or \
+ self.inst_evra_equal(inst, pkg):
if inst.get('version') == 'any':
self.logger.error("got any version")
- self.logger.debug(" %s" % self.str_evra(inst))
+ self.logger.debug(" %s" %
+ self.str_evra(inst))
self.instance_status[inst]['installed'] = True
- if (self.pkg_verify and
- inst.get('pkg_verify', 'true').lower() == 'true'):
- flags = inst.get('verify_flags', '').split(',') + self.verify_flags
+ if (Bcfg2.Options.setup.rpm_pkg_verify and
+ inst.get('pkg_verify',
+ 'true').lower() == 'true'):
+ flags = inst.get('verify_flags',
+ '').split(',') + \
+ self.verify_flags
if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
entry.get('name') != 'gpg-pubkey':
flags += ['nosignature', 'nodigest']
- self.logger.debug('WARNING: Package %s %s requires GPG Public key with ID %s'\
- % (pkg.get('name'), self.str_evra(pkg), \
- pkg.get('gpgkeyid', '')))
- self.logger.debug(' Disabling signature check.')
-
- if self.setup.get('quick', False):
- if rpmtools.prelink_exists:
+ self.logger.debug('WARNING: Package '
+ '%s %s requires GPG '
+ 'Public key with ID '
+ '%s' %
+ (pkg.get('name'),
+ self.str_evra(pkg),
+ pkg.get('gpgkeyid',
+ '')))
+ self.logger.debug(' Disabling '
+ 'signature check.')
+
+ if Bcfg2.Options.setup.quick:
+ if prelink_exists:
flags += ['nomd5', 'nosize']
else:
flags += ['nomd5']
- self.logger.debug(" verify_flags = %s" % flags)
+ self.logger.debug(" verify_flags = "
+ "%s" % flags)
if inst.get('verify', 'true') == 'false':
- self.instance_status[inst]['verify'] = None
+ self.instance_status[inst]['verify'] =\
+ None
else:
- vp_ts = rpmtools.rpmtransactionset()
- self.instance_status[inst]['verify'] = \
- rpmtools.rpm_verify( vp_ts, pkg, flags)
+ vp_ts = rpmtransactionset()
+ self.instance_status[inst]['verify'] =\
+ rpm_verify(vp_ts, pkg, flags)
vp_ts.closeDB()
del vp_ts
- if self.instance_status[inst]['installed'] == False:
- self.logger.info(" Package %s %s not installed." % \
- (entry.get('name'), self.str_evra(inst)))
+ if not self.instance_status[inst]['installed']:
+ self.logger.info(" Package %s %s not "
+ "installed." %
+ (entry.get('name'),
+ self.str_evra(inst)))
- qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ qtext_versions = qtext_versions + 'I(%s) ' % \
+ self.str_evra(inst)
entry.set('current_exists', 'false')
else:
# Normal Packages that can be upgraded.
for inst in instances:
- self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status.setdefault(inst, {})['installed']\
+ = False
self.instance_status[inst]['version_fail'] = False
- # Only installed packages with the same architecture are
- # relevant.
- if inst.get('arch', None) == None:
+ # only installed packages with the same architecture
+ # are relevant.
+ if inst.get('arch', None) is None:
arch_match = self.installed[entry.get('name')]
else:
- arch_match = [pkg for pkg in self.installed[entry.get('name')] \
- if pkg.get('arch', None) == inst.get('arch', None)]
+ arch_match = [pkg for pkg in
+ self.installed[entry.get('name')]
+ if pkg.get('arch', None) ==
+ inst.get('arch', None)]
if len(arch_match) > 1:
- self.logger.error("Multiple instances of package %s installed with the same achitecture." % \
- (entry.get('name')))
+ self.logger.error("Multiple instances of package "
+ "%s installed with the same "
+                                              "architecture." %
+ (entry.get('name')))
elif len(arch_match) == 1:
# There is only one installed like there should be.
# Check that it is the right version.
for pkg in arch_match:
- if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) or \
- self.inst_evra_equal(inst, pkg):
- self.logger.debug(" %s" % self.str_evra(inst))
- self.instance_status[inst]['installed'] = True
-
- if (self.pkg_verify and
- inst.get('pkg_verify', 'true').lower() == 'true'):
- flags = inst.get('verify_flags', '').split(',') + self.verify_flags
- if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
- 'nosignature' not in flags:
- flags += ['nosignature', 'nodigest']
- self.logger.info('WARNING: Package %s %s requires GPG Public key with ID %s'\
- % (pkg.get('name'), self.str_evra(pkg), \
- pkg.get('gpgkeyid', '')))
- self.logger.info(' Disabling signature check.')
-
- if self.setup.get('quick', False):
- if rpmtools.prelink_exists:
+ if inst.get('version') == 'any' or \
+ self.pkg_vr_equal(inst, pkg) or \
+ self.inst_evra_equal(inst, pkg):
+ self.logger.debug(" %s" %
+ self.str_evra(inst))
+ self.instance_status[inst]['installed'] = \
+ True
+
+ if (Bcfg2.Options.setup.rpm_pkg_verify and
+ inst.get(
+ 'pkg_verify',
+ 'true').lower() == 'true'):
+ flags = inst.get('verify_flags',
+ '').split(',') + \
+ self.verify_flags
+ if pkg.get('gpgkeyid', '')[-8:] not in\
+ self.gpg_keyids and 'nosignature'\
+ not in flags:
+ flags += ['nosignature',
+ 'nodigest']
+ self.logger.info(
+ 'WARNING: Package %s %s '
+ 'requires GPG Public key with '
+ 'ID %s' % (pkg.get('name'),
+ self.str_evra(pkg),
+ pkg.get('gpgkeyid',
+ '')))
+ self.logger.info(
+ ' Disabling signature '
+ 'check.')
+
+ if Bcfg2.Options.setup.quick:
+ if prelink_exists:
flags += ['nomd5', 'nosize']
else:
flags += ['nomd5']
- self.logger.debug(" verify_flags = %s" % flags)
+ self.logger.debug(
+ " verify_flags = %s" %
+ flags)
- if inst.get('verify', 'true') == 'false':
+ if inst.get('verify', 'true') == \
+ 'false':
self.instance_status[inst]['verify'] = None
else:
- vp_ts = rpmtools.rpmtransactionset()
- self.instance_status[inst]['verify'] = \
- rpmtools.rpm_verify( vp_ts, pkg, flags )
+ vp_ts = rpmtransactionset()
+ self.instance_status[inst]['verify'] = rpm_verify(vp_ts, pkg, flags)
vp_ts.closeDB()
del vp_ts
else:
# Wrong version installed.
- self.instance_status[inst]['version_fail'] = True
- self.logger.info(" Wrong version installed. Want %s, but have %s"\
- % (self.str_evra(inst), self.str_evra(pkg)))
-
- qtext_versions = qtext_versions + 'U(%s -> %s) ' % \
- (self.str_evra(pkg), self.str_evra(inst))
+ self.instance_status[inst]['version_fail']\
+ = True
+ self.logger.info(" Wrong version "
+ "installed. Want %s, but "
+ "have %s" %
+ (self.str_evra(inst),
+ self.str_evra(pkg)))
+
+ qtext_versions = qtext_versions + \
+ 'U(%s -> %s) ' % (self.str_evra(pkg),
+ self.str_evra(inst))
elif len(arch_match) == 0:
# This instance is not installed.
self.instance_status[inst]['installed'] = False
- self.logger.info(" %s is not installed." % self.str_evra(inst))
- qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ self.logger.info(" %s is not installed." %
+ self.str_evra(inst))
+ qtext_versions = qtext_versions + \
+ 'I(%s) ' % self.str_evra(inst)
# Check the rpm verify results.
for inst in instances:
instance_fail = False
# Dump the rpm verify results.
#****Write something to format this nicely.*****
- if self.setup['debug'] and self.instance_status[inst].get('verify', None):
+ if (Bcfg2.Options.setup.debug and
+ self.instance_status[inst].get('verify', None)):
self.logger.debug(self.instance_status[inst]['verify'])
self.instance_status[inst]['verify_fail'] = False
if self.instance_status[inst].get('verify', None):
if len(self.instance_status[inst].get('verify')) > 1:
- self.logger.info("WARNING: Verification of more than one package instance.")
+ self.logger.info("WARNING: Verification of more "
+ "than one package instance.")
for result in self.instance_status[inst]['verify']:
# Check header results
if result.get('hdr', None):
instance_fail = True
- self.instance_status[inst]['verify_fail'] = True
+ self.instance_status[inst]['verify_fail'] = \
+ True
# Check dependency results
if result.get('deps', None):
instance_fail = True
- self.instance_status[inst]['verify_fail'] = True
-
- # Check the rpm verify file results against the modlist
- # and entry and per Instance Ignores.
- ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
- [ig.get('name') for ig in inst.findall('Ignore')] + \
- self.ignores
+ self.instance_status[inst]['verify_fail'] = \
+ True
+
+ # check the rpm verify file results against
+ # the modlist and entry and per Instance Ignores.
+ ignores = [ig.get('name')
+ for ig in entry.findall('Ignore')] + \
+ [ig.get('name')
+ for ig in inst.findall('Ignore')] + \
+ self.ignores
for file_result in result.get('files', []):
if file_result[-1] not in modlist + ignores:
instance_fail = True
- self.instance_status[inst]['verify_fail'] = True
+ self.instance_status[inst]['verify_fail'] \
+ = True
else:
- self.logger.debug(" Modlist/Ignore match: %s" % \
- (file_result[-1]))
+ self.logger.debug(" Modlist/Ignore "
+ "match: %s" %
+ (file_result[-1]))
- if instance_fail == True:
- self.logger.debug("*** Instance %s failed RPM verification ***" % \
+ if instance_fail:
+ self.logger.debug("*** Instance %s failed RPM "
+ "verification ***" %
self.str_evra(inst))
- qtext_versions = qtext_versions + 'R(%s) ' % self.str_evra(inst)
+ qtext_versions = qtext_versions + \
+ 'R(%s) ' % self.str_evra(inst)
self.modlists[entry] = modlist
- # Attach status structure for return to server for reporting.
- inst.set('verify_status', str(self.instance_status[inst]))
+ # Attach status structure for reporting.
+ inst.set('verify_status',
+ str(self.instance_status[inst]))
- if self.instance_status[inst]['installed'] == False or \
- self.instance_status[inst].get('version_fail', False)== True or \
- self.instance_status[inst].get('verify_fail', False) == True:
+ version_fail = self.instance_status[inst].get(
+ 'version_fail', False)
+ verify_fail = self.instance_status[inst].get(
+ 'verify_fail', False)
+ if not self.instance_status[inst]['installed'] or \
+ version_fail or verify_fail:
package_fail = True
self.instance_status[inst]['pkg'] = entry
self.modlists[entry] = modlist
# Find Installed Instances that are not in the Config.
- extra_installed = self.FindExtraInstances(entry, self.installed[entry.get('name')])
- if extra_installed != None:
+ extra_installed = self.FindExtraInstances(
+ entry, self.installed[entry.get('name')])
+ if extra_installed is not None:
package_fail = True
self.extra_instances.append(extra_installed)
for inst in extra_installed.findall('Instance'):
- qtext_versions = qtext_versions + 'D(%s) ' % self.str_evra(inst)
- self.logger.debug("Found Extra Instances %s" % qtext_versions)
-
- if package_fail == True:
- self.logger.info(" Package %s failed verification." % \
- (entry.get('name')))
- qtext = 'Install/Upgrade/delete Package %s instance(s) - %s (y/N) ' % \
- (entry.get('name'), qtext_versions)
+ qtext_versions = qtext_versions + \
+ 'D(%s) ' % self.str_evra(inst)
+ self.logger.debug("Found Extra Instances %s" %
+ qtext_versions)
+
+ if package_fail:
+ self.logger.info(" Package %s failed verification."
+ % (entry.get('name')))
+ qtext = 'Install/Upgrade/delete Package %s instance(s) - '\
+ '%s (y/N) ' % (entry.get('name'), qtext_versions)
entry.set('qtext', qtext)
bcfg2_versions = ''
- for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']:
- bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(bcfg2_inst)
+ for bcfg2_inst in [inst for inst in instances
+ if inst.tag == 'Instance']:
+ bcfg2_versions = bcfg2_versions + \
+ '(%s) ' % self.str_evra(bcfg2_inst)
if bcfg2_versions != '':
entry.set('version', bcfg2_versions)
installed_versions = ''
for installed_inst in self.installed[entry.get('name')]:
- installed_versions = installed_versions + '(%s) ' % \
- self.str_evra(installed_inst)
+ installed_versions = installed_versions + \
+ '(%s) ' % self.str_evra(installed_inst)
entry.set('current_version', installed_versions)
return False
else:
# There are no Instances of this package installed.
- self.logger.debug("Package %s has no instances installed" % (entry.get('name')))
+ self.logger.debug("Package %s has no instances installed" %
+ (entry.get('name')))
entry.set('current_exists', 'false')
bcfg2_versions = ''
for inst in instances:
- qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ qtext_versions = qtext_versions + \
+ 'I(%s) ' % self.str_evra(inst)
self.instance_status.setdefault(inst, {})['installed'] = False
self.modlists[entry] = modlist
self.instance_status[inst]['pkg'] = entry
if inst.tag == 'Instance':
- bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(inst)
+ bcfg2_versions = bcfg2_versions + \
+ '(%s) ' % self.str_evra(inst)
if bcfg2_versions != '':
entry.set('version', bcfg2_versions)
- entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % \
+ entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " %
(entry.get('name'), qtext_versions))
return False
@@ -421,26 +1613,31 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
for pkg in packages:
for inst in pkg:
if pkg.get('name') != 'gpg-pubkey':
- pkgspec = { 'name':pkg.get('name'),
- 'epoch':inst.get('epoch', None),
- 'version':inst.get('version'),
- 'release':inst.get('release'),
- 'arch':inst.get('arch') }
+ pkgspec = {'name': pkg.get('name'),
+ 'epoch': inst.get('epoch', None),
+ 'version': inst.get('version'),
+ 'release': inst.get('release'),
+ 'arch': inst.get('arch')}
pkgspec_list.append(pkgspec)
else:
- pkgspec = { 'name':pkg.get('name'),
- 'version':inst.get('version'),
- 'release':inst.get('release')}
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkgspec.get('name'), self.str_evra(pkgspec)))
- self.logger.info(" This package will be deleted in a future version of the RPM driver.")
+ pkgspec = {'name': pkg.get('name'),
+ 'version': inst.get('version'),
+ 'release': inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in "
+ "configuration %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted "
+ "in a future version of the RPM driver.")
#pkgspec_list.append(pkg_spec)
- erase_results = rpmtools.rpm_erase(pkgspec_list, self.erase_flags)
+ erase_results = rpm_erase(pkgspec_list,
+ Bcfg2.Options.setup.rpm_erase_flags)
if erase_results == []:
self.modified += packages
for pkg in pkgspec_list:
- self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg)))
+ self.logger.info("Deleted %s %s" % (pkg.get('name'),
+ self.str_evra(pkg)))
else:
self.logger.info("Bulk erase failed with errors:")
self.logger.debug("Erase results = %s" % erase_results)
@@ -450,30 +1647,38 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
pkg_modified = False
for inst in pkg:
if pkg.get('name') != 'gpg-pubkey':
- pkgspec = { 'name':pkg.get('name'),
- 'epoch':inst.get('epoch', None),
- 'version':inst.get('version'),
- 'release':inst.get('release'),
- 'arch':inst.get('arch') }
+ pkgspec = {'name': pkg.get('name'),
+ 'epoch': inst.get('epoch', None),
+ 'version': inst.get('version'),
+ 'release': inst.get('release'),
+ 'arch': inst.get('arch')}
pkgspec_list.append(pkgspec)
else:
- pkgspec = { 'name':pkg.get('name'),
- 'version':inst.get('version'),
- 'release':inst.get('release')}
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkgspec.get('name'), self.str_evra(pkgspec)))
- self.logger.info(" This package will be deleted in a future version of the RPM driver.")
- continue # Don't delete the gpg-pubkey packages for now.
- erase_results = rpmtools.rpm_erase([pkgspec], self.erase_flags)
+ pkgspec = {'name': pkg.get('name'),
+ 'version': inst.get('version'),
+ 'release': inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in "
+ "configuration %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
+ self.logger.info(" This package will be "
+ "deleted in a future version of the "
+ "RPM driver.")
+ continue # don't delete the gpg-pubkey packages
+ erase_results = rpm_erase(
+ [pkgspec],
+ Bcfg2.Options.setup.rpm_erase_flags)
if erase_results == []:
pkg_modified = True
- self.logger.info("Deleted %s %s" % \
- (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info("Deleted %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
else:
- self.logger.error("unable to delete %s %s" % \
- (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.error("unable to delete %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
self.logger.debug("Failure = %s" % erase_results)
- if pkg_modified == True:
+ if pkg_modified:
self.modified.append(pkg)
self.RefreshPackages()
@@ -489,33 +1694,35 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
"""
fix = False
- if inst_status.get('installed', False) == False:
- if instance.get('installed_action', 'install') == "install" and \
- self.installed_action == "install":
+ if not inst_status.get('installed', False):
+ if (instance.get('install_missing', 'true').lower() == "true" and
+ Bcfg2.Options.setup.rpm_install_missing):
fix = True
else:
- self.logger.debug('Installed Action for %s %s is to not install' % \
+ self.logger.debug('Installed Action for %s %s is to not '
+ 'install' %
(inst_status.get('pkg').get('name'),
self.str_evra(instance)))
- elif inst_status.get('version_fail', False) == True:
- if instance.get('version_fail_action', 'upgrade') == "upgrade" and \
- self.version_fail_action == "upgrade":
+ elif inst_status.get('version_fail', False):
+ if (instance.get('fix_version', 'true').lower() == "true" and
+ Bcfg2.Options.setup.rpm_fix_version):
fix = True
else:
- self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \
+ self.logger.debug('Version Fail Action for %s %s is to '
+ 'not upgrade' %
(inst_status.get('pkg').get('name'),
self.str_evra(instance)))
- elif inst_status.get('verify_fail', False) == True and self.name == "RPM":
- # yum can't reinstall packages so only do this for rpm.
- if instance.get('verify_fail_action', 'reinstall') == "reinstall" and \
- self.verify_fail_action == "reinstall":
+ elif inst_status.get('verify_fail', False):
+ if (instance.get('reinstall_broken', 'true').lower() == "true" and
+ Bcfg2.Options.setup.rpm_reinstall_broken):
for inst in inst_status.get('verify'):
# This needs to be a for loop rather than a straight get()
# because the underlying routines handle multiple packages
# and return a list of results.
- self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra'))
+ self.logger.debug('reinstall_check: %s %s:%s-%s.%s' %
+ inst.get('nevra'))
if inst.get("hdr", False):
fix = True
@@ -523,7 +1730,8 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
elif inst.get('files', False):
# Parse rpm verify file results
for file_result in inst.get('files', []):
- self.logger.debug('reinstall_check: file: %s' % file_result)
+ self.logger.debug('reinstall_check: file: %s' %
+ file_result)
if file_result[-2] != 'c':
fix = True
break
@@ -532,13 +1740,14 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
elif inst.get("deps", False):
fix = False
else:
- self.logger.debug('Verify Fail Action for %s %s is to not reinstall' % \
- (inst_status.get('pkg').get('name'),
- self.str_evra(instance)))
+ self.logger.debug('Verify Fail Action for %s %s is to not '
+ 'reinstall' %
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
return fix
- def Install(self, packages, states):
+ def Install(self, packages):
"""
Try and fix everything that RPM.VerifyPackages() found wrong for
each Package Entry. This can result in individual RPMs being
@@ -559,6 +1768,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
"""
self.logger.info('Runing RPM.Install()')
+ states = dict()
install_only_pkgs = []
gpg_keys = []
upgrade_pkgs = []
@@ -566,20 +1776,21 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Remove extra instances.
# Can not reverify because we don't have a package entry.
if len(self.extra_instances) > 0:
- if (self.setup.get('remove') == 'all' or \
- self.setup.get('remove') == 'packages') and\
- not self.setup.get('dryrun'):
+ if (Bcfg2.Options.setup.remove in ['all', 'packages'] and
+ not Bcfg2.Options.setup.dry_run):
self.Remove(self.extra_instances)
else:
- self.logger.info("The following extra package instances will be removed by the '-r' option:")
+ self.logger.info("The following extra package instances will "
+ "be removed by the '-r' option:")
for pkg in self.extra_instances:
for inst in pkg:
- self.logger.info(" %s %s" % (pkg.get('name'), self.str_evra(inst)))
+ self.logger.info(" %s %s" % (pkg.get('name'),
+ self.str_evra(inst)))
# Figure out which instances of the packages actually need something
# doing to them and place in the appropriate work 'queue'.
for pkg in packages:
- for inst in [instn for instn in pkg if instn.tag \
+ for inst in [instn for instn in pkg if instn.tag
in ['Instance', 'Package']]:
if self.FixInstance(inst, self.instance_status[inst]):
if pkg.get('name') == 'gpg-pubkey':
@@ -592,10 +1803,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Fix installOnlyPackages
if len(install_only_pkgs) > 0:
self.logger.info("Attempting to install 'install only packages'")
- install_args = \
- " ".join(os.path.join(self.instance_status[inst].get('pkg').get('uri'),
- inst.get('simplefile'))
- for inst in install_only_pkgs)
+ install_args = " ".join(os.path.join(
+ self.instance_status[inst].get('pkg').get('uri'),
+ inst.get('simplefile'))
+ for inst in install_only_pkgs)
if self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs "
"%s" % install_args):
# The rpm command succeeded. All packages installed.
@@ -607,35 +1818,34 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.error("Single Pass for InstallOnlyPackages Failed")
installed_instances = []
for inst in install_only_pkgs:
- install_args = \
- os.path.join(self.instance_status[inst].get('pkg').get('uri'),
- inst.get('simplefile'))
+ pkguri = self.instance_status[inst].get('pkg').get('uri')
+ pkgname = self.instance_status[inst].get('pkg').get('name')
+ install_args = os.path.join(pkguri, inst.get('simplefile'))
if self.cmd.run("rpm --install --quiet --oldpackage "
"--replacepkgs %s" % install_args):
installed_instances.append(inst)
else:
- self.logger.debug("InstallOnlyPackage %s %s would not install." % \
- (self.instance_status[inst].get('pkg').get('name'), \
- self.str_evra(inst)))
+ self.logger.debug("InstallOnlyPackage %s %s would not "
+ "install." % (pkgname,
+ self.str_evra(inst)))
- install_pkg_set = set([self.instance_status[inst].get('pkg') \
- for inst in install_only_pkgs])
+ install_pkg_set = set([self.instance_status[inst].get('pkg')
+ for inst in install_only_pkgs])
self.RefreshPackages()
# Install GPG keys.
if len(gpg_keys) > 0:
for inst in gpg_keys:
self.logger.info("Installing GPG keys.")
- key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile'))
+ pkguri = self.instance_status[inst].get('pkg').get('uri')
+ pkgname = self.instance_status[inst].get('pkg').get('name')
+ key_arg = os.path.join(pkguri, inst.get('simplefile'))
if not self.cmd.run("rpm --import %s" % key_arg):
self.logger.debug("Unable to install %s-%s" %
- (self.instance_status[inst].get('pkg').get('name'),
- self.str_evra(inst)))
+ (pkgname, self.str_evra(inst)))
else:
self.logger.debug("Installed %s-%s-%s" %
- (self.instance_status[inst].get('pkg').get('name'),
- inst.get('version'),
+ (pkgname, inst.get('version'),
inst.get('release')))
self.RefreshPackages()
self.gpg_keyids = self.getinstalledgpg()
@@ -645,9 +1855,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Fix upgradeable packages.
if len(upgrade_pkgs) > 0:
self.logger.info("Attempting to upgrade packages")
- upgrade_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile')) \
- for inst in upgrade_pkgs])
+ upgrade_args = " ".join([os.path.join(
+ self.instance_status[inst].get('pkg').get('uri'),
+ inst.get('simplefile'))
+ for inst in upgrade_pkgs])
if self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs "
"%s" % upgrade_args):
# The rpm command succeeded. All packages upgraded.
@@ -661,30 +1872,38 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.error("Single Pass for Upgrading Packages Failed")
upgraded_instances = []
for inst in upgrade_pkgs:
- upgrade_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile'))
- #self.logger.debug("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \
- # upgrade_args)
+ upgrade_args = os.path.join(
+ self.instance_status[inst].get('pkg').get('uri'),
+ inst.get('simplefile'))
+ #self.logger.debug("rpm --upgrade --quiet --oldpackage "
+ # "--replacepkgs %s" % upgrade_args)
if self.cmd.run("rpm --upgrade --quiet --oldpackage "
"--replacepkgs %s" % upgrade_args):
upgraded_instances.append(inst)
else:
- self.logger.debug("Package %s %s would not upgrade." %
- (self.instance_status[inst].get('pkg').get('name'),
- self.str_evra(inst)))
+ self.logger.debug(
+ "Package %s %s would not upgrade." %
+ (self.instance_status[inst].get('pkg').get('name'),
+ self.str_evra(inst)))
- upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \
- for inst in upgrade_pkgs])
+ upgrade_pkg_set = set([self.instance_status[inst].get('pkg')
+ for inst in upgrade_pkgs])
self.RefreshPackages()
- if not self.setup['kevlar']:
+ if not Bcfg2.Options.setup.kevlar:
for pkg_entry in packages:
- self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
- states[pkg_entry] = self.VerifyPackage(pkg_entry, \
- self.modlists.get(pkg_entry, []))
+ self.logger.debug("Reverifying Failed Package %s" %
+ (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(
+ pkg_entry, self.modlists.get(pkg_entry, []))
- for entry in [ent for ent in packages if states[ent]]:
- self.modified.append(entry)
+ self.modified.extend(ent for ent in packages if states[ent])
+ return states
+
+ def _log_incomplete_entry_install(self, etag, ename):
+ self.logger.error("Incomplete information for entry %s:%s; "
+ "cannot install" % (etag, ename))
+ return
def canInstall(self, entry):
"""Test if entry has enough information to be installed."""
@@ -692,18 +1911,17 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
return False
if 'failure' in entry.attrib:
- self.logger.error("Cannot install entry %s:%s with bind failure" % \
+ self.logger.error("Cannot install entry %s:%s with bind failure" %
(entry.tag, entry.get('name')))
return False
-
instances = entry.findall('Instance')
- # If the entry wasn't verifiable, then we really don't want to try and fix something
- # that we don't know is broken.
+ # If the entry wasn't verifiable, then we really don't want to try
+ # and fix something that we don't know is broken.
if not self.canVerify(entry):
- self.logger.debug("WARNING: Package %s was not verifiable, not passing to Install()" \
- % entry.get('name'))
+ self.logger.debug("WARNING: Package %s was not verifiable, not "
+ "passing to Install()" % entry.get('name'))
return False
if not instances:
@@ -711,53 +1929,70 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if entry.get('name') == 'gpg-pubkey':
# gpg-pubkey packages aren't really pacakges, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__gpg_ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
return False
else:
- if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
+ if [attr for attr in self.__ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
return False
else:
if entry.get('name') == 'gpg-pubkey':
# gpg-pubkey packages aren't really pacakges, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_gpg_ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
- if [attr for attr in self.__new_gpg_ireq__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install"\
- % (inst.tag, entry.get('name')))
+ if [attr for attr in self.__new_gpg_ireq__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_install(inst.tag,
+ entry.get('name'))
return False
else:
# New format with Instances.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
- self.logger.error(" Required attributes that may not be present are %s" \
- % (self.__new_ireq__[entry.tag]))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
+ self.logger.error(" Required attributes that "
+ "may not be present are %s" %
+ (self.__new_ireq__[entry.tag]))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
if inst.tag == 'Instance':
- if [attr for attr in self.__new_ireq__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for %s of package %s; cannot install" \
- % (inst.tag, entry.get('name')))
- self.logger.error(" Required attributes that may not be present are %s" \
+ if [attr for attr in self.__new_ireq__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_install(
+ inst.tag,
+ entry.get('name'))
+ self.logger.error(" Required attributes "
+ "that may not be present are %s"
% (self.__new_ireq__[inst.tag]))
return False
return True
+ def _log_incomplete_entry_verify(self, etag, ename):
+ self.logger.error("Incomplete information for entry %s:%s; "
+ "cannot verify" % (etag, ename))
+ return
+
def canVerify(self, entry):
"""
Test if entry has enough information to be verified.
@@ -775,13 +2010,15 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
return False
if 'failure' in entry.attrib:
- self.logger.error("Entry %s:%s reports bind failure: %s" % \
- (entry.tag, entry.get('name'), entry.get('failure')))
+ self.logger.error("Entry %s:%s reports bind failure: %s" %
+ (entry.tag, entry.get('name'),
+ entry.get('failure')))
return False
- # We don't want to do any checks so we don't care what the entry has in it.
- if (not self.pkg_checks or
- entry.get('pkg_checks', 'true').lower() == 'false'):
+ # we don't want to do any checks so
+ # we don't care what the entry has in it.
+ if (not Bcfg2.Options.setup.rpm_pkg_checks or
+ entry.get('pkg_checks', 'true').lower() == 'false'):
return True
instances = entry.findall('Instance')
@@ -791,53 +2028,72 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if entry.get('name') == 'gpg-pubkey':
# gpg-pubkey packages aren't really pacakges, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__gpg_req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
elif entry.tag == 'Path' and entry.get('type') == 'ignore':
# ignored Paths are only relevant during failed package
# verification
pass
else:
- if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ if [attr for attr in self.__req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
else:
if entry.get('name') == 'gpg-pubkey':
# gpg-pubkey packages aren't really pacakges, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_gpg_req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
- if [attr for attr in self.__new_gpg_req__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (inst.tag, inst.get('name')))
+ if [attr for attr in self.__new_gpg_req__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_verify(inst.tag,
+ inst.get('name'))
return False
else:
- # New format with Instances, or old style modified.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ # new format with Instances, or old style modified.
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
if inst.tag == 'Instance':
- if [attr for attr in self.__new_req__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (inst.tag, inst.get('name')))
+ if [attr for attr in self.__new_req__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_verify(inst.tag,
+ inst.get('name'))
return False
return True
+ def _get_tmp_entry(self, extra_entry, inst):
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance',
+ version=inst.get('version'),
+ release=inst.get('release'))
+ if inst.get('epoch', None) is not None:
+ tmp_entry.set('epoch', str(inst.get('epoch')))
+        if inst.get('arch', None) is not None:
+ tmp_entry.set('arch', inst.get('arch'))
+ return
+
def FindExtra(self):
"""Find extra packages."""
packages = [entry.get('name') for entry in self.getSupportedEntries()]
@@ -845,22 +2101,17 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
for (name, instances) in list(self.installed.items()):
if name not in packages:
- extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
+ extra_entry = Bcfg2.Client.XML.Element('Package',
+ name=name,
+ type=self.pkgtype)
for installed_inst in instances:
- if self.setup['extra']:
- self.logger.info("Extra Package %s %s." % \
+ if Bcfg2.Options.setup.extra:
+ self.logger.info("Extra Package %s %s." %
(name, self.str_evra(installed_inst)))
- tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
- version = installed_inst.get('version'), \
- release = installed_inst.get('release'))
- if installed_inst.get('epoch', None) != None:
- tmp_entry.set('epoch', str(installed_inst.get('epoch')))
- if installed_inst.get('arch', None) != None:
- tmp_entry.set('arch', installed_inst.get('arch'))
+ self._get_tmp_entry(extra_entry, installed_inst)
extras.append(extra_entry)
return extras
-
def FindExtraInstances(self, pkg_entry, installed_entry):
"""
Check for installed instances that are not in the config.
@@ -869,8 +2120,11 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
"""
name = pkg_entry.get('name')
- extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
- instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ extra_entry = Bcfg2.Client.XML.Element('Package',
+ name=name,
+ type=self.pkgtype)
+ instances = [inst for inst in pkg_entry if
+ inst.tag == 'Instance' or inst.tag == 'Package']
if name in self.installOnlyPkgs:
for installed_inst in installed_entry:
not_found = True
@@ -879,36 +2133,25 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.inst_evra_equal(inst, installed_inst):
not_found = False
break
- if not_found == True:
+ if not_found:
# Extra package.
- self.logger.info("Extra InstallOnlyPackage %s %s." % \
+ self.logger.info("Extra InstallOnlyPackage %s %s." %
(name, self.str_evra(installed_inst)))
- tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
- version = installed_inst.get('version'), \
- release = installed_inst.get('release'))
- if installed_inst.get('epoch', None) != None:
- tmp_entry.set('epoch', str(installed_inst.get('epoch')))
- if installed_inst.get('arch', None) != None:
- tmp_entry.set('arch', installed_inst.get('arch'))
+ self._get_tmp_entry(extra_entry, installed_inst)
else:
# Normal package, only check arch.
for installed_inst in installed_entry:
not_found = True
for inst in instances:
- if installed_inst.get('arch', None) == inst.get('arch', None) or\
- inst.tag == 'Package':
+ if (installed_inst.get('arch', None) ==
+ inst.get('arch', None) or
+ inst.tag == 'Package'):
not_found = False
break
if not_found:
- self.logger.info("Extra Normal Package Instance %s %s" % \
+ self.logger.info("Extra Normal Package Instance %s %s" %
(name, self.str_evra(installed_inst)))
- tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
- version = installed_inst.get('version'), \
- release = installed_inst.get('release'))
- if installed_inst.get('epoch', None) != None:
- tmp_entry.set('epoch', str(installed_inst.get('epoch')))
- if installed_inst.get('arch', None) != None:
- tmp_entry.set('arch', installed_inst.get('arch'))
+ self._get_tmp_entry(extra_entry, installed_inst)
if len(extra_entry) == 0:
extra_entry = None
@@ -932,9 +2175,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
Compare old style entry to installed entry. Which means ignore
the epoch and arch.
'''
- if (config_entry.tag == 'Package' and \
- config_entry.get('version') == installed_entry.get('version') and \
- config_entry.get('release') == installed_entry.get('release')):
+ if (config_entry.tag == 'Package' and
+ config_entry.get('version') == installed_entry.get('version')
+ and
+ config_entry.get('release') == installed_entry.get('release')):
return True
else:
return False
@@ -942,18 +2186,19 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
def inst_evra_equal(self, config_entry, installed_entry):
"""Compare new style instance to installed entry."""
- if config_entry.get('epoch', None) != None:
+ if config_entry.get('epoch', None) is not None:
epoch = int(config_entry.get('epoch'))
else:
epoch = None
- if (config_entry.tag == 'Instance' and \
- (epoch == installed_entry.get('epoch', 0) or \
- (epoch == 0 and installed_entry.get('epoch', 0) == None) or \
- (epoch == None and installed_entry.get('epoch', 0) == 0)) and \
- config_entry.get('version') == installed_entry.get('version') and \
- config_entry.get('release') == installed_entry.get('release') and \
- config_entry.get('arch', None) == installed_entry.get('arch', None)):
+ if (config_entry.tag == 'Instance' and
+ (epoch == installed_entry.get('epoch', 0) or
+ (epoch == 0 and installed_entry.get('epoch', 0) is None) or
+ (epoch is None and installed_entry.get('epoch', 0) == 0)) and
+ config_entry.get('version') == installed_entry.get('version') and
+ config_entry.get('release') == installed_entry.get('release') and
+ config_entry.get('arch', None) == installed_entry.get('arch',
+ None)):
return True
else:
return False
@@ -966,10 +2211,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
(big-endian) of the key ID which is good enough for our purposes.
"""
- init_ts = rpmtools.rpmtransactionset()
- init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
- gpg_hdrs = rpmtools.getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'})
- keyids = [ header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
+ init_ts = rpmtransactionset()
+ init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
+ gpg_hdrs = getheadersbykeyword(init_ts, **{'name': 'gpg-pubkey'})
+ keyids = [header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
keyids.append('None')
init_ts.closeDB()
del init_ts
diff --git a/src/lib/Bcfg2/Client/Tools/RPMng.py b/src/lib/Bcfg2/Client/Tools/RPMng.py
deleted file mode 100644
index 0f0e4c700..000000000
--- a/src/lib/Bcfg2/Client/Tools/RPMng.py
+++ /dev/null
@@ -1,9 +0,0 @@
-""" RPM driver called 'RPMng' for backwards compat """
-
-from Bcfg2.Client.Tools.RPM import RPM
-
-
-class RPMng(RPM):
- """ RPM driver called 'RPMng' for backwards compat """
- deprecated = True
- name = "RPM"
diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
index e0c913dcd..a482dbc00 100644
--- a/src/lib/Bcfg2/Client/Tools/RcUpdate.py
+++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
@@ -98,10 +98,10 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool):
# make sure service is disabled on boot
bootcmd = '/sbin/rc-update del %s default'
bootcmdrv = self.cmd.run(bootcmd % entry.get('name')).success
- if self.setup['servicemode'] == 'disabled':
+ if Bcfg2.Options.setup.service_mode == 'disabled':
# 'disabled' means we don't attempt to modify running svcs
return bootcmdrv
- buildmode = self.setup['servicemode'] == 'build'
+ buildmode = Bcfg2.Options.setup.service_mode == 'build'
if (entry.get('status') == 'on' and not buildmode) and \
entry.get('current_status') == 'off':
svccmdrv = self.start_service(entry)
diff --git a/src/lib/Bcfg2/Client/Tools/SELinux.py b/src/lib/Bcfg2/Client/Tools/SELinux.py
index 0b4aba60d..7b5ff7813 100644
--- a/src/lib/Bcfg2/Client/Tools/SELinux.py
+++ b/src/lib/Bcfg2/Client/Tools/SELinux.py
@@ -7,6 +7,7 @@ import copy
import glob
import struct
import socket
+import logging
import selinux
import seobject
import Bcfg2.Client.XML
@@ -77,14 +78,13 @@ class SELinux(Bcfg2.Client.Tools.Tool):
SEPort=['name', 'selinuxtype'],
SEUser=['name', 'roles', 'prefix'])
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
self.handlers = {}
for handler in self.__handles__:
etype = handler[0]
self.handlers[etype] = \
- globals()["SELinux%sHandler" % etype.title()](self, logger,
- setup, config)
+ globals()["SELinux%sHandler" % etype.title()](self, config)
self.txn = False
self.post_txn_queue = []
@@ -100,10 +100,6 @@ class SELinux(Bcfg2.Client.Tools.Tool):
# http://docs.python.org/2/reference/datamodel.html#object.__getattr__
# for details
- def BundleUpdated(self, _, states):
- for handler in self.handlers.values():
- handler.BundleUpdated(states)
-
def FindExtra(self):
extra = []
for handler in self.handlers.values():
@@ -119,7 +115,7 @@ class SELinux(Bcfg2.Client.Tools.Tool):
in the specification """
return self.handlers[entry.tag].primarykey(entry)
- def Install(self, entries, states):
+ def Install(self, entries):
# start a transaction
semanage = seobject.semanageRecords("")
if hasattr(semanage, "start"):
@@ -129,13 +125,14 @@ class SELinux(Bcfg2.Client.Tools.Tool):
else:
self.logger.debug("SELinux transactions not supported; this may "
"slow things down considerably")
- Bcfg2.Client.Tools.Tool.Install(self, entries, states)
+ states = Bcfg2.Client.Tools.Tool.Install(self, entries)
if hasattr(semanage, "finish"):
self.logger.debug("Committing SELinux transaction")
semanage.finish()
self.txn = False
for func, arg, kwargs in self.post_txn_queue:
states[arg] = func(*arg, **kwargs)
+ return states
def GenericSEInstall(self, entry):
"""Dispatch install to the proper method according to entry tag"""
@@ -144,7 +141,7 @@ class SELinux(Bcfg2.Client.Tools.Tool):
def GenericSEVerify(self, entry, _):
"""Dispatch verify to the proper method according to entry tag"""
rv = self.handlers[entry.tag].Verify(entry)
- if entry.get('qtext') and self.setup['interactive']:
+ if entry.get('qtext') and Bcfg2.Options.setup.interactive:
entry.set('qtext',
'%s\nInstall %s: (y/N) ' %
(entry.get('qtext'),
@@ -174,10 +171,9 @@ class SELinuxEntryHandler(object):
custom_re = re.compile(r' (?P<name>\S+)$')
custom_format = None
- def __init__(self, tool, logger, setup, config):
+ def __init__(self, tool, config):
self.tool = tool
- self.logger = logger
- self.setup = setup
+ self.logger = logging.getLogger(self.__class__.__name__)
self.config = config
self._records = None
self._all = None
@@ -229,7 +225,7 @@ class SELinuxEntryHandler(object):
match = self.custom_re.search(cmd)
if match:
if (len(self.custom_format) == 1 and
- self.custom_format[0] == "name"):
+ self.custom_format[0] == "name"):
keys.append(match.group("name"))
else:
keys.append(tuple([match.group(k)
@@ -379,11 +375,6 @@ class SELinuxEntryHandler(object):
for key in records.keys()
if key not in specified]
- def BundleUpdated(self, states):
- """ perform any additional magic tasks that need to be run
- when a bundle is updated """
- pass
-
class SELinuxSebooleanHandler(SELinuxEntryHandler):
""" handle SELinux boolean entries """
@@ -631,8 +622,8 @@ class SELinuxSeuserHandler(SELinuxEntryHandler):
etype = "user"
value_format = ("prefix", None, None, "roles")
- def __init__(self, tool, logger, setup, config):
- SELinuxEntryHandler.__init__(self, tool, logger, setup, config)
+ def __init__(self, tool, config):
+ SELinuxEntryHandler.__init__(self, tool, config)
self.needs_prefix = False
@property
@@ -725,9 +716,9 @@ class SELinuxSemoduleHandler(SELinuxEntryHandler):
etype = "module"
value_format = (None, "disabled")
- def __init__(self, tool, logger, setup, config):
- SELinuxEntryHandler.__init__(self, tool, logger, setup, config)
- self.filetool = POSIXFile(logger, setup, config)
+ def __init__(self, tool, config):
+ SELinuxEntryHandler.__init__(self, tool, config)
+ self.filetool = POSIXFile(config)
try:
self.setype = selinux.selinux_getpolicytype()[1]
except IndexError:
diff --git a/src/lib/Bcfg2/Client/Tools/SYSV.py b/src/lib/Bcfg2/Client/Tools/SYSV.py
index aca7d593c..5698f237a 100644
--- a/src/lib/Bcfg2/Client/Tools/SYSV.py
+++ b/src/lib/Bcfg2/Client/Tools/SYSV.py
@@ -32,8 +32,8 @@ class SYSV(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'sysv'
pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name'])))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
# noaskfile needs to live beyond __init__ otherwise file is removed
self.noaskfile = tempfile.NamedTemporaryFile()
self.noaskname = self.noaskfile.name
@@ -80,8 +80,8 @@ class SYSV(Bcfg2.Client.Tools.PkgTool):
self.logger.debug("Package %s not installed" %
entry.get("name"))
else:
- if (self.setup['quick'] or
- entry.attrib.get('verify', 'true') == 'false'):
+ if (Bcfg2.Options.setup.quick or
+ entry.attrib.get('verify', 'true') == 'false'):
return True
rv = self.cmd.run("/usr/sbin/pkgchk -n %s" % entry.get('name'))
if rv.success:
diff --git a/src/lib/Bcfg2/Client/Tools/VCS.py b/src/lib/Bcfg2/Client/Tools/VCS.py
index aca5dbbc7..4e8ac76a4 100644
--- a/src/lib/Bcfg2/Client/Tools/VCS.py
+++ b/src/lib/Bcfg2/Client/Tools/VCS.py
@@ -88,8 +88,10 @@ class VCS(Bcfg2.Client.Tools.Tool):
return False
try:
- client, path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
- remote_refs = client.fetch_pack(path, (lambda x: None), None, None, None)
+ client, path = dulwich.client.get_transport_and_path(
+ entry.get('sourceurl'))
+ remote_refs = client.fetch_pack(path,
+ (lambda x: None), None, None, None)
if expected_rev in remote_refs:
expected_rev = remote_refs[expected_rev]
except:
@@ -119,10 +121,12 @@ class VCS(Bcfg2.Client.Tools.Tool):
dulwich.file.ensure_dir_exists(destname)
destr = dulwich.repo.Repo.init(destname)
- cl, host_path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
+ determine_wants = destr.object_store.determine_wants_all
+ cl, host_path = dulwich.client.get_transport_and_path(
+ entry.get('sourceurl'))
remote_refs = cl.fetch(host_path,
destr,
- determine_wants=destr.object_store.determine_wants_all,
+ determine_wants=determine_wants,
progress=sys.stdout.write)
if entry.get('revision') in remote_refs:
@@ -161,15 +165,18 @@ class VCS(Bcfg2.Client.Tools.Tool):
def Verifysvn(self, entry, _):
"""Verify svn repositories"""
- headrev = pysvn.Revision( pysvn.opt_revision_kind.head )
+ headrev = pysvn.Revision(pysvn.opt_revision_kind.head)
client = pysvn.Client()
try:
cur_rev = str(client.info(entry.get('name')).revision.number)
- server = client.info2(entry.get('sourceurl'), headrev, recurse=False)
+ server = client.info2(entry.get('sourceurl'),
+ headrev,
+ recurse=False)
if server:
server_rev = str(server[0][1].rev.number)
except:
- self.logger.info("Repository %s does not exist" % entry.get('name'))
+ self.logger.info("Repository %s does not exist" %
+ entry.get('name'))
return False
if entry.get('revision') == 'latest' and cur_rev == server_rev:
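
The svn verification above compares the working copy's revision with the repository head via pysvn. A condensed sketch of that comparison, reusing the same pysvn calls; the checkout path and URL are placeholders:

import pysvn

client = pysvn.Client()
headrev = pysvn.Revision(pysvn.opt_revision_kind.head)

# Revision of the local working copy (path is a placeholder).
cur_rev = str(client.info('/srv/checkout').revision.number)
# Latest revision on the server (URL is a placeholder).
server = client.info2('https://svn.example.com/repo', headrev, recurse=False)
server_rev = str(server[0][1].rev.number)

print(cur_rev == server_rev)  # True when the checkout is up to date
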
diff --git a/src/lib/Bcfg2/Client/Tools/YUM.py b/src/lib/Bcfg2/Client/Tools/YUM.py
index c30c0a13a..8bb87540c 100644
--- a/src/lib/Bcfg2/Client/Tools/YUM.py
+++ b/src/lib/Bcfg2/Client/Tools/YUM.py
@@ -3,6 +3,7 @@
import copy
import os.path
import sys
+import logging
import yum
import yum.packages
import yum.rpmtrans
@@ -12,6 +13,7 @@ import yum.misc
import rpmUtils.arch
import Bcfg2.Client.XML
import Bcfg2.Client.Tools
+import Bcfg2.Options
def build_yname(pkgname, inst):
@@ -65,13 +67,13 @@ class RPMDisplay(yum.rpmtrans.RPMBaseCallback):
"""We subclass the default RPM transaction callback so that we
can control Yum's verbosity and pipe it through the right logger."""
- def __init__(self, logger):
+ def __init__(self):
yum.rpmtrans.RPMBaseCallback.__init__(self)
# we want to log events to *both* the Bcfg2 logger (which goes
# to stderr or syslog or wherever the user wants it to go)
# *and* the yum file logger, which will go to yum.log (ticket
# #1103)
- self.bcfg2_logger = logger
+ self.bcfg2_logger = logging.getLogger(self.__class__.__name__)
self.state = None
self.package = None
@@ -110,13 +112,41 @@ class YumDisplay(yum.callbacks.ProcessTransBaseCallback):
"""Class to handle display of what step we are in the Yum transaction
such as downloading packages, etc."""
- def __init__(self, logger):
+ def __init__(self):
yum.callbacks.ProcessTransBaseCallback.__init__(self)
- self.logger = logger
+ self.logger = logging.getLogger(self.__class__.__name__)
class YUM(Bcfg2.Client.Tools.PkgTool):
"""Support for Yum packages."""
+
+ options = Bcfg2.Client.Tools.PkgTool.options + [
+ Bcfg2.Options.BooleanOption(
+ cf=('YUM', 'pkg_checks'), default=True, dest="yum_pkg_checks",
+ help="Perform YUM package checks"),
+ Bcfg2.Options.BooleanOption(
+ cf=('YUM', 'pkg_verify'), default=True, dest="yum_pkg_verify",
+ help="Perform YUM package verify"),
+ Bcfg2.Options.BooleanOption(
+ cf=('YUM', 'install_missing'), default=True,
+ dest="yum_install_missing",
+ help="Install missing packages"),
+ Bcfg2.Options.Option(
+ cf=('YUM', 'erase_flags'), default=["allmatches"],
+ dest="yum_erase_flags",
+ help="YUM erase flags"),
+ Bcfg2.Options.BooleanOption(
+ cf=('YUM', 'fix_version'), default=True,
+ dest="yum_fix_version",
+ help="Fix (upgrade or downgrade) packages with the wrong version"),
+ Bcfg2.Options.BooleanOption(
+ cf=('YUM', 'reinstall_broken'), default=True,
+ dest="yum_reinstall_broken",
+ help="Reinstall packages that fail to verify"),
+ Bcfg2.Options.Option(
+ cf=('YUM', 'verify_flags'), default=[], dest="yum_verify_flags",
+ help="YUM verify flags")]
+
pkgtype = 'yum'
__execs__ = []
__handles__ = [('Package', 'yum'),
@@ -126,11 +156,11 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
__req__ = {'Package': ['type'],
'Path': ['type']}
- conflicts = ['YUM24', 'RPM', 'RPMng', 'YUMng']
+ conflicts = ['RPM']
- def __init__(self, logger, setup, config):
- self.yumbase = self._loadYumBase(setup=setup, logger=logger)
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ self.yumbase = self._loadYumBase()
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
self.ignores = []
for struct in config:
self.ignores.extend([entry.get('name')
@@ -171,47 +201,41 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
else:
dest[pname] = dict(data)
- # Process the Yum section from the config file. These are all
- # boolean flags, either we do stuff or we don't
- self.pkg_checks = self.setup["yum_pkg_checks"]
- self.pkg_verify = self.setup["yum_pkg_verify"]
- self.do_install = self.setup["yum_installed_action"] == "install"
- self.do_upgrade = self.setup["yum_version_fail_action"] == "upgrade"
- self.do_reinst = self.setup["yum_verify_fail_action"] == "reinstall"
- self.verify_flags = self.setup["yum_verify_flags"]
-
self.installonlypkgs = self.yumbase.conf.installonlypkgs
if 'gpg-pubkey' not in self.installonlypkgs:
self.installonlypkgs.append('gpg-pubkey')
- self.logger.debug("Yum: Install missing: %s" % self.do_install)
- self.logger.debug("Yum: pkg_checks: %s" % self.pkg_checks)
- self.logger.debug("Yum: pkg_verify: %s" % self.pkg_verify)
- self.logger.debug("Yum: Upgrade on version fail: %s" % self.do_upgrade)
- self.logger.debug("Yum: Reinstall on verify fail: %s" % self.do_reinst)
+ self.logger.debug("Yum: Install missing: %s" %
+ Bcfg2.Options.setup.yum_install_missing)
+ self.logger.debug("Yum: pkg_checks: %s" %
+ Bcfg2.Options.setup.yum_pkg_checks)
+ self.logger.debug("Yum: pkg_verify: %s" %
+ Bcfg2.Options.setup.yum_pkg_verify)
+ self.logger.debug("Yum: Upgrade on version fail: %s" %
+ Bcfg2.Options.setup.yum_fix_version)
+ self.logger.debug("Yum: Reinstall on verify fail: %s" %
+ Bcfg2.Options.setup.yum_reinstall_broken)
self.logger.debug("Yum: installonlypkgs: %s" % self.installonlypkgs)
- self.logger.debug("Yum: verify_flags: %s" % self.verify_flags)
+ self.logger.debug("Yum: verify_flags: %s" %
+ Bcfg2.Options.setup.yum_verify_flags)
- def _loadYumBase(self, setup=None, logger=None):
+ def _loadYumBase(self):
''' this may be called before PkgTool.__init__() is called on
this object (when the YUM object is first instantiated;
PkgTool.__init__() calls RefreshPackages(), which requires a
YumBase object already exist), or after __init__() has
completed, when we reload the yum config before installing
- packages. Consequently, we support both methods by allowing
- setup and logger, the only object properties we use in this
- function, to be passed as keyword arguments or to be omitted
- and drawn from the object itself.'''
+ packages. '''
rv = yum.YumBase() # pylint: disable=C0103
- if setup is None:
- setup = self.setup
- if logger is None:
+ if hasattr(self, "logger"):
logger = self.logger
+ else:
+ logger = logging.getLogger(self.name)
- if setup['debug']:
+ if Bcfg2.Options.setup.debug:
debuglevel = 3
- elif setup['verbose']:
+ elif Bcfg2.Options.setup.verbose:
debuglevel = 2
else:
debuglevel = 0
@@ -242,7 +266,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
to the newest available """
# old style entry; synthesize Instances from current installed
if (entry.get('name') not in self.yum_installed and
- entry.get('name') not in self.yum_avail):
+ entry.get('name') not in self.yum_avail):
# new entry; fall back to default
entry.set('version', 'any')
else:
@@ -296,7 +320,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
missing = Bcfg2.Client.Tools.PkgTool.missing_attrs(self, entry)
if (entry.get('name', None) is None and
- entry.get('group', None) is None):
+ entry.get('group', None) is None):
missing += ['name', 'group']
return missing
@@ -311,7 +335,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
using. Disabling file checksums is a new feature in yum
3.2.17-ish """
try:
- return pkg.verify(fast=self.setup.get('quick', False))
+ return pkg.verify(fast=Bcfg2.Options.setup.quick)
except TypeError:
# Older Yum API
return pkg.verify()
@@ -436,9 +460,9 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
package_fail = False
qtext_versions = []
virt_pkg = False
- pkg_checks = (self.pkg_checks and
+ pkg_checks = (Bcfg2.Options.setup.yum_pkg_checks and
entry.get('pkg_checks', 'true').lower() == 'true')
- pkg_verify = (self.pkg_verify and
+ pkg_verify = (Bcfg2.Options.setup.yum_pkg_verify and
entry.get('pkg_verify', 'true').lower() == 'true')
yum_group = False
@@ -531,7 +555,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
inst.get('verify_flags').lower().replace(' ',
',').split(',')
else:
- verify_flags = self.verify_flags
+ verify_flags = Bcfg2.Options.setup.yum_verify_flags
if 'arch' in nevra:
# If arch is specified use it to select the package
@@ -619,7 +643,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
qtext_versions.append("U(%s)" % str(all_pkg_objs[0]))
continue
- if self.setup.get('quick', False):
+ if Bcfg2.Options.setup.quick:
# Passed -q on the command line
continue
if not (pkg_verify and
@@ -693,7 +717,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
install_only = False
if virt_pkg or \
- (install_only and not self.setup['kevlar']) or \
+ (install_only and not Bcfg2.Options.setup.kevlar) or \
yum_group:
# virtual capability supplied, we are probably dealing
# with multiple packages of different names. This check
@@ -816,8 +840,8 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
self.yumbase.closeRpmDB()
self.RefreshPackages()
- rpm_display = RPMDisplay(self.logger)
- yum_display = YumDisplay(self.logger)
+ rpm_display = RPMDisplay()
+ yum_display = YumDisplay()
# Run the Yum Transaction
try:
rescode, restring = self.yumbase.buildTransaction()
@@ -866,7 +890,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
cleanup()
- def Install(self, packages, states): # pylint: disable=R0912,R0914
+ def Install(self, packages): # pylint: disable=R0912,R0914
""" Try and fix everything that Yum.VerifyPackages() found
wrong for each Package Entry. This can result in individual
RPMs being installed (for the first time), deleted, downgraded
@@ -884,6 +908,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
entry is set to True. """
self.logger.debug('Running Yum.Install()')
+ states = dict()
install_pkgs = []
gpg_keys = []
upgrade_pkgs = []
@@ -900,8 +925,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
# Remove extra instances.
# Can not reverify because we don't have a package entry.
if self.extra_instances is not None and len(self.extra_instances) > 0:
- if (self.setup.get('remove') == 'all' or
- self.setup.get('remove') == 'packages'):
+ if Bcfg2.Options.setup.remove in ['all', 'packages']:
self.Remove(self.extra_instances)
else:
self.logger.info("The following extra package instances will "
@@ -926,11 +950,14 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
nevra2string(build_yname(pkg.get('name'), inst)))
continue
status = self.instance_status[inst]
- if not status.get('installed', False) and self.do_install:
+ if (not status.get('installed', False) and
+ Bcfg2.Options.setup.yum_install_missing):
queue_pkg(pkg, inst, install_pkgs)
- elif status.get('version_fail', False) and self.do_upgrade:
+ elif (status.get('version_fail', False) and
+ Bcfg2.Options.setup.yum_fix_version):
queue_pkg(pkg, inst, upgrade_pkgs)
- elif status.get('verify_fail', False) and self.do_reinst:
+ elif (status.get('verify_fail', False) and
+ Bcfg2.Options.setup.yum_reinstall_broken):
queue_pkg(pkg, inst, reinstall_pkgs)
else:
# Either there was no Install/Version/Verify
@@ -1006,7 +1033,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
self._runYumTransaction()
- if not self.setup['kevlar']:
+ if not Bcfg2.Options.setup.kevlar:
for pkg_entry in [p for p in packages if self.canVerify(p)]:
self.logger.debug("Reverifying Failed Package %s" %
pkg_entry.get('name'))
@@ -1014,8 +1041,8 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
self.VerifyPackage(pkg_entry,
self.modlists.get(pkg_entry, []))
- for entry in [ent for ent in packages if states[ent]]:
- self.modified.append(entry)
+ self.modified.extend(ent for ent in packages if states[ent])
+ return states
def Remove(self, packages):
"""
diff --git a/src/lib/Bcfg2/Client/Tools/YUM24.py b/src/lib/Bcfg2/Client/Tools/YUM24.py
deleted file mode 100644
index d78127ddd..000000000
--- a/src/lib/Bcfg2/Client/Tools/YUM24.py
+++ /dev/null
@@ -1,399 +0,0 @@
-"""This provides bcfg2 support for yum."""
-
-import copy
-import os.path
-import sys
-import yum
-import Bcfg2.Client.XML
-from Bcfg2.Client.Tools.RPM import RPM
-
-
-def build_yname(pkgname, inst):
- """Build yum appropriate package name."""
- ypname = pkgname
- if inst.get('version') != 'any':
- ypname += '-'
- if inst.get('epoch', False):
- ypname += "%s:" % inst.get('epoch')
- if inst.get('version', False) and inst.get('version') != 'any':
- ypname += "%s" % (inst.get('version'))
- if inst.get('release', False) and inst.get('release') != 'any':
- ypname += "-%s" % (inst.get('release'))
- if inst.get('arch', False) and inst.get('arch') != 'any':
- ypname += ".%s" % (inst.get('arch'))
- return ypname
-
-
-class YUM24(RPM):
- """Support for Yum packages."""
- pkgtype = 'yum'
- deprecated = True
- __execs__ = ['/usr/bin/yum', '/var/lib/rpm']
- __handles__ = [('Package', 'yum'),
- ('Package', 'rpm'),
- ('Path', 'ignore')]
-
- __req__ = {'Package': ['name', 'version']}
- __ireq__ = {'Package': ['name']}
- #__ireq__ = {'Package': ['name', 'version']}
-
- __new_req__ = {'Package': ['name'],
- 'Instance': ['version', 'release', 'arch']}
- __new_ireq__ = {'Package': ['name'], \
- 'Instance': []}
- #__new_ireq__ = {'Package': ['name', 'uri'], \
- # 'Instance': ['simplefile', 'version', 'release', 'arch']}
-
- __gpg_req__ = {'Package': ['name', 'version']}
- __gpg_ireq__ = {'Package': ['name', 'version']}
-
- __new_gpg_req__ = {'Package': ['name'],
- 'Instance': ['version', 'release']}
- __new_gpg_ireq__ = {'Package': ['name'],
- 'Instance': ['version', 'release']}
-
- def __init__(self, logger, setup, config):
- RPM.__init__(self, logger, setup, config)
- self.__important__ = self.__important__ + \
- [entry.get('name') for struct in config \
- for entry in struct \
- if entry.tag in ['Path', 'ConfigFile'] and \
- (entry.get('name').startswith('/etc/yum.d') \
- or entry.get('name').startswith('/etc/yum.repos.d')) \
- or entry.get('name') == '/etc/yum.conf']
- self.autodep = setup.get("yum24_autodep")
- self.yum_avail = dict()
- self.yum_installed = dict()
- self.yb = yum.YumBase()
- self.yb.doConfigSetup()
- self.yb.doTsSetup()
- self.yb.doRpmDBSetup()
- yup = self.yb.doPackageLists(pkgnarrow='updates')
- if hasattr(self.yb.rpmdb, 'pkglist'):
- yinst = self.yb.rpmdb.pkglist
- else:
- yinst = self.yb.rpmdb.getPkgList()
- for dest, source in [(self.yum_avail, yup.updates),
- (self.yum_installed, yinst)]:
- for pkg in source:
- if dest is self.yum_avail:
- pname = pkg.name
- data = {pkg.arch: (pkg.epoch, pkg.version, pkg.release)}
- else:
- pname = pkg[0]
- if pkg[1] is None:
- a = 'noarch'
- else:
- a = pkg[1]
- if pkg[2] is None:
- e = '0'
- else:
- e = pkg[2]
- data = {a: (e, pkg[3], pkg[4])}
- if pname in dest:
- dest[pname].update(data)
- else:
- dest[pname] = dict(data)
-
- def VerifyPackage(self, entry, modlist):
- pinned_version = None
- if entry.get('version', False) == 'auto':
- # old style entry; synthesize Instances from current installed
- if entry.get('name') not in self.yum_installed and \
- entry.get('name') not in self.yum_avail:
- # new entry; fall back to default
- entry.set('version', 'any')
- else:
- data = copy.copy(self.yum_installed[entry.get('name')])
- if entry.get('name') in self.yum_avail:
- # installed but out of date
- data.update(self.yum_avail[entry.get('name')])
- for (arch, (epoch, vers, rel)) in list(data.items()):
- x = Bcfg2.Client.XML.SubElement(entry, "Instance",
- name=entry.get('name'),
- version=vers, arch=arch,
- release=rel, epoch=epoch)
- if 'verify_flags' in entry.attrib:
- x.set('verify_flags', entry.get('verify_flags'))
- if 'verify' in entry.attrib:
- x.set('verify', entry.get('verify'))
-
- if entry.get('type', False) == 'yum':
- # Check for virtual provides or packages. If we don't have
- # this package use Yum to resolve it to a real package name
- knownPkgs = list(self.yum_installed.keys()) + list(self.yum_avail.keys())
- if entry.get('name') not in knownPkgs:
- # If the package name matches something installed
- # or available then that's the correct package.
- try:
- pkgDict = dict([(i.name, i) for i in \
- self.yb.returnPackagesByDep(entry.get('name'))])
- except yum.Errors.YumBaseError:
- e = sys.exc_info()[1]
- self.logger.error('Yum Error Depsolving for %s: %s' % \
- (entry.get('name'), str(e)))
- pkgDict = {}
-
- if len(pkgDict) > 1:
- # What do we do with multiple packages?
- s = "YUM24: returnPackagesByDep(%s) returned many packages"
- self.logger.info(s % entry.get('name'))
- s = "YUM24: matching packages: %s"
- self.logger.info(s % str(list(pkgDict.keys())))
- pkgs = set(pkgDict.keys()) & set(self.yum_installed.keys())
- if len(pkgs) > 0:
- # Virtual packages matches an installed real package
- pkg = pkgDict[pkgs.pop()]
- s = "YUM24: chosing: %s" % pkg.name
- self.logger.info(s)
- else:
- # What's the right package? This will fail verify
- # and Yum should Do The Right Thing on package install
- pkg = None
- elif len(pkgDict) == 1:
- pkg = list(pkgDict.values())[0]
- else: # len(pkgDict) == 0
- s = "YUM24: returnPackagesByDep(%s) returned no results"
- self.logger.info(s % entry.get('name'))
- pkg = None
-
- if pkg is not None:
- s = "YUM24: remapping virtual package %s to %s"
- self.logger.info(s % (entry.get('name'), pkg.name))
- entry.set('name', pkg.name)
-
- return RPM.VerifyPackage(self, entry, modlist)
-
- def Install(self, packages, states):
- """
- Try and fix everything that YUM24.VerifyPackages() found wrong for
- each Package Entry. This can result in individual RPMs being
- installed (for the first time), deleted, downgraded
- or upgraded.
-
- NOTE: YUM can not reinstall a package that it thinks is already
- installed.
-
- packages is a list of Package Elements that has
- states[<Package Element>] == False
-
- The following effects occur:
- - states{} is conditionally updated for each package.
- - self.installed{} is rebuilt, possibly multiple times.
- - self.instance_status{} is conditionally updated for each instance
- of a package.
- - Each package will be added to self.modified[] if its states{}
- entry is set to True.
-
- """
- self.logger.info('Running YUM24.Install()')
-
- install_pkgs = []
- gpg_keys = []
- upgrade_pkgs = []
-
- # Remove extra instances.
- # Can not reverify because we don't have a package entry.
- if len(self.extra_instances) > 0:
- if (self.setup.get('remove') == 'all' or \
- self.setup.get('remove') == 'packages'):
- self.Remove(self.extra_instances)
- else:
- self.logger.info("The following extra package instances will be removed by the '-r' option:")
- for pkg in self.extra_instances:
- for inst in pkg:
- self.logger.info(" %s %s" % \
- ((pkg.get('name'), self.str_evra(inst))))
-
- # Figure out which instances of the packages actually need something
- # doing to them and place in the appropriate work 'queue'.
- for pkg in packages:
- insts = [pinst for pinst in pkg \
- if pinst.tag in ['Instance', 'Package']]
- if insts:
- for inst in insts:
- if self.FixInstance(inst, self.instance_status[inst]):
- if self.instance_status[inst].get('installed', False) \
- == False:
- if pkg.get('name') == 'gpg-pubkey':
- gpg_keys.append(inst)
- else:
- install_pkgs.append(inst)
- elif self.instance_status[inst].get('version_fail', \
- False) == True:
- upgrade_pkgs.append(inst)
- else:
- install_pkgs.append(pkg)
-
- # Install GPG keys.
- # Alternatively specify the required keys using 'gpgkey' in the
- # repository definition in yum.conf. YUM will install the keys
- # automatically.
- if len(gpg_keys) > 0:
- for inst in gpg_keys:
- self.logger.info("Installing GPG keys.")
- if inst.get('simplefile') is None:
- self.logger.error("GPG key has no simplefile attribute")
- continue
- key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile'))
- if self.cmd.run("rpm --import %s" % key_arg).success:
- self.logger.debug("Unable to install %s-%s" % \
- (self.instance_status[inst].get('pkg').get('name'), \
- self.str_evra(inst)))
- else:
- self.logger.debug("Installed %s-%s-%s" % \
- (self.instance_status[inst].get('pkg').get('name'), \
- inst.get('version'), inst.get('release')))
- self.RefreshPackages()
- self.gpg_keyids = self.getinstalledgpg()
- pkg = self.instance_status[gpg_keys[0]].get('pkg')
- states[pkg] = self.VerifyPackage(pkg, [])
-
- # Install packages.
- if len(install_pkgs) > 0:
- self.logger.info("Attempting to install packages")
-
- if self.autodep:
- pkgtool = "/usr/bin/yum -d0 -y install %s"
- else:
- pkgtool = "/usr/bin/yum -d0 install %s"
-
- install_args = []
- for inst in install_pkgs:
- pkg_arg = self.instance_status[inst].get('pkg').get('name')
- install_args.append(build_yname(pkg_arg, inst))
-
- if self.cmd.run(pkgtool % " ".join(install_args)).success:
- # The yum command succeeded. All packages installed.
- self.logger.info("Single Pass for Install Succeeded")
- self.RefreshPackages()
- else:
- # The yum command failed. No packages installed.
- # Try installing instances individually.
- self.logger.error("Single Pass Install of Packages Failed")
- installed_instances = []
- for inst in install_pkgs:
- pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
-
- if self.cmd.run(pkgtool % pkg_arg).success:
- installed_instances.append(inst)
- else:
- self.logger.debug("%s %s would not install." %
- (self.instance_status[inst].get('pkg').get('name'),
- self.str_evra(inst)))
- self.RefreshPackages()
-
- # Fix upgradeable packages.
- if len(upgrade_pkgs) > 0:
- self.logger.info("Attempting to upgrade packages")
-
- if self.autodep:
- pkgtool = "/usr/bin/yum -d0 -y update %s"
- else:
- pkgtool = "/usr/bin/yum -d0 update %s"
-
- upgrade_args = []
- for inst in upgrade_pkgs:
- pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
- upgrade_args.append(pkg_arg)
-
- if self.cmd.run(pkgtool % " ".join(upgrade_args)).success:
- # The yum command succeeded. All packages installed.
- self.logger.info("Single Pass for Install Succeeded")
- self.RefreshPackages()
- else:
- # The yum command failed. No packages installed.
- # Try installing instances individually.
- self.logger.error("Single Pass Install of Packages Failed")
- installed_instances = []
- for inst in upgrade_pkgs:
- pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
- if self.cmd.run(pkgtool % pkg_arg).success:
- installed_instances.append(inst)
- else:
- self.logger.debug("%s %s would not install." % \
- (self.instance_status[inst].get('pkg').get('name'), \
- self.str_evra(inst)))
-
- self.RefreshPackages()
-
- if not self.setup['kevlar']:
- for pkg_entry in [p for p in packages if self.canVerify(p)]:
- self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
- states[pkg_entry] = self.VerifyPackage(pkg_entry, \
- self.modlists.get(pkg_entry, []))
-
- for entry in [ent for ent in packages if states[ent]]:
- self.modified.append(entry)
-
- def Remove(self, packages):
- """
- Remove specified entries.
-
- packages is a list of Package Entries with Instances generated
- by FindExtra().
- """
- self.logger.debug('Running YUM24.Remove()')
-
- if self.autodep:
- pkgtool = "/usr/bin/yum -d0 -y erase %s"
- else:
- pkgtool = "/usr/bin/yum -d0 erase %s"
-
- erase_args = []
- for pkg in packages:
- for inst in pkg:
- if pkg.get('name') != 'gpg-pubkey':
- pkg_arg = pkg.get('name') + '-'
- if inst.get('epoch', False):
- pkg_arg = pkg_arg + inst.get('epoch') + ':'
- pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release')
- if inst.get('arch', False):
- pkg_arg = pkg_arg + '.' + inst.get('arch')
- erase_args.append(pkg_arg)
- else:
- pkgspec = {'name': pkg.get('name'),
- 'version': inst.get('version'),
- 'release': inst.get('release')}
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkgspec.get('name'), self.str_evra(pkgspec)))
- self.logger.info(" This package will be deleted in a future version of the YUM24 driver.")
-
- rv = self.cmd.run(pkgtool % " ".join(erase_args))
- if rv.success:
- self.modified += packages
- for pkg in erase_args:
- self.logger.info("Deleted %s" % (pkg))
- else:
- self.logger.info("Bulk erase failed with errors:")
- self.logger.debug("Erase results: %s" % rv.error)
- self.logger.info("Attempting individual erase for each package.")
- for pkg in packages:
- pkg_modified = False
- for inst in pkg:
- if pkg.get('name') != 'gpg-pubkey':
- pkg_arg = pkg.get('name') + '-'
- if 'epoch' in inst.attrib:
- pkg_arg = pkg_arg + inst.get('epoch') + ':'
- pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release')
- if 'arch' in inst.attrib:
- pkg_arg = pkg_arg + '.' + inst.get('arch')
- else:
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkg.get('name'), self.str_evra(pkg)))
- self.logger.info(" This package will be deleted in a future version of the YUM24 driver.")
- continue
-
- rv = self.cmd.run(self.pkgtool % pkg_arg)
- if rv.success:
- pkg_modified = True
- self.logger.info("Deleted %s" % pkg_arg)
- else:
- self.logger.error("Unable to delete %s" % pkg_arg)
- self.logger.debug("Failure: %s" % rv.error)
- if pkg_modified == True:
- self.modified.append(pkg)
-
- self.RefreshPackages()
- self.extra = self.FindExtra()
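
For readers tracing the removed driver: build_yname() above assembled yum package specifiers of the shape name-epoch:version-release.arch from an Instance's attributes. A rough, hypothetical walk-through of the same concatenation with a plain dict in place of the XML element:

inst = {'epoch': '1', 'version': '4.2', 'release': '9.el6', 'arch': 'x86_64'}
spec = 'bash'
if inst.get('version') != 'any':
    spec += '-'
if inst.get('epoch'):
    spec += '%s:' % inst['epoch']
if inst.get('version') and inst['version'] != 'any':
    spec += inst['version']
if inst.get('release') and inst['release'] != 'any':
    spec += '-%s' % inst['release']
if inst.get('arch') and inst['arch'] != 'any':
    spec += '.%s' % inst['arch']
print(spec)  # bash-1:4.2-9.el6.x86_64
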
diff --git a/src/lib/Bcfg2/Client/Tools/YUMng.py b/src/lib/Bcfg2/Client/Tools/YUMng.py
deleted file mode 100644
index 22fbba537..000000000
--- a/src/lib/Bcfg2/Client/Tools/YUMng.py
+++ /dev/null
@@ -1,9 +0,0 @@
-""" YUM driver called 'YUMng' for backwards compat """
-
-from Bcfg2.Client.Tools.YUM import YUM
-
-
-class YUMng(YUM):
- """ YUM driver called 'YUMng' for backwards compat """
- deprecated = True
- conflicts = ['YUM24', 'RPM', 'RPMng']
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
index 703b8ff57..cd294db98 100644
--- a/src/lib/Bcfg2/Client/Tools/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -3,21 +3,11 @@
import os
import sys
import stat
+import logging
+import Bcfg2.Options
import Bcfg2.Client
import Bcfg2.Client.XML
from Bcfg2.Utils import Executor, ClassName
-from Bcfg2.Compat import walk_packages # pylint: disable=W0622
-
-__all__ = [m[1] for m in walk_packages(path=__path__)]
-
-# pylint: disable=C0103
-#: All available tools
-drivers = [item for item in __all__ if item not in ['rpmtools']]
-
-#: The default set of tools that will be used if "drivers" is not set
-#: in bcfg2.conf
-default = drivers[:]
-# pylint: enable=C0103
class ToolInstantiationError(Exception):
@@ -35,6 +25,12 @@ class Tool(object):
.. autoattribute:: Bcfg2.Client.Tools.Tool.__important__
"""
+ options = [
+ Bcfg2.Options.Option(
+ cf=('client', 'command_timeout'),
+ help="Timeout when running external commands other than probes",
+ type=Bcfg2.Options.Types.timeout)]
+
#: The name of the tool. By default this uses
#: :class:`Bcfg2.Client.Tools.ClassName` to ensure that it is the
#: same as the name of the class.
@@ -78,30 +74,22 @@ class Tool(object):
#: runtime with a warning.
conflicts = []
- def __init__(self, logger, setup, config):
+ def __init__(self, config):
"""
- :param logger: Logger that will be used for logging by this tool
- :type logger: logging.Logger
- :param setup: The option set Bcfg2 was invoked with
- :type setup: Bcfg2.Options.OptionParser
:param config: The XML configuration for this client
:type config: lxml.etree._Element
:raises: :exc:`Bcfg2.Client.Tools.ToolInstantiationError`
"""
- #: A :class:`Bcfg2.Options.OptionParser` object describing the
- #: option set Bcfg2 was invoked with
- self.setup = setup
-
#: A :class:`logging.Logger` object that will be used by this
#: tool for logging
- self.logger = logger
+ self.logger = logging.getLogger(self.name)
#: The XML configuration for this client
self.config = config
#: An :class:`Bcfg2.Utils.Executor` object for
#: running external commands.
- self.cmd = Executor(timeout=self.setup['command_timeout'])
+ self.cmd = Executor(timeout=Bcfg2.Options.setup.command_timeout)
#: A list of entries that have been modified by this tool
self.modified = []
@@ -122,7 +110,7 @@ class Tool(object):
for struct in self.config:
for entry in struct:
if (entry.tag == 'Path' and
- entry.get('important', 'false').lower() == 'true'):
+ entry.get('important', 'false').lower() == 'true'):
self.__important__.append(entry.get('name'))
self.handled = self.getSupportedEntries()
@@ -141,27 +129,27 @@ class Tool(object):
raise ToolInstantiationError("%s: %s not executable" %
(self.name, filename))
- def BundleUpdated(self, bundle, states): # pylint: disable=W0613
+ def BundleUpdated(self, bundle): # pylint: disable=W0613
""" Callback that is invoked when a bundle has been updated.
:param bundle: The bundle that has been updated
:type bundle: lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
- return
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Client.states`
+ """
+ return dict()
- def BundleNotUpdated(self, bundle, states): # pylint: disable=W0613
+ def BundleNotUpdated(self, bundle): # pylint: disable=W0613
""" Callback that is invoked when a bundle has been updated.
:param bundle: The bundle that has been updated
:type bundle: lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
- return
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Client.states`
+ """
+ return dict()
- def Inventory(self, states, structures=None):
+ def Inventory(self, structures=None):
""" Take an inventory of the system as it exists. This
involves two steps:
@@ -176,18 +164,19 @@ class Tool(object):
is the entry tag. E.g., a Path entry would be verified by
calling :func:`VerifyPath`.
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
:param structures: The list of structures (i.e., bundles) to
get entries from. If this is not given,
all children of
:attr:`Bcfg2.Client.Tools.Tool.config` will
be used.
:type structures: list of lxml.etree._Element
- :returns: None """
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Client.states`
+ """
if not structures:
structures = self.config.getchildren()
mods = self.buildModlist()
+ states = dict()
for struct in structures:
for entry in struct.getchildren():
if self.canVerify(entry):
@@ -205,8 +194,9 @@ class Tool(object):
self.primarykey(entry)),
exc_info=1)
self.extra = self.FindExtra()
+ return states
- def Install(self, entries, states):
+ def Install(self, entries):
""" Install entries. 'Install' in this sense means either
initially install, or update as necessary to match the
specification.
@@ -218,9 +208,10 @@ class Tool(object):
:param entries: The entries to install
:type entries: list of lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Client.states`
+ """
+ states = dict()
for entry in entries:
try:
func = getattr(self, "Install%s" % entry.tag)
@@ -236,6 +227,7 @@ class Tool(object):
self.logger.error("%s: Unexpected failure installing %s" %
(self.name, self.primarykey(entry)),
exc_info=1)
+ return states
def Remove(self, entries):
""" Remove specified extra entries.
@@ -396,8 +388,8 @@ class PkgTool(Tool):
#: The ``type`` attribute of Packages handled by this tool.
pkgtype = 'echo'
- def __init__(self, logger, setup, config):
- Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Tool.__init__(self, config)
#: A dict of installed packages; the keys should be package
#: names and the values should be simple strings giving the
@@ -434,32 +426,27 @@ class PkgTool(Tool):
for pkg in packages)
return self.pkgtool[0] % pkgargs
- def Install(self, packages, states):
+ def Install(self, packages):
""" Run a one-pass install where all required packages are
installed with a single command, followed by single package
installs in case of failure.
:param entries: The entries to install
:type entries: list of lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Client.states`
+ """
self.logger.info("Trying single pass package install for pkgtype %s" %
self.pkgtype)
- pkgcmd = self._get_package_command(packages)
- self.logger.debug("Running command: %s" % pkgcmd)
- if self.cmd.run(pkgcmd):
+ states = dict()
+ if self.cmd.run(self._get_package_command(packages)):
self.logger.info("Single Pass Succeded")
# set all package states to true and flush workqueues
- pkgnames = [pkg.get('name') for pkg in packages]
- for entry in list(states.keys()):
- if (entry.tag == 'Package'
- and entry.get('type') == self.pkgtype
- and entry.get('name') in pkgnames):
- self.logger.debug('Setting state to true for pkg %s' %
- entry.get('name'))
- states[entry] = True
+ for entry in packages:
+ self.logger.debug('Setting state to true for %s' %
+ self.primarykey(entry))
+ states[entry] = True
self.RefreshPackages()
else:
self.logger.error("Single Pass Failed")
@@ -477,10 +464,13 @@ class PkgTool(Tool):
if self.cmd.run(self._get_package_command([pkg])):
states[pkg] = True
else:
+ states[pkg] = False
self.logger.error("Failed to install package %s" %
pkg.get('name'))
self.RefreshPackages()
- self.modified.extend(entry for entry in packages if states[entry])
+ self.modified.extend(entry for entry in packages
+ if entry in states and states[entry])
+ return states
def RefreshPackages(self):
""" Refresh the internal representation of the package
@@ -502,8 +492,14 @@ class PkgTool(Tool):
class SvcTool(Tool):
""" Base class for tools that handle Service entries """
- def __init__(self, logger, setup, config):
- Tool.__init__(self, logger, setup, config)
+ options = Tool.options + [
+ Bcfg2.Options.Option(
+ '-s', '--service-mode', default='default',
+ choices=['default', 'disabled', 'build'],
+ help='Set client service mode')]
+
+ def __init__(self, config):
+ Tool.__init__(self, config)
#: List of services that have been restarted
self.restarted = []
__init__.__doc__ = Tool.__init__.__doc__
@@ -580,14 +576,14 @@ class SvcTool(Tool):
return bool(self.cmd.run(self.get_svc_command(service, 'status')))
def Remove(self, services):
- if self.setup['servicemode'] != 'disabled':
+ if Bcfg2.Options.setup.service_mode != 'disabled':
for entry in services:
entry.set("status", "off")
self.InstallService(entry)
Remove.__doc__ = Tool.Remove.__doc__
- def BundleUpdated(self, bundle, states):
- if self.setup['servicemode'] == 'disabled':
+ def BundleUpdated(self, bundle):
+ if Bcfg2.Options.setup.service_mode == 'disabled':
return
for entry in bundle:
@@ -597,15 +593,16 @@ class SvcTool(Tool):
estatus = entry.get('status')
restart = entry.get("restart", "true").lower()
if (restart == "false" or estatus == 'ignore' or
- (restart == "interactive" and not self.setup['interactive'])):
+ (restart == "interactive" and
+ not Bcfg2.Options.setup.interactive)):
continue
success = False
if estatus == 'on':
- if self.setup['servicemode'] == 'build':
+ if Bcfg2.Options.setup.service_mode == 'build':
success = self.stop_service(entry)
elif entry.get('name') not in self.restarted:
- if self.setup['interactive']:
+ if Bcfg2.Options.setup.interactive:
if not Bcfg2.Client.prompt('Restart service %s? (y/N) '
% entry.get('name')):
continue
@@ -617,9 +614,10 @@ class SvcTool(Tool):
if not success:
self.logger.error("Failed to manipulate service %s" %
(entry.get('name')))
+ return dict()
BundleUpdated.__doc__ = Tool.BundleUpdated.__doc__
- def Install(self, entries, states):
+ def Install(self, entries):
install_entries = []
for entry in entries:
if entry.get('install', 'true').lower() == 'false':
@@ -627,7 +625,7 @@ class SvcTool(Tool):
(entry.tag, entry.get('name')))
else:
install_entries.append(entry)
- return Tool.Install(self, install_entries, states)
+ return Tool.Install(self, install_entries)
Install.__doc__ = Tool.Install.__doc__
def InstallService(self, entry):
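
The signature change above is the core of this commit: Inventory(), Install(), BundleUpdated() and BundleNotUpdated() now return a dict of entry states instead of mutating a shared states argument. A small sketch of how a caller merges those per-tool dicts; the FakeTool class and entry names are hypothetical:

class FakeTool(object):
    """Hypothetical tool following the new return-a-states-dict contract."""

    def Inventory(self, structures=None):
        # Report the current state of each handled entry.
        return {'entry-a': True, 'entry-b': False}

    def Install(self, entries):
        # Attempt to fix the given entries and report their new states.
        return dict((entry, True) for entry in entries)


states = {}
tool = FakeTool()
states.update(tool.Inventory())
bad = [entry for entry, ok in states.items() if not ok]
states.update(tool.Install(bad))
print(states)  # {'entry-a': True, 'entry-b': True}
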
diff --git a/src/lib/Bcfg2/Client/Tools/launchd.py b/src/lib/Bcfg2/Client/Tools/launchd.py
index b0661b26b..a4aeab6c7 100644
--- a/src/lib/Bcfg2/Client/Tools/launchd.py
+++ b/src/lib/Bcfg2/Client/Tools/launchd.py
@@ -12,8 +12,8 @@ class launchd(Bcfg2.Client.Tools.Tool): # pylint: disable=C0103
__execs__ = ['/bin/launchctl', '/usr/bin/defaults']
__req__ = {'Service': ['name', 'status']}
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
# Locate plist file that provides given reverse-fqdn name:
#
@@ -117,9 +117,11 @@ class launchd(Bcfg2.Client.Tools.Tool): # pylint: disable=C0103
status='on')
for name in allsrv]
- def BundleUpdated(self, bundle, states):
+ def BundleUpdated(self, bundle):
"""Reload launchd plist."""
- for entry in [entry for entry in bundle if self.handlesEntry(entry)]:
+ for entry in bundle:
+ if not self.handlesEntry(entry):
+ continue
if not self.canInstall(entry):
self.logger.error("Insufficient information to restart "
"service %s" % entry.get('name'))
diff --git a/src/lib/Bcfg2/Client/Tools/rpmtools.py b/src/lib/Bcfg2/Client/Tools/rpmtools.py
deleted file mode 100755
index 32a04262d..000000000
--- a/src/lib/Bcfg2/Client/Tools/rpmtools.py
+++ /dev/null
@@ -1,1091 +0,0 @@
-#!/usr/bin/env python
-"""
- Module that uses rpm-python to implement the following rpm
- functionality for the bcfg2 RPM and YUM client drivers:
-
- rpm -qa
- rpm --verify
- rpm --erase
-
- The code closely follows the rpm C code.
-
- The code was written to be used in the bcfg2 RPM/YUM drivers.
-
- Some command line options have been provided to assist with
- testing and development, but the output isn't pretty and looks
- nothing like rpm output.
-
- Run 'rpmtools -h' for the options.
-
-"""
-
-import grp
-import optparse
-import os
-import pwd
-import rpm
-import stat
-import sys
-if sys.version_info >= (2, 5):
- import hashlib
- py24compat = False
-else:
- # FIXME: Remove when client python dep is 2.5 or greater
- py24compat = True
- import md5
-
-# Determine what prelink tools we have available.
-# The isprelink module is a python extension that examines the ELF headers
-# to see if the file has been prelinked. If it is not present a lot of files
-# are unnecessarily run through the prelink command.
-try:
- from isprelink import *
- isprelink_imported = True
-except ImportError:
- isprelink_imported = False
-
-# If the prelink command is installed on the system then we need to do
-# prelink -y on files.
-if os.access('/usr/sbin/prelink', os.X_OK):
- prelink_exists = True
-else:
- prelink_exists = False
-
-# If we don't have isprelink then we will use the prelink configuration file to
-# filter what we have to put through prelink -y.
-import re
-blacklist = []
-whitelist = []
-try:
- f = open('/etc/prelink.conf', mode='r')
- for line in f:
- if line.startswith('#'):
- continue
- option, pattern = line.split()
- if pattern.startswith('*.'):
- pattern = pattern.replace('*.', '\.')
- pattern += '$'
- elif pattern.startswith('/'):
- pattern = '^' + pattern
- if option == '-b':
- blacklist.append(pattern)
- elif option == '-l':
- whitelist.append(pattern)
- f.close()
-except IOError:
- pass
-
-blacklist_re = re.compile('|'.join(blacklist))
-whitelist_re = re.compile('|'.join(whitelist))
-
-# Flags that are not defined in rpm-python.
-# They are defined in lib/rpmcli.h
-# Bit(s) for verifyFile() attributes.
-#
-RPMVERIFY_NONE = 0 # /*!< */
-RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */
-RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */
-RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */
-RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */
-RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */
-RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */
-RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */
-RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */
-RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */
-RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */
-RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */
-RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */
-RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */
-
-RPMVERIFY_FAILURES = \
- (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \
- RPMVERIFY_LGETFILECONFAIL)
-
-# Bit(s) to control rpm_verify() operation.
-#
-VERIFY_DEFAULT = 0, # /*!< */
-VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
-VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
-VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
-VERIFY_USER = 1 << 3 # /*!< from --nouser */
-VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
-VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
-VERIFY_MODE = 1 << 6 # /*!< from --nomode */
-VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
-# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
-VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
-VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
-VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
-VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
-VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
-VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
-VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
-VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
-VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
-VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
-VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
-VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
-VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
-# /* bits 28-31 used in rpmVerifyAttrs */
-
-# Comes from C source. lib/rpmcli.h
-VERIFY_ATTRS = \
- (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
- VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
-
-VERIFY_ALL = \
- (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
- VERIFY_SIGNATURE | VERIFY_HDRCHK)
-
-
-# Some masks for what checks to NOT do on these file types.
-# The C code actually resets these for every file.
-DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
- RPMVERIFY_LINKTO)
-
-# These file types all have the same mask, but hopefully this will make the
-# code more readable.
-FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
-
-LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
- RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
-
-REG_FLAGS = ~(RPMVERIFY_LINKTO)
-
-
-def s_isdev(mode):
- """
- Check to see if a file is a device.
-
- """
- return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
-
-def rpmpackagelist(rts):
- """
- Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
- Requires rpmtransactionset() to be run first to get a ts.
- Returns a list of pkgspec dicts.
-
- e.g. [ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' },
- {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ]
-
- """
- return [{'name':header[rpm.RPMTAG_NAME],
- 'epoch':header[rpm.RPMTAG_EPOCH],
- 'version':header[rpm.RPMTAG_VERSION],
- 'release':header[rpm.RPMTAG_RELEASE],
- 'arch':header[rpm.RPMTAG_ARCH],
- 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
- for header in rts.dbMatch()]
-
-def getindexbykeyword(index_ts, **kwargs):
- """
- Return list of indexes from the rpmdb matching keywords
- ex: getindexbykeyword(name='foo', version='1', release='1')
-
- Can be passed any structure that can be indexed by the pkgspec
- keywords as other keys are filtered out.
-
- """
- lst = []
- name = kwargs.get('name')
- if name:
- index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
- else:
- index_mi = index_ts.dbMatch()
-
- if 'epoch' in kwargs:
- if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
- kwargs['epoch'] = int(kwargs['epoch'])
- else:
- del(kwargs['epoch'])
-
- keywords = [key for key in list(kwargs.keys()) \
- if key in ('name', 'epoch', 'version', 'release', 'arch')]
- keywords_len = len(keywords)
- for hdr in index_mi:
- match = 0
- for keyword in keywords:
- if hdr[keyword] == kwargs[keyword]:
- match += 1
- if match == keywords_len:
- lst.append(index_mi.instance())
- del index_mi
- return lst
-
-def getheadersbykeyword(header_ts, **kwargs):
- """
- Borrowed parts of this from Yum. Need to fix it though.
- Epoch is not handled right.
-
- Return list of headers from the rpmdb matching keywords
- ex: getHeadersByKeyword(name='foo', version='1', release='1')
-
- Can be passed any structure that can be indexed by the pkgspec
- keywords as other keys are filtered out.
-
- """
- lst = []
- name = kwargs.get('name')
- if name:
- header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name)
- else:
- header_mi = header_ts.dbMatch()
-
- if 'epoch' in kwargs:
- if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
- kwargs['epoch'] = int(kwargs['epoch'])
- else:
- del(kwargs['epoch'])
-
- keywords = [key for key in list(kwargs.keys()) \
- if key in ('name', 'epoch', 'version', 'release', 'arch')]
- keywords_len = len(keywords)
- for hdr in header_mi:
- match = 0
- for keyword in keywords:
- if hdr[keyword] == kwargs[keyword]:
- match += 1
- if match == keywords_len:
- lst.append(hdr)
- del header_mi
- return lst
-
-def prelink_md5_check(filename):
- """
- Checks if a file is prelinked. If it is, run it through prelink -y
- to get the unprelinked md5 and file size.
-
- Return 0 if the file was not prelinked, otherwise return the file size.
- Always return the md5.
-
- """
- prelink = False
- try:
- plf = open(filename, "rb")
- except IOError:
- return False, 0
-
- if prelink_exists:
- if isprelink_imported:
- plfd = plf.fileno()
- if isprelink(plfd):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
- prelink = True
- elif whitelist_re.search(filename) and not blacklist_re.search(filename):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
- prelink = True
-
- fsize = 0
- if py24compat:
- chksum = md5.new()
- else:
- chksum = hashlib.md5()
- while 1:
- data = plf.read()
- if not data:
- break
- fsize += len(data)
- chksum.update(data)
- plf.close()
- file_md5 = chksum.hexdigest()
- if prelink:
- return file_md5, fsize
- else:
- return file_md5, 0
-
-def prelink_size_check(filename):
- """
- This check is only done if the prelink_md5_check() is not done first.
-
- Checks if a file is prelinked. If it is, run it through prelink -y
- to get the unprelinked file size.
-
- Return 0 if the file was not prelinked, otherwise return the file size.
-
- """
- fsize = 0
- try:
- plf = open(filename, "rb")
- except IOError:
- return False
-
- if prelink_exists:
- if isprelink_imported:
- plfd = plf.fileno()
- if isprelink(plfd):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
-
- while 1:
- data = plf.read()
- if not data:
- break
- fsize += len(data)
-
- elif whitelist_re.search(filename) and not blacklist_re.search(filename):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
-
- while 1:
- data = plf.read()
- if not data:
- break
- fsize += len(data)
-
- plf.close()
-
- return fsize
-
-def debug_verify_flags(vflags):
- """
- Decodes the verify flags bits.
- """
- if vflags & RPMVERIFY_MD5:
- print('RPMVERIFY_MD5')
- if vflags & RPMVERIFY_FILESIZE:
- print('RPMVERIFY_FILESIZE')
- if vflags & RPMVERIFY_LINKTO:
- print('RPMVERIFY_LINKTO')
- if vflags & RPMVERIFY_USER:
- print('RPMVERIFY_USER')
- if vflags & RPMVERIFY_GROUP:
- print('RPMVERIFY_GROUP')
- if vflags & RPMVERIFY_MTIME:
- print('RPMVERIFY_MTIME')
- if vflags & RPMVERIFY_MODE:
- print('RPMVERIFY_MODE')
- if vflags & RPMVERIFY_RDEV:
- print('RPMVERIFY_RDEV')
- if vflags & RPMVERIFY_CONTEXTS:
- print('RPMVERIFY_CONTEXTS')
- if vflags & RPMVERIFY_READLINKFAIL:
- print('RPMVERIFY_READLINKFAIL')
- if vflags & RPMVERIFY_READFAIL:
- print('RPMVERIFY_READFAIL')
- if vflags & RPMVERIFY_LSTATFAIL:
- print('RPMVERIFY_LSTATFAIL')
- if vflags & RPMVERIFY_LGETFILECONFAIL:
- print('RPMVERIFY_LGETFILECONFAIL')
-
-def debug_file_flags(fflags):
- """
- Decodes the file flags bits.
- """
- if fflags & rpm.RPMFILE_CONFIG:
- print('rpm.RPMFILE_CONFIG')
-
- if fflags & rpm.RPMFILE_DOC:
- print('rpm.RPMFILE_DOC')
-
- if fflags & rpm.RPMFILE_ICON:
- print('rpm.RPMFILE_ICON')
-
- if fflags & rpm.RPMFILE_MISSINGOK:
- print('rpm.RPMFILE_MISSINGOK')
-
- if fflags & rpm.RPMFILE_NOREPLACE:
- print('rpm.RPMFILE_NOREPLACE')
-
- if fflags & rpm.RPMFILE_GHOST:
- print('rpm.RPMFILE_GHOST')
-
- if fflags & rpm.RPMFILE_LICENSE:
- print('rpm.RPMFILE_LICENSE')
-
- if fflags & rpm.RPMFILE_README:
- print('rpm.RPMFILE_README')
-
- if fflags & rpm.RPMFILE_EXCLUDE:
- print('rpm.RPMFILE_EXCLUDE')
-
- if fflags & rpm.RPMFILE_UNPATCHED:
- print('rpm.RPMFILE_UNPATCHED')
-
- if fflags & rpm.RPMFILE_PUBKEY:
- print('rpm.RPMFILE_PUBKEY')
-
-def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
- """
- Verify a single file from a package.
-
- Returns a list of error flags, the file type and file name. The list
- entries are strings that are the same as the labels for the bitwise
- flags used in the C code.
-
- """
- (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
- vflags, fuser, fgroup, fmd5) = fileinfo
-
- # 1. rpmtsRootDir stuff. What does it do and where do I get it from?
-
- file_results = []
- flags = vflags
-
- # Check to see if the file was installed - if not pretend all is ok.
- # This is what the rpm C code does!
- if fstate != rpm.RPMFILE_STATE_NORMAL:
- return file_results
-
- # Get the installed files stats
- try:
- lstat = os.lstat(fname)
- except OSError:
- if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)):
- file_results.append('RPMVERIFY_LSTATFAIL')
- #file_results.append(fname)
- return file_results
-
- # 5. Contexts? SELinux stuff?
-
- # Setup what checks to do. This is straight out of the C code.
- if stat.S_ISDIR(lstat.st_mode):
- flags &= DIR_FLAGS
- elif stat.S_ISLNK(lstat.st_mode):
- flags &= LINK_FLAGS
- elif stat.S_ISFIFO(lstat.st_mode):
- flags &= FIFO_FLAGS
- elif stat.S_ISCHR(lstat.st_mode):
- flags &= CHR_FLAGS
- elif stat.S_ISBLK(lstat.st_mode):
- flags &= BLK_FLAGS
- else:
- flags &= REG_FLAGS
-
- if (fflags & rpm.RPMFILE_GHOST):
- flags &= GHOST_FLAGS
-
- flags &= ~(omitmask | RPMVERIFY_FAILURES)
-
- # 8. SELinux stuff.
-
- prelink_size = 0
- if flags & RPMVERIFY_MD5:
- prelink_md5, prelink_size = prelink_md5_check(fname)
- if prelink_md5 == False:
- file_results.append('RPMVERIFY_MD5')
- file_results.append('RPMVERIFY_READFAIL')
- elif prelink_md5 != fmd5:
- file_results.append('RPMVERIFY_MD5')
-
- if flags & RPMVERIFY_LINKTO:
- linkto = os.readlink(fname)
- if not linkto:
- file_results.append('RPMVERIFY_READLINKFAIL')
- file_results.append('RPMVERIFY_LINKTO')
- else:
- if len(rpmlinktos) == 0 or linkto != rpmlinktos:
- file_results.append('RPMVERIFY_LINKTO')
-
- if flags & RPMVERIFY_FILESIZE:
- if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
- prelink_size = prelink_size_check(fname)
- if (prelink_size != 0): # This is a prelinked file.
- if (prelink_size != fsize):
- file_results.append('RPMVERIFY_FILESIZE')
- elif lstat.st_size != fsize: # It wasn't a prelinked file.
- file_results.append('RPMVERIFY_FILESIZE')
-
- if flags & RPMVERIFY_MODE:
- metamode = fmode
- filemode = lstat.st_mode
-
- # Comparing the type of %ghost files is meaningless, but perms are ok.
- if fflags & rpm.RPMFILE_GHOST:
- metamode &= ~0xf000
- filemode &= ~0xf000
-
- if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
- (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
- file_results.append('RPMVERIFY_MODE')
-
- if flags & RPMVERIFY_RDEV:
- if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
- stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
- file_results.append('RPMVERIFY_RDEV')
- elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
- st_rdev = lstat.st_rdev
- if frdev != st_rdev:
- file_results.append('RPMVERIFY_RDEV')
-
- if flags & RPMVERIFY_MTIME:
- if lstat.st_mtime != fmtime:
- file_results.append('RPMVERIFY_MTIME')
-
- if flags & RPMVERIFY_USER:
- try:
- user = pwd.getpwuid(lstat.st_uid)[0]
- except KeyError:
- user = None
- if not user or not fuser or (user != fuser):
- file_results.append('RPMVERIFY_USER')
-
- if flags & RPMVERIFY_GROUP:
- try:
- group = grp.getgrgid(lstat.st_gid)[0]
- except KeyError:
- group = None
- if not group or not fgroup or (group != fgroup):
- file_results.append('RPMVERIFY_GROUP')
-
- return file_results
-
-def rpm_verify_dependencies(header):
- """
- Check package dependencies. Header is an rpm.hdr.
-
- Don't like opening another ts to do this, but
- it was the only way I could find of clearing the ts
- out.
-
- Have asked on the rpm-maint list on how to do
- this the right way (28 Feb 2007).
-
- ts.check() returns:
-
- ((name, version, release), (reqname, reqversion), \
- flags, suggest, sense)
-
- """
- _ts1 = rpmtransactionset()
- _ts1.addInstall(header, 'Dep Check', 'i')
- dep_errors = _ts1.check()
- _ts1.closeDB()
- return dep_errors
-
-def rpm_verify_package(vp_ts, header, verify_options):
- """
- Verify a single package specified by header. Header is an rpm.hdr.
-
- If errors are found it returns a dictionary of errors.
-
- """
- # Set some transaction level flags.
- vsflags = 0
- if 'nodigest' in verify_options:
- vsflags |= rpm._RPMVSF_NODIGESTS
- if 'nosignature' in verify_options:
- vsflags |= rpm._RPMVSF_NOSIGNATURES
- ovsflags = vp_ts.setVSFlags(vsflags)
-
- # Map from the Python options to the rpm bitwise flags.
- omitmask = 0
-
- if 'nolinkto' in verify_options:
- omitmask |= VERIFY_LINKTO
- if 'nomd5' in verify_options:
- omitmask |= VERIFY_MD5
- if 'nosize' in verify_options:
- omitmask |= VERIFY_SIZE
- if 'nouser' in verify_options:
- omitmask |= VERIFY_USER
- if 'nogroup' in verify_options:
- omitmask |= VERIFY_GROUP
- if 'nomtime' in verify_options:
- omitmask |= VERIFY_MTIME
- if 'nomode' in verify_options:
- omitmask |= VERIFY_MODE
- if 'nordev' in verify_options:
- omitmask |= VERIFY_RDEV
-
- omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
-
- package_results = {}
-
- # Check Signatures and Digests.
- # No idea what this might return. Need to break something to see.
- # Setting the vsflags above determines what gets checked in the header.
- hdr_stat = vp_ts.hdrCheck(header.unload())
- if hdr_stat:
- package_results['hdr'] = hdr_stat
-
- # Check Package Dependencies.
- if 'nodeps' not in verify_options:
- dep_stat = rpm_verify_dependencies(header)
- if dep_stat:
- package_results['deps'] = dep_stat
-
- # Check all the package files.
- if 'nofiles' not in verify_options:
- vp_fi = header.fiFromHeader()
- for fileinfo in vp_fi:
- # Do not bother doing anything with ghost files.
- # This is what RPM does.
- if fileinfo[4] & rpm.RPMFILE_GHOST:
- continue
-
- # This is only needed because of an inconsistency in the
- # rpm.fi interface.
- linktos = vp_fi.FLink()
-
- file_stat = rpm_verify_file(fileinfo, linktos, omitmask)
-
- #if len(file_stat) > 0 or options.verbose:
- if len(file_stat) > 0:
- fflags = fileinfo[4]
- if fflags & rpm.RPMFILE_CONFIG:
- file_stat.append('c')
- elif fflags & rpm.RPMFILE_DOC:
- file_stat.append('d')
- elif fflags & rpm.RPMFILE_GHOST:
- file_stat.append('g')
- elif fflags & rpm.RPMFILE_LICENSE:
- file_stat.append('l')
- elif fflags & rpm.RPMFILE_PUBKEY:
- file_stat.append('P')
- elif fflags & rpm.RPMFILE_README:
- file_stat.append('r')
- else:
- file_stat.append(' ')
-
- file_stat.append(fileinfo[0]) # The filename.
- package_results.setdefault('files', []).append(file_stat)
-
- # Run the verify script if there is one.
- # Do we want this?
- #if 'noscripts' not in verify_options:
- # script_stat = rpmVerifyscript()
- # if script_stat:
- # package_results['script'] = script_stat
-
- # If there have been any errors, add the package nevra to the result.
- if len(package_results) > 0:
- package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \
- header[rpm.RPMTAG_EPOCH], \
- header[rpm.RPMTAG_VERSION], \
- header[rpm.RPMTAG_RELEASE], \
- header[rpm.RPMTAG_ARCH]))
- else:
- package_results = None
-
- # Put things back the way we found them.
- vsflags = vp_ts.setVSFlags(ovsflags)
-
- return package_results
-
-def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
- """
- Requires rpmtransactionset() to be run first to get a ts.
-
- pkgspec is a dict specifying the package
- e.g.:
- For a single package
- { 'name': 'foo', 'epoch': '20', 'version': '1', 'release': '1', 'arch': 'x86_64' }
-
- For all packages
- {}
-
- Or any combination of keywords to select one or more packages to verify.
-
- options is a list of 'rpm --verify' options. Default is to check everything.
- e.g.:
- [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
- 'nolinkto', 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
- 'nomode', 'nordev' ]
-
- Returns a list. One list entry per package. Each list entry is a
- dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'.
- Entries only get added for failures; packages with no failures are
- omitted from the list.
-
- It's all a bit messy and probably needs reviewing.
-
- [ { 'hdr': [???],
- 'deps': [((name, version, release), (reqname, reqversion),
- flags, suggest, sense), .... ]
- 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
- ['filename2', 'RPMVERIFY_LSTATFAIL']]
- 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] }
- { 'hdr': [???],
- 'deps': [((name, version, release), (reqname, reqversion),
- flags, suggest, sense), .... ]
- 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
- ['filename2', 'RPMVERIFY_LSTATFAIL']]
- 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
-
- """
- verify_results = []
- headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
- for header in headers:
- result = rpm_verify_package(verify_ts, header, verify_options)
- if result:
- verify_results.append(result)
-
- return verify_results
-
-def rpmtransactionset():
- """
- A simple wrapper for rpm.TransactionSet() to keep everything together.
- Might use it to set some ts level flags later.
-
- """
- ts = rpm.TransactionSet()
- return ts
-
-class Rpmtscallback(object):
- """
- Callback for ts.run(). Used for adding, upgrading and removing packages.
- Starting with all possible reason codes, but bcfg2 will probably only
- make use of a few of them.
-
- Mostly just printing stuff at the moment to understand how the callback
- is used.
-
- """
- def __init__(self):
- self.fdnos = {}
-
- def callback(self, reason, amount, total, key, client_data):
- """
- Generic rpmts callback.
- """
- if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
- pass
- elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
- pass
- elif reason == rpm.RPMCALLBACK_INST_START:
- pass
- elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
- reason == rpm.RPMCALLBACK_INST_PROGRESS:
- pass
- # rpm.RPMCALLBACK_INST_PROGRESS
- elif reason == rpm.RPMCALLBACK_TRANS_START:
- pass
- elif reason == rpm.RPMCALLBACK_TRANS_STOP:
- pass
- elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
- pass
- elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
- pass
- elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
- pass
- elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
- pass
- elif reason == rpm.RPMCALLBACK_UNINST_START:
- pass
- elif reason == rpm.RPMCALLBACK_UNINST_STOP:
- pass
- # How do we get at this?
- # RPM.modified += key
- elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
- pass
- elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
- pass
- elif reason == rpm.RPMCALLBACK_UNKNOWN:
- pass
- else:
- print('ERROR - Fell through callBack')
-
-
-def rpm_erase(erase_pkgspecs, erase_flags):
- """
- pkgspecs is a list of pkgspec dicts specifying packages
- e.g.:
- For a single package
- { 'name': 'foo', 'epoch': '20', 'version': '1', 'release': '1', 'arch': 'x86_64' }
-
- """
- erase_ts_flags = 0
- if 'noscripts' in erase_flags:
- erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS
- if 'notriggers' in erase_flags:
- erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS
- if 'repackage' in erase_flags:
- erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE
-
- erase_ts = rpmtransactionset()
- erase_ts.setFlags(erase_ts_flags)
-
- for pkgspec in erase_pkgspecs:
- idx_list = getindexbykeyword(erase_ts, **pkgspec)
- if len(idx_list) > 1 and not 'allmatches' in erase_flags:
- #pass
- print('ERROR - Multiple package match for erase', pkgspec)
- else:
- for idx in idx_list:
- erase_ts.addErase(idx)
-
- #for te in erase_ts:
-
- erase_problems = []
- if 'nodeps' not in erase_flags:
- erase_problems = erase_ts.check()
-
- if erase_problems == []:
- erase_ts.order()
- erase_callback = Rpmtscallback()
- erase_ts.run(erase_callback.callback, 'Erase')
- #else:
-
- erase_ts.closeDB()
- del erase_ts
- return erase_problems
-
-def display_verify_file(file_results):
- '''
- Display file results similar to rpm --verify.
- '''
- filename = file_results[-1]
- filetype = file_results[-2]
-
- result_string = ''
-
- if 'RPMVERIFY_LSTATFAIL' in file_results:
- result_string = 'missing '
- else:
- if 'RPMVERIFY_FILESIZE' in file_results:
- result_string = result_string + 'S'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_MODE' in file_results:
- result_string = result_string + 'M'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_MD5' in file_results:
- if 'RPMVERIFY_READFAIL' in file_results:
- result_string = result_string + '?'
- else:
- result_string = result_string + '5'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_RDEV' in file_results:
- result_string = result_string + 'D'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_LINKTO' in file_results:
- if 'RPMVERIFY_READLINKFAIL' in file_results:
- result_string = result_string + '?'
- else:
- result_string = result_string + 'L'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_USER' in file_results:
- result_string = result_string + 'U'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_GROUP' in file_results:
- result_string = result_string + 'G'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_MTIME' in file_results:
- result_string = result_string + 'T'
- else:
- result_string = result_string + '.'
-
- print(result_string + ' ' + filetype + ' ' + filename)
- sys.stdout.flush()
-
-#===============================================================================
-# Some options and output to assist with development and testing.
-# These are not intended for normal use.
-if __name__ == "__main__":
-
- p = optparse.OptionParser()
-
- p.add_option('--name', action='store', \
- default=None, \
- help='''Package name to verify.
-
- ******************************************
- NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
- ******************************************
-
- The specified operation will be carried out on all
- instances of packages that match the package specification
- (name, epoch, version, release, arch).''')
-
- p.add_option('--epoch', action='store', \
- default=None, \
- help='''Package epoch.''')
-
- p.add_option('--version', action='store', \
- default=None, \
- help='''Package version.''')
-
- p.add_option('--release', action='store', \
- default=None, \
- help='''Package release.''')
-
- p.add_option('--arch', action='store', \
- default=None, \
- help='''Package arch.''')
-
- p.add_option('--erase', '-e', action='store_true', \
- default=None, \
- help='''****************************************************
- REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
- PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
- GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
- INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
- DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
- ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
- ****************************************************''')
-
- p.add_option('--list', '-l', action='store_true', \
- help='''List package identity info. Roughly equivalent to rpm -qa;
- intended for use in RefreshPackages().''')
-
- p.add_option('--verify', action='store_true', \
- help='''Verify Package(s). Output is only produced after all
- packages have been verified. Be patient.''')
-
- p.add_option('--verbose', '-v', action='store_true', \
- help='''Verbose output for --verify option. Output is the
- same as rpm -v --verify.''')
-
- p.add_option('--nodeps', action='store_true', \
- default=False, \
- help='Do not do dependency testing.')
-
- p.add_option('--nodigest', action='store_true', \
- help='Do not check package digests.')
-
- p.add_option('--nofiles', action='store_true', \
- help='Do not do file checks.')
-
- p.add_option('--noscripts', action='store_true', \
- help='Do not run verification scripts.')
-
- p.add_option('--nosignature', action='store_true', \
- help='Do not do package signature verification.')
-
- p.add_option('--nolinkto', action='store_true', \
- help='Do not do symlink tests.')
-
- p.add_option('--nomd5', action='store_true', \
- help='''Do not do MD5 checksums on files. Note that this does
- not work for prelink files yet.''')
-
- p.add_option('--nosize', action='store_true', \
- help='''Do not do file size tests. Note that this does not work
- for prelink files yet.''')
-
- p.add_option('--nouser', action='store_true', \
- help='Do not check file user ownership.')
-
- p.add_option('--nogroup', action='store_true', \
- help='Do not check file group ownership.')
-
- p.add_option('--nomtime', action='store_true', \
- help='Do not check file modification times.')
-
- p.add_option('--nomode', action='store_true', \
- help='Do not check file modes (permissions).')
-
- p.add_option('--nordev', action='store_true', \
- help='Do not check device node.')
-
- p.add_option('--notriggers', action='store_true', \
- help='Do not generate triggers on erase.')
-
- p.add_option('--repackage', action='store_true', \
- help='''Do repackage on erase. Packages are put
- in /var/spool/repackage.''')
-
- p.add_option('--allmatches', action='store_true', \
- help='''Remove all package instances that match the
- pkgspec.
-
- ***************************************************
- NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
- THAT MEANS ALL PACKAGES!!!!
- ***************************************************''')
-
- options, arguments = p.parse_args()
-
- pkgspec = {}
- rpm_options = []
-
- if options.nodeps:
- rpm_options.append('nodeps')
-
- if options.nodigest:
- rpm_options.append('nodigest')
-
- if options.nofiles:
- rpm_options.append('nofiles')
-
- if options.noscripts:
- rpm_options.append('noscripts')
-
- if options.nosignature:
- rpm_options.append('nosignature')
-
- if options.nolinkto:
- rpm_options.append('nolinkto')
-
- if options.nomd5:
- rpm_options.append('nomd5')
-
- if options.nosize:
- rpm_options.append('nosize')
-
- if options.nouser:
- rpm_options.append('nouser')
-
- if options.nogroup:
- rpm_options.append('nogroup')
-
- if options.nomtime:
- rpm_options.append('nomtime')
-
- if options.nomode:
- rpm_options.append('nomode')
-
- if options.nordev:
- rpm_options.append('nordev')
-
- if options.repackage:
- rpm_options.append('repackage')
-
- if options.allmatches:
- rpm_options.append('allmatches')
-
- main_ts = rpmtransactionset()
-
- cmdline_pkgspec = {}
- if options.name != 'all':
- if options.name:
- cmdline_pkgspec['name'] = str(options.name)
- if options.epoch:
- cmdline_pkgspec['epoch'] = str(options.epoch)
- if options.version:
- cmdline_pkgspec['version'] = str(options.version)
- if options.release:
- cmdline_pkgspec['release'] = str(options.release)
- if options.arch:
- cmdline_pkgspec['arch'] = str(options.arch)
-
- if options.verify:
- results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
- for r in results:
- files = r.get('files', '')
- for f in files:
- display_verify_file(f)
-
- elif options.list:
- for p in rpmpackagelist(main_ts):
- print(p)
-
- elif options.erase:
- if options.name:
- rpm_erase([cmdline_pkgspec], rpm_options)
- else:
- print('You must specify the "--name" option')
diff --git a/src/lib/Bcfg2/Client/__init__.py b/src/lib/Bcfg2/Client/__init__.py
index 6d1cb9d40..bae81f480 100644
--- a/src/lib/Bcfg2/Client/__init__.py
+++ b/src/lib/Bcfg2/Client/__init__.py
@@ -2,8 +2,55 @@
import os
import sys
-import select
-from Bcfg2.Compat import input # pylint: disable=W0622
+import stat
+import time
+import fcntl
+import socket
+import fnmatch
+import logging
+import argparse
+import tempfile
+import Bcfg2.Logger
+import Bcfg2.Options
+from Bcfg2.Client import XML
+from Bcfg2.Client import Proxy
+from Bcfg2.Client import Tools
+from Bcfg2.Utils import locked, Executor, safe_input
+from Bcfg2.version import __version__
+# pylint: disable=W0622
+from Bcfg2.Compat import xmlrpclib, walk_packages, any, all, cmp
+# pylint: enable=W0622
+
+
+def cmpent(ent1, ent2):
+ """Sort entries."""
+ if ent1.tag != ent2.tag:
+ return cmp(ent1.tag, ent2.tag)
+ else:
+ return cmp(ent1.get('name'), ent2.get('name'))
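+# Usage sketch (an assumption, not taken from this module): cmpent is an
+# old-style comparison function, so a caller would typically adapt it for
+# sorting with functools.cmp_to_key, e.g.
+#   sorted(entries, key=functools.cmp_to_key(cmpent))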
+
+
+def matches_entry(entryspec, entry):
+ """ Determine if the Decisions-style entry specification matches
+ the entry. Both are tuples of (tag, name). The entryspec can
+ handle the wildcard * in either position. """
+ if entryspec == entry:
+ return True
+ return all(fnmatch.fnmatch(entry[i], entryspec[i]) for i in [0, 1])
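+# Illustrative (hypothetical) values showing the fnmatch-based wildcard
+# handling above:
+#   matches_entry(('Path', '/etc/*'), ('Path', '/etc/hosts'))   => True
+#   matches_entry(('*', 'sshd'), ('Service', 'sshd'))            => True
+#   matches_entry(('Package', 'vim'), ('Package', 'emacs'))      => False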
+
+
+def matches_white_list(entry, whitelist):
+ """ Return True if (<entry tag>, <entry name>) is in the given
+ whitelist. """
+ return any(matches_entry(we, (entry.tag, entry.get('name')))
+ for we in whitelist)
+
+
+def passes_black_list(entry, blacklist):
+ """ Return True if (<entry tag>, <entry name>) is not in the given
+ blacklist. """
+ return not any(matches_entry(be, (entry.tag, entry.get('name')))
+ for be in blacklist)
def prompt(msg):
@@ -16,10 +63,8 @@ def prompt(msg):
contain "[y/N]" if desired, etc.
:type msg: string
:returns: bool - True if yes, False if no """
- while len(select.select([sys.stdin.fileno()], [], [], 0.0)[0]) > 0:
- os.read(sys.stdin.fileno(), 4096)
try:
- ans = input(msg)
+ ans = safe_input(msg)
return ans in ['y', 'Y']
except UnicodeEncodeError:
ans = input(msg.encode('utf-8'))
@@ -27,3 +72,820 @@ def prompt(msg):
except EOFError:
# handle ^C on rhel-based platforms
raise SystemExit(1)
+ except:
+ print("Error while reading input: %s" % sys.exc_info()[1])
+ return False
+
+
+class ClientDriverAction(Bcfg2.Options.ComponentAction):
+ """ Action to load client drivers """
+ bases = ['Bcfg2.Client.Tools']
+ fail_silently = True
+
+
+class Client(object):
+ """ The main Bcfg2 client class """
+
+ options = Proxy.ComponentProxy.options + [
+ Bcfg2.Options.Common.syslog,
+ Bcfg2.Options.Common.interactive,
+ Bcfg2.Options.BooleanOption(
+ "-q", "--quick", help="Disable some checksum verification"),
+ Bcfg2.Options.Option(
+ cf=('client', 'probe_timeout'),
+ type=Bcfg2.Options.Types.timeout,
+ help="Timeout when running client probes"),
+ Bcfg2.Options.Option(
+ "-b", "--only-bundles", default=[],
+ type=Bcfg2.Options.Types.colon_list,
+ help='Only configure the given bundle(s)'),
+ Bcfg2.Options.Option(
+ "-B", "--except-bundles", default=[],
+ type=Bcfg2.Options.Types.colon_list,
+ help='Configure everything except the given bundle(s)'),
+ Bcfg2.Options.ExclusiveOptionGroup(
+ Bcfg2.Options.BooleanOption(
+ "-Q", "--bundle-quick",
+ help='Only verify the given bundle(s)'),
+ Bcfg2.Options.Option(
+ '-r', '--remove',
+ choices=['all', 'services', 'packages', 'users'],
+ help='Force removal of additional configuration items')),
+ Bcfg2.Options.ExclusiveOptionGroup(
+ Bcfg2.Options.PathOption(
+ '-f', '--file', type=argparse.FileType('r'),
+ help='Configure from a file rather than querying the server'),
+ Bcfg2.Options.PathOption(
+ '-c', '--cache', type=argparse.FileType('w'),
+ help='Store the configuration in a file')),
+ Bcfg2.Options.BooleanOption(
+ '--exit-on-probe-failure', default=True,
+ cf=('client', 'exit_on_probe_failure'),
+ help="The client should exit if a probe fails"),
+ Bcfg2.Options.Option(
+ '-p', '--profile', cf=('client', 'profile'),
+ help='Assert the given profile for the host'),
+ Bcfg2.Options.Option(
+ '-l', '--decision', cf=('client', 'decision'),
+ choices=['whitelist', 'blacklist', 'none'],
+ help='Run client in server decision list mode'),
+ Bcfg2.Options.BooleanOption(
+ "-O", "--no-lock", help='Omit lock check'),
+ Bcfg2.Options.PathOption(
+ cf=('components', 'lockfile'), default='/var/lock/bcfg2.run',
+ help='Client lock file'),
+ Bcfg2.Options.BooleanOption(
+ "-n", "--dry-run", help='Do not actually change the system'),
+ Bcfg2.Options.Option(
+ "-D", "--drivers", cf=('client', 'drivers'),
+ type=Bcfg2.Options.Types.comma_list,
+ default=[m[1] for m in walk_packages(path=Tools.__path__)],
+ action=ClientDriverAction, help='Client drivers'),
+ Bcfg2.Options.BooleanOption(
+ "-e", "--show-extra", help='Enable extra entry output'),
+ Bcfg2.Options.BooleanOption(
+ "-k", "--kevlar", help='Run in bulletproof mode')]
+
+ def __init__(self):
+ self.config = None
+ self._proxy = None
+ self.logger = logging.getLogger('bcfg2')
+ self.cmd = Executor(Bcfg2.Options.setup.probe_timeout)
+ self.tools = []
+ self.times = dict()
+ self.times['initialization'] = time.time()
+
+ if Bcfg2.Options.setup.bundle_quick:
+ if (not Bcfg2.Options.setup.only_bundles and
+ not Bcfg2.Options.setup.except_bundles):
+ self.logger.error("-Q option requires -b or -B")
+ raise SystemExit(1)
+ if Bcfg2.Options.setup.remove == 'services':
+ self.logger.error("Service removal is nonsensical; "
+ "removed services will only be disabled")
+ if not Bcfg2.Options.setup.server.startswith('https://'):
+ Bcfg2.Options.setup.server = \
+ 'https://' + Bcfg2.Options.setup.server
+
+ #: A dict of the state of each entry. Keys are the entries.
+ #: Values are boolean: True means that the entry is good,
+ #: False means that the entry is bad.
+ self.states = {}
+ self.whitelist = []
+ self.blacklist = []
+ self.removal = []
+ self.unhandled = []
+ self.logger = logging.getLogger(__name__)
+
+ def _probe_failure(self, probename, msg):
+ """ handle failure of a probe in the way the user wants us to
+ (exit or continue) """
+ message = "Failed to execute probe %s: %s" % (probename, msg)
+ if Bcfg2.Options.setup.exit_on_probe_failure:
+ self.fatal_error(message)
+ else:
+ self.logger.error(message)
+
+ def run_probe(self, probe):
+ """Execute probe."""
+ name = probe.get('name')
+ self.logger.info("Running probe %s" % name)
+ ret = XML.Element("probe-data", name=name, source=probe.get('source'))
+ try:
+ scripthandle, scriptname = tempfile.mkstemp()
+ if sys.hexversion >= 0x03000000:
+ script = os.fdopen(scripthandle, 'w',
+ encoding=Bcfg2.Options.setup.encoding)
+ else:
+ script = os.fdopen(scripthandle, 'w')
+ try:
+ script.write("#!%s\n" %
+ (probe.attrib.get('interpreter', '/bin/sh')))
+ if sys.hexversion >= 0x03000000:
+ script.write(probe.text)
+ else:
+ script.write(probe.text.encode('utf-8'))
+ script.close()
+ os.chmod(scriptname,
+ stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
+ stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
+ stat.S_IWUSR) # 0755
+ rv = self.cmd.run(scriptname)
+ if rv.stderr:
+ self.logger.warning("Probe %s has error output: %s" %
+ (name, rv.stderr))
+ if not rv.success:
+ self._probe_failure(name, "Return value %s" % rv.retval)
+ self.logger.info("Probe %s has result:" % name)
+ self.logger.info(rv.stdout)
+ if sys.hexversion >= 0x03000000:
+ ret.text = rv.stdout
+ else:
+ ret.text = rv.stdout.decode('utf-8')
+ finally:
+ os.unlink(scriptname)
+ except SystemExit:
+ raise
+ except:
+ self._probe_failure(name, sys.exc_info()[1])
+ return ret
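+
+ # For reference, a sketch (hypothetical content) of the kind of probe
+ # element run_probe() handles:
+ #   <probe name="arch" source="Probes" interpreter="/bin/sh">uname -m</probe>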
+
+ def fatal_error(self, message):
+ """Signal a fatal error."""
+ self.logger.error("Fatal error: %s" % (message))
+ raise SystemExit(1)
+
+ @property
+ def proxy(self):
+ """ get an XML-RPC proxy to the server """
+ if self._proxy is None:
+ self._proxy = Proxy.ComponentProxy()
+ return self._proxy
+
+ def run_probes(self):
+ """ run probes and upload probe data """
+ try:
+ probes = XML.XML(str(self.proxy.GetProbes()))
+ except (Proxy.ProxyError,
+ Proxy.CertificateError,
+ socket.gaierror,
+ socket.error):
+ err = sys.exc_info()[1]
+ self.fatal_error("Failed to download probes from bcfg2: %s" % err)
+ except XML.ParseError:
+ err = sys.exc_info()[1]
+ self.fatal_error("Server returned invalid probe requests: %s" %
+ err)
+
+ self.times['probe_download'] = time.time()
+
+ # execute probes
+ probedata = XML.Element("ProbeData")
+ for probe in probes.findall(".//probe"):
+ probedata.append(self.run_probe(probe))
+
+ if len(probes.findall(".//probe")) > 0:
+ try:
+ # upload probe responses
+ self.proxy.RecvProbeData(
+ XML.tostring(probedata,
+ xml_declaration=False).decode('utf-8'))
+ except Proxy.ProxyError:
+ err = sys.exc_info()[1]
+ self.fatal_error("Failed to upload probe data: %s" % err)
+
+ self.times['probe_upload'] = time.time()
+
+ def get_config(self):
+ """ load the configuration, either from the cached
+ configuration file (-f), or from the server """
+ if Bcfg2.Options.setup.file:
+ # read config from file
+ try:
+ self.logger.debug("Reading cached configuration from %s" %
+ Bcfg2.Options.setup.file.name)
+ return Bcfg2.Options.setup.file.read()
+ except IOError:
+ self.fatal_error("Failed to read cached configuration from: %s"
+ % Bcfg2.Options.setup.file.name)
+ else:
+ # retrieve config from server
+ if Bcfg2.Options.setup.profile:
+ try:
+ self.proxy.AssertProfile(Bcfg2.Options.setup.profile)
+ except Proxy.ProxyError:
+ err = sys.exc_info()[1]
+ self.fatal_error("Failed to set client profile: %s" % err)
+
+ try:
+ self.proxy.DeclareVersion(__version__)
+ except (xmlrpclib.Fault,
+ Proxy.ProxyError,
+ Proxy.CertificateError,
+ socket.gaierror,
+ socket.error):
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to declare version: %s" % err)
+
+ self.run_probes()
+
+ if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']:
+ try:
+ # TODO: read decision list from --decision-list
+ Bcfg2.Options.setup.decision_list = \
+ self.proxy.GetDecisionList(
+ Bcfg2.Options.setup.decision)
+ self.logger.info("Got decision list from server:")
+ self.logger.info(Bcfg2.Options.setup.decision_list)
+ except Proxy.ProxyError:
+ err = sys.exc_info()[1]
+ self.fatal_error("Failed to get decision list: %s" % err)
+
+ try:
+ rawconfig = self.proxy.GetConfig().encode('utf-8')
+ except Proxy.ProxyError:
+ err = sys.exc_info()[1]
+ self.fatal_error("Failed to download configuration from "
+ "Bcfg2: %s" % err)
+
+ self.times['config_download'] = time.time()
+
+ if Bcfg2.Options.setup.cache:
+ try:
+ Bcfg2.Options.setup.cache.write(rawconfig)
+ os.chmod(Bcfg2.Options.setup.cache.name, 384) # 0600
+ except IOError:
+ self.logger.warning("Failed to write config cache file %s" %
+ (Bcfg2.Options.setup.cache))
+ self.times['caching'] = time.time()
+
+ return rawconfig
+
+ def parse_config(self, rawconfig):
+ """ Parse the XML configuration received from the Bcfg2 server """
+ try:
+ self.config = XML.XML(rawconfig)
+ except XML.ParseError:
+ syntax_error = sys.exc_info()[1]
+ self.fatal_error("The configuration could not be parsed: %s" %
+ syntax_error)
+
+ self.load_tools()
+
+ # find entries not handled by any tools
+ self.unhandled = [entry for struct in self.config
+ for entry in struct
+ if entry not in self.handled]
+
+ if self.unhandled:
+ self.logger.error("The following entries are not handled by any "
+ "tool:")
+ for entry in self.unhandled:
+ self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
+ entry.get('name')))
+
+ # find duplicates
+ self.find_dups(self.config)
+
+ pkgs = [(entry.get('name'), entry.get('origin'))
+ for struct in self.config
+ for entry in struct
+ if entry.tag == 'Package']
+ if pkgs:
+ self.logger.debug("The following packages are specified in bcfg2:")
+ self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None])
+ self.logger.debug("The following packages are prereqs added by "
+ "Packages:")
+ self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])
+
+ self.times['config_parse'] = time.time()
+
+ def run(self):
+ """Perform client execution phase."""
+ # begin configuration
+ self.times['start'] = time.time()
+
+ self.logger.info("Starting Bcfg2 client run at %s" %
+ self.times['start'])
+
+ self.parse_config(self.get_config().decode('utf-8'))
+
+ if self.config.tag == 'error':
+ self.fatal_error("Server error: %s" % (self.config.text))
+
+ if Bcfg2.Options.setup.bundle_quick:
+ newconfig = XML.XML('<Configuration/>')
+ for bundle in self.config.getchildren():
+ name = bundle.get("name")
+ if (name and (name in Bcfg2.Options.setup.only_bundles or
+ name not in Bcfg2.Options.setup.except_bundles)):
+ newconfig.append(bundle)
+ self.config = newconfig
+
+ if not Bcfg2.Options.setup.no_lock:
+ #check lock here
+ try:
+ lockfile = open(Bcfg2.Options.setup.lockfile, 'w')
+ if locked(lockfile.fileno()):
+ self.fatal_error("Another instance of Bcfg2 is running. "
+ "If you want to bypass the check, run "
+ "with the -O/--no-lock option")
+ except SystemExit:
+ raise
+ except:
+ lockfile = None
+ self.logger.error("Failed to open lockfile %s: %s" %
+ (Bcfg2.Options.setup.lockfile,
+ sys.exc_info()[1]))
+
+ # execute the configuration
+ self.Execute()
+
+ if not Bcfg2.Options.setup.no_lock:
+ # unlock here
+ if lockfile:
+ try:
+ fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
+ os.remove(Bcfg2.Options.setup.lockfile)
+ except OSError:
+ self.logger.error("Failed to unlock lockfile %s" %
+ lockfile.name)
+
+ if (not Bcfg2.Options.setup.file and
+ not Bcfg2.Options.setup.bundle_quick):
+ # upload statistics
+ feedback = self.GenerateStats()
+
+ try:
+ self.proxy.RecvStats(
+ XML.tostring(feedback,
+ xml_declaration=False).decode('utf-8'))
+ except Proxy.ProxyError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to upload configuration statistics: "
+ "%s" % err)
+ raise SystemExit(2)
+
+ self.logger.info("Finished Bcfg2 client run at %s" % time.time())
+
+ def load_tools(self):
+ """ Load all applicable client tools """
+ for tool in Bcfg2.Options.setup.drivers:
+ try:
+ self.tools.append(tool(self.config))
+ except Tools.ToolInstantiationError:
+ continue
+ except:
+ self.logger.error("Failed to instantiate tool %s" % tool,
+ exc_info=1)
+
+ for tool in self.tools[:]:
+ for conflict in getattr(tool, 'conflicts', []):
+ for item in self.tools:
+ if item.name == conflict:
+ self.tools.remove(item)
+
+ self.logger.info("Loaded tool drivers:")
+ self.logger.info([tool.name for tool in self.tools])
+
+ deprecated = [tool.name for tool in self.tools if tool.deprecated]
+ if deprecated:
+ self.logger.warning("Loaded deprecated tool drivers:")
+ self.logger.warning(deprecated)
+ experimental = [tool.name for tool in self.tools if tool.experimental]
+ if experimental:
+ self.logger.warning("Loaded experimental tool drivers:")
+ self.logger.warning(experimental)
+
+ def find_dups(self, config):
+ """ Find duplicate entries and warn about them """
+ entries = dict()
+ for struct in config:
+ for entry in struct:
+ for tool in self.tools:
+ if tool.handlesEntry(entry):
+ pkey = tool.primarykey(entry)
+ if pkey in entries:
+ entries[pkey] += 1
+ else:
+ entries[pkey] = 1
+ multi = [e for e, c in entries.items() if c > 1]
+ if multi:
+ self.logger.debug("The following entries are included multiple "
+ "times:")
+ for entry in multi:
+ self.logger.debug(entry)
+
+ def promptFilter(self, msg, entries):
+ """Filter a supplied list based on user input."""
+ ret = []
+ entries.sort(key=lambda e: e.tag + ":" + e.get('name'))
+ for entry in entries[:]:
+ if entry in self.unhandled:
+ # don't prompt for entries that can't be installed
+ continue
+ if 'qtext' in entry.attrib:
+ iprompt = entry.get('qtext')
+ else:
+ iprompt = msg % (entry.tag, entry.get('name'))
+ if prompt(iprompt):
+ ret.append(entry)
+ return ret
+
+ def __getattr__(self, name):
+ if name in ['extra', 'handled', 'modified', '__important__']:
+ ret = []
+ for tool in self.tools:
+ ret += getattr(tool, name)
+ return ret
+ elif name in self.__dict__:
+ return self.__dict__[name]
+ raise AttributeError(name)
+
+ def InstallImportant(self):
+ """Install important entries
+
+ We also process the decision mode stuff here because we want to prevent
+ non-whitelisted/blacklisted 'important' entries from being installed
+ prior to determining the decision mode on the client.
+ """
+ # Need to process decision stuff early so that dryrun mode
+ # works with it
+ self.whitelist = [entry for entry in self.states
+ if not self.states[entry]]
+ if not Bcfg2.Options.setup.file:
+ if Bcfg2.Options.setup.decision == 'whitelist':
+ dwl = Bcfg2.Options.setup.decision_list
+ w_to_rem = [e for e in self.whitelist
+ if not matches_white_list(e, dwl)]
+ if w_to_rem:
+ self.logger.info("In whitelist mode: "
+ "suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name'))
+ for e in w_to_rem])
+ self.whitelist = [x for x in self.whitelist
+ if x not in w_to_rem]
+ elif Bcfg2.Options.setup.decision == 'blacklist':
+ b_to_rem = \
+ [e for e in self.whitelist
+ if not
+ passes_black_list(e, Bcfg2.Options.setup.decision_list)]
+ if b_to_rem:
+ self.logger.info("In blacklist mode: "
+ "suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name'))
+ for e in b_to_rem])
+ self.whitelist = [x for x in self.whitelist
+ if x not in b_to_rem]
+
+ # take care of important entries first
+ if not Bcfg2.Options.setup.dry_run:
+ for parent in self.config.findall(".//Path/.."):
+ name = parent.get("name")
+ if (name and (name in Bcfg2.Options.setup.only_bundles or
+ name not in Bcfg2.Options.setup.except_bundles)):
+ continue
+ for cfile in parent.findall("./Path"):
+ if (cfile.get('name') not in self.__important__ or
+ cfile.get('type') != 'file' or
+ cfile not in self.whitelist):
+ continue
+ tools = [t for t in self.tools
+ if t.handlesEntry(cfile) and t.canVerify(cfile)]
+ if not tools:
+ continue
+ if (Bcfg2.Options.setup.interactive and not
+ self.promptFilter("Install %s: %s? (y/N):",
+ [cfile])):
+ self.whitelist.remove(cfile)
+ continue
+ try:
+ self.states[cfile] = tools[0].InstallPath(cfile)
+ if self.states[cfile]:
+ tools[0].modified.append(cfile)
+ except: # pylint: disable=W0702
+ self.logger.error("Unexpected tool failure",
+ exc_info=1)
+ cfile.set('qtext', '')
+ if tools[0].VerifyPath(cfile, []):
+ self.whitelist.remove(cfile)
+
+ def Inventory(self):
+ """
+ Verify all entries,
+ find extra entries,
+ and build up workqueues
+
+ """
+ # initialize all states
+ for struct in self.config.getchildren():
+ for entry in struct.getchildren():
+ self.states[entry] = False
+ for tool in self.tools:
+ try:
+ self.states.update(tool.Inventory())
+ except: # pylint: disable=W0702
+ self.logger.error("%s.Inventory() call failed:" % tool.name,
+ exc_info=1)
+
+ def Decide(self): # pylint: disable=R0912
+ """Set self.whitelist based on user interaction."""
+ iprompt = "Install %s: %s? (y/N): "
+ rprompt = "Remove %s: %s? (y/N): "
+ if Bcfg2.Options.setup.remove:
+ if Bcfg2.Options.setup.remove == 'all':
+ self.removal = self.extra
+ elif Bcfg2.Options.setup.remove == 'services':
+ self.removal = [entry for entry in self.extra
+ if entry.tag == 'Service']
+ elif Bcfg2.Options.setup.remove == 'packages':
+ self.removal = [entry for entry in self.extra
+ if entry.tag == 'Package']
+ elif Bcfg2.Options.setup.remove == 'users':
+ self.removal = [entry for entry in self.extra
+ if entry.tag in ['POSIXUser', 'POSIXGroup']]
+
+ candidates = [entry for entry in self.states
+ if not self.states[entry]]
+
+ if Bcfg2.Options.setup.dry_run:
+ if self.whitelist:
+ self.logger.info("In dryrun mode: "
+ "suppressing entry installation for:")
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
+ for entry in self.whitelist])
+ self.whitelist = []
+ if self.removal:
+ self.logger.info("In dryrun mode: "
+ "suppressing entry removal for:")
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
+ for entry in self.removal])
+ self.removal = []
+
+ # Here is where most of the work goes
+ # first perform bundle filtering
+ all_bundle_names = [b.get('name')
+ for b in self.config.findall('./Bundle')]
+ bundles = self.config.getchildren()
+ if Bcfg2.Options.setup.only_bundles:
+ # warn if non-existent bundle given
+ for bundle in Bcfg2.Options.setup.only_bundles:
+ if bundle not in all_bundle_names:
+ self.logger.info("Warning: Bundle %s not found" % bundle)
+ bundles = [b for b in bundles
+ if b.get('name') in Bcfg2.Options.setup.only_bundles]
+ if Bcfg2.Options.setup.except_bundles:
+ # warn if non-existent bundle given
+ if not Bcfg2.Options.setup.bundle_quick:
+ for bundle in Bcfg2.Options.setup.except_bundles:
+ if bundle not in all_bundle_names:
+ self.logger.info("Warning: Bundle %s not found" %
+ bundle)
+ bundles = [
+ b for b in bundles
+ if b.get('name') not in Bcfg2.Options.setup.except_bundles]
+ self.whitelist = [e for e in self.whitelist
+ if any(e in b for b in bundles)]
+
+ # first process prereq actions
+ for bundle in bundles[:]:
+ if bundle.tag == 'Bundle':
+ bmodified = any((item in self.whitelist or
+ item in self.modified) for item in bundle)
+ else:
+ bmodified = False
+ actions = [a for a in bundle.findall('./Action')
+ if (a.get('timing') in ['pre', 'both'] and
+ (bmodified or a.get('when') == 'always'))]
+ # now we process all "pre" and "both" actions that are either
+ # always or the bundle has been modified
+ if Bcfg2.Options.setup.interactive:
+ self.promptFilter(iprompt, actions)
+ self.DispatchInstallCalls(actions)
+
+ if bundle.tag != 'Bundle':
+ continue
+
+ # if any prerequisite action failed, drop this bundle and its
+ # entries from the whitelist
+ if not all(self.states[a] for a in actions):
+ # then display bundles forced off with entries
+ self.logger.info("%s %s failed prerequisite action" %
+ (bundle.tag, bundle.get('name')))
+ bundles.remove(bundle)
+ b_to_remv = [ent for ent in self.whitelist if ent in bundle]
+ if b_to_remv:
+ self.logger.info("Not installing entries from %s %s" %
+ (bundle.tag, bundle.get('name')))
+ self.logger.info(["%s:%s" % (e.tag, e.get('name'))
+ for e in b_to_remv])
+ for ent in b_to_remv:
+ self.whitelist.remove(ent)
+
+ self.logger.debug("Installing entries in the following bundle(s):")
+ self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles
+ if b.get("name")))
+
+ if Bcfg2.Options.setup.interactive:
+ self.whitelist = self.promptFilter(iprompt, self.whitelist)
+ self.removal = self.promptFilter(rprompt, self.removal)
+
+ for entry in candidates:
+ if entry not in self.whitelist:
+ self.blacklist.append(entry)
+
+ def DispatchInstallCalls(self, entries):
+ """Dispatch install calls to underlying tools."""
+ for tool in self.tools:
+ handled = [entry for entry in entries if tool.canInstall(entry)]
+ if not handled:
+ continue
+ try:
+ self.states.update(tool.Install(handled))
+ except: # pylint: disable=W0702
+ self.logger.error("%s.Install() call failed:" % tool.name,
+ exc_info=1)
+
+ def Install(self):
+ """Install all entries."""
+ self.DispatchInstallCalls(self.whitelist)
+ mods = self.modified
+ mbundles = [struct for struct in self.config.findall('Bundle')
+ if any(True for mod in mods if mod in struct)]
+
+ if self.modified:
+ # Handle Bundle interdeps
+ if mbundles:
+ self.logger.info("The Following Bundles have been modified:")
+ self.logger.info([mbun.get('name') for mbun in mbundles])
+ tbm = [(t, b) for t in self.tools for b in mbundles]
+ for tool, bundle in tbm:
+ try:
+ self.states.update(tool.Inventory(structures=[bundle]))
+ except: # pylint: disable=W0702
+ self.logger.error("%s.Inventory() call failed:" %
+ tool.name,
+ exc_info=1)
+ clobbered = [entry for bundle in mbundles for entry in bundle
+ if (not self.states[entry] and
+ entry not in self.blacklist)]
+ if clobbered:
+ self.logger.debug("Found clobbered entries:")
+ self.logger.debug(["%s:%s" % (entry.tag, entry.get('name'))
+ for entry in clobbered])
+ if not Bcfg2.Options.setup.interactive:
+ self.DispatchInstallCalls(clobbered)
+
+ for bundle in self.config.findall('.//Bundle'):
+ if (Bcfg2.Options.setup.only_bundles and
+ bundle.get('name') not in
+ Bcfg2.Options.setup.only_bundles):
+ # prune out unspecified bundles when running with -b
+ continue
+ if bundle in mbundles:
+ self.logger.debug("Bundle %s was modified" %
+ bundle.get('name'))
+ func = "BundleUpdated"
+ else:
+ self.logger.debug("Bundle %s was not modified" %
+ bundle.get('name'))
+ func = "BundleNotUpdated"
+ for tool in self.tools:
+ try:
+ self.states.update(getattr(tool, func)(bundle))
+ except: # pylint: disable=W0702
+ self.logger.error("%s.%s(%s:%s) call failed:" %
+ (tool.name, func, bundle.tag,
+ bundle.get("name")), exc_info=1)
+
+ for indep in self.config.findall('.//Independent'):
+ for tool in self.tools:
+ try:
+ self.states.update(tool.BundleNotUpdated(indep))
+ except: # pylint: disable=W0702
+ self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:"
+ % (tool.name, indep.tag,
+ indep.get("name")), exc_info=1)
+
+ def Remove(self):
+ """Remove extra entries."""
+ for tool in self.tools:
+ extras = [entry for entry in self.removal
+ if tool.handlesEntry(entry)]
+ if extras:
+ try:
+ tool.Remove(extras)
+ except: # pylint: disable=W0702
+ self.logger.error("%s.Remove() failed" % tool.name,
+ exc_info=1)
+
+ def CondDisplayState(self, phase):
+ """Conditionally print tracing information."""
+ self.logger.info('Phase: %s' % phase)
+ self.logger.info('Correct entries: %d' %
+ list(self.states.values()).count(True))
+ self.logger.info('Incorrect entries: %d' %
+ list(self.states.values()).count(False))
+ if phase == 'final' and list(self.states.values()).count(False):
+ for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" +
+ e.get('name')):
+ if not self.states[entry]:
+ etype = entry.get('type')
+ if etype:
+ self.logger.info("%s:%s:%s" % (entry.tag, etype,
+ entry.get('name')))
+ else:
+ self.logger.info("%s:%s" % (entry.tag,
+ entry.get('name')))
+ self.logger.info('Total managed entries: %d' %
+ len(list(self.states.values())))
+ self.logger.info('Unmanaged entries: %d' % len(self.extra))
+ if phase == 'final' and Bcfg2.Options.setup.show_extra:
+ for entry in sorted(self.extra,
+ key=lambda e: e.tag + ":" + e.get('name')):
+ etype = entry.get('type')
+ if etype:
+ self.logger.info("%s:%s:%s" % (entry.tag, etype,
+ entry.get('name')))
+ else:
+ self.logger.info("%s:%s" % (entry.tag,
+ entry.get('name')))
+
+ if ((list(self.states.values()).count(False) == 0) and not self.extra):
+ self.logger.info('All entries correct.')
+
+ def ReInventory(self):
+ """Recheck everything."""
+ if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar:
+ self.logger.info("Rechecking system inventory")
+ self.Inventory()
+
+ def Execute(self):
+ """Run all methods."""
+ self.Inventory()
+ self.times['inventory'] = time.time()
+ self.CondDisplayState('initial')
+ self.InstallImportant()
+ self.Decide()
+ self.Install()
+ self.times['install'] = time.time()
+ self.Remove()
+ self.times['remove'] = time.time()
+ if self.modified:
+ self.ReInventory()
+ self.times['reinventory'] = time.time()
+ self.times['finished'] = time.time()
+ self.CondDisplayState('final')
+
+ def GenerateStats(self):
+ """Generate XML summary of execution statistics."""
+ feedback = XML.Element("upload-statistics")
+ stats = XML.SubElement(feedback,
+ 'Statistics', total=str(len(self.states)),
+ version='2.0',
+ revision=self.config.get('revision', '-1'))
+ good_entries = [key for key, val in list(self.states.items()) if val]
+ good = len(good_entries)
+ stats.set('good', str(good))
+ if any(not val for val in list(self.states.values())):
+ stats.set('state', 'dirty')
+ else:
+ stats.set('state', 'clean')
+
+ # List bad elements of the configuration
+ for (data, ename) in [(self.modified, 'Modified'),
+ (self.extra, "Extra"),
+ (good_entries, "Good"),
+ ([entry for entry in self.states
+ if not self.states[entry]], "Bad")]:
+ container = XML.SubElement(stats, ename)
+ for item in data:
+ item.set('qtext', '')
+ container.append(item)
+ item.text = None
+
+ timeinfo = XML.Element("OpStamps")
+ feedback.append(stats)
+ for (event, timestamp) in list(self.times.items()):
+ timeinfo.set(event, str(timestamp))
+ stats.append(timeinfo)
+ return feedback