diff options
Diffstat (limited to 'src')
163 files changed, 10285 insertions, 5831 deletions
diff --git a/src/lib/Bcfg2/Bcfg2Py3k.py b/src/lib/Bcfg2/Bcfg2Py3k.py index 6af8b3e5c..be5175e62 100644 --- a/src/lib/Bcfg2/Bcfg2Py3k.py +++ b/src/lib/Bcfg2/Bcfg2Py3k.py @@ -14,6 +14,7 @@ try: from urllib2 import install_opener from urllib2 import urlopen from urllib2 import HTTPError + from urllib2 import URLError except ImportError: from urllib.parse import urljoin, urlparse from urllib.request import HTTPBasicAuthHandler @@ -22,6 +23,7 @@ except ImportError: from urllib.request import install_opener from urllib.request import urlopen from urllib.error import HTTPError + from urllib.error import URLError try: from cStringIO import StringIO @@ -62,6 +64,12 @@ try: except ImportError: import http.client as httplib +# py3k compatibility +if sys.hexversion >= 0x03000000: + unicode = str +else: + unicode = unicode + # print to file compatibility def u_str(string, encoding=None): if sys.hexversion >= 0x03000000: @@ -75,7 +83,49 @@ def u_str(string, encoding=None): else: return unicode(string) -if sys.hexversion >= 0x03000000: - from io import FileIO as file -else: - file = file +try: + unicode = unicode +except: + unicode = str + +# base64 compat +from base64 import b64encode as _b64encode, b64decode as _b64decode +b64encode = lambda s: _b64encode(s.encode('ascii')).decode('ascii') +b64decode = lambda s: _b64decode(s.encode('ascii')).decode('ascii') + +try: + input = raw_input +except: + input = input + +try: + reduce = reduce +except NameError: + from functools import reduce + +try: + from collections import MutableMapping +except ImportError: + from UserDict import DictMixin as MutableMapping + + +# in py3k __cmp__ is no longer magical, so we define a mixin that can +# be used to define the rich comparison operators from __cmp__ +class CmpMixin(object): + def __lt__(self, other): + return self.__cmp__(other) < 0 + + def __gt__(self, other): + return self.__cmp__(other) > 0 + + def __eq__(self, other): + return self.__cmp__(other) == 0 + + def __ne__(self, other): + 
return not self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py index 9ad669ad6..2218b890b 100644 --- a/src/lib/Bcfg2/Client/Frame.py +++ b/src/lib/Bcfg2/Client/Frame.py @@ -17,32 +17,6 @@ def cmpent(ent1, ent2): return cmp(ent1.get('name'), ent2.get('name')) -def promptFilter(prompt, entries): - """Filter a supplied list based on user input.""" - ret = [] - entries.sort(cmpent) - for entry in entries[:]: - if 'qtext' in entry.attrib: - iprompt = entry.get('qtext') - else: - iprompt = prompt % (entry.tag, entry.get('name')) - try: - # py3k compatibility - try: - ans = raw_input(iprompt.encode(sys.stdout.encoding, 'replace')) - except NameError: - ans = input(iprompt) - if ans in ['y', 'Y']: - ret.append(entry) - except EOFError: - # python 2.4.3 on CentOS doesn't like ^C for some reason - break - except: - print("Error while reading input") - continue - return ret - - def matches_entry(entryspec, entry): # both are (tag, name) if entryspec == entry: @@ -71,7 +45,7 @@ def passes_black_list(entry, blacklist): for be in blacklist] -class Frame: +class Frame(object): """Frame is the container for all Tool objects and state information.""" def __init__(self, config, setup, times, drivers, dryrun): self.config = config @@ -84,7 +58,7 @@ class Frame: self.whitelist = [] self.blacklist = [] self.removal = [] - self.logger = logging.getLogger("Bcfg2.Client.Frame") + self.logger = logging.getLogger(__name__) for driver in drivers[:]: if driver not in Bcfg2.Client.Tools.drivers and \ isinstance(driver, str): @@ -124,33 +98,74 @@ class Frame: self.logger.info([tool.name for tool in self.tools]) # find entries not handled by any tools - problems = [entry for struct in config for \ - entry in struct if entry not in self.handled] + self.unhandled = [entry for struct in config + for 
entry in struct + if entry not in self.handled] - if problems: + if self.unhandled: self.logger.error("The following entries are not handled by any tool:") - self.logger.error(["%s:%s:%s" % (entry.tag, entry.get('type'), \ - entry.get('name')) for entry in problems]) - self.logger.error("") - entries = [(entry.tag, entry.get('name')) - for struct in config for entry in struct] + for entry in self.unhandled: + self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'), + entry.get('name'))) + + self.find_dups(config) + pkgs = [(entry.get('name'), entry.get('origin')) - for struct in config for entry in struct if entry.tag == 'Package'] - multi = [] - for entry in entries[:]: - if entries.count(entry) > 1: - multi.append(entry) - entries.remove(entry) - if multi: - self.logger.debug("The following entries are included multiple times:") - self.logger.debug(["%s:%s" % entry for entry in multi]) - self.logger.debug("") + for struct in config + for entry in struct + if entry.tag == 'Package'] if pkgs: self.logger.debug("The following packages are specified in bcfg2:") self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == None]) self.logger.debug("The following packages are prereqs added by Packages:") self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages']) + def find_dups(self, config): + entries = dict() + for struct in config: + for entry in struct: + for tool in self.tools: + if tool.handlesEntry(entry): + pkey = tool.primarykey(entry) + if pkey in entries: + entries[pkey] += 1 + else: + entries[pkey] = 1 + multi = [e for e, c in entries.items() if c > 1] + if multi: + self.logger.debug("The following entries are included multiple times:") + for entry in multi: + self.logger.debug(entry) + + def promptFilter(self, prompt, entries): + """Filter a supplied list based on user input.""" + ret = [] + entries.sort(cmpent) + for entry in entries[:]: + if entry in self.unhandled: + # don't prompt for entries that can't be installed + continue + if 'qtext' in 
entry.attrib: + iprompt = entry.get('qtext') + else: + iprompt = prompt % (entry.tag, entry.get('name')) + try: + # py3k compatibility + try: + ans = raw_input(iprompt.encode(sys.stdout.encoding, + 'replace')) + except NameError: + ans = input(iprompt) + if ans in ['y', 'Y']: + ret.append(entry) + except EOFError: + # python 2.4.3 on CentOS doesn't like ^C for some reason + break + except: + print("Error while reading input") + continue + return ret + def __getattr__(self, name): if name in ['extra', 'handled', 'modified', '__important__']: ret = [] @@ -190,17 +205,26 @@ class Frame: self.whitelist = [x for x in self.whitelist if x not in b_to_rem] # take care of important entries first - if not self.dryrun and not self.setup['bundle']: - for cfile in [cfl for cfl in self.config.findall(".//Path") \ - if cfl.get('name') in self.__important__ and \ - cfl.get('type') == 'file']: - if cfile not in self.whitelist: + if not self.dryrun: + for cfile in self.config.findall(".//Path"): + if (cfile.get('name') not in self.__important__ or + cfile.get('type') != 'file' or + cfile not in self.whitelist): + continue + parent = cfile.getparent() + if ((parent.tag == "Bundle" and + ((self.setup['bundle'] and + parent.get("name") not in self.setup['bundle']) or + (self.setup['skipbundle'] and + parent.get("name") in self.setup['skipbundle']))) or + (parent.tag == "Independent" and + (self.setup['bundle'] or self.setup['skipindep']))): continue - tl = [t for t in self.tools if t.handlesEntry(cfile) \ - and t.canVerify(cfile)] + tl = [t for t in self.tools + if t.handlesEntry(cfile) and t.canVerify(cfile)] if tl: if self.setup['interactive'] and not \ - promptFilter("Install %s: %s? (y/N):", [cfile]): + self.promptFilter("Install %s: %s? 
(y/N):", [cfile]): self.whitelist.remove(cfile) continue try: @@ -262,22 +286,33 @@ class Frame: return # Here is where most of the work goes # first perform bundle filtering + all_bundle_names = [b.get('name') + for b in self.config.findall('./Bundle')] + bundles = self.config.getchildren() if self.setup['bundle']: - all_bundle_names = [b.get('name') for b in - self.config.findall('./Bundle')] # warn if non-existent bundle given for bundle in self.setup['bundle']: if bundle not in all_bundle_names: self.logger.info("Warning: Bundle %s not found" % bundle) - bundles = [b for b in self.config.findall('./Bundle') - if b.get('name') in self.setup['bundle']] - self.whitelist = [e for e in self.whitelist - if True in [e in b for b in bundles]] + bundles = filter(lambda b: b.get('name') in self.setup['bundle'], + bundles) elif self.setup['indep']: - bundles = [nb for nb in self.config.getchildren() - if nb.tag != 'Bundle'] - else: - bundles = self.config.getchildren() + bundles = filter(lambda b: b.tag != 'Bundle', bundles) + if self.setup['skipbundle']: + # warn if non-existent bundle given + if not self.setup['bundle_quick']: + for bundle in self.setup['skipbundle']: + if bundle not in all_bundle_names: + self.logger.info("Warning: Bundle %s not found" % + bundle) + bundles = filter(lambda b: \ + b.get('name') not in self.setup['skipbundle'], + bundles) + if self.setup['skipindep']: + bundles = filter(lambda b: b.tag == 'Bundle', bundles) + + self.whitelist = [e for e in self.whitelist + if True in [e in b for b in bundles]] # first process prereq actions for bundle in bundles[:]: @@ -289,7 +324,7 @@ class Frame: (bmodified or a.get('when') == 'always'))] # now we process all "always actions" if self.setup['interactive']: - promptFilter(prompt, actions) + self.promptFilter(prompt, actions) self.DispatchInstallCalls(actions) # need to test to fail entries in whitelist @@ -307,8 +342,8 @@ class Frame: [self.whitelist.remove(ent) for ent in b_to_remv] if 
self.setup['interactive']: - self.whitelist = promptFilter(prompt, self.whitelist) - self.removal = promptFilter(rprompt, self.removal) + self.whitelist = self.promptFilter(prompt, self.whitelist) + self.removal = self.promptFilter(rprompt, self.removal) for entry in candidates: if entry not in self.whitelist: @@ -337,7 +372,6 @@ class Frame: if mbundles: self.logger.info("The Following Bundles have been modified:") self.logger.info([mbun.get('name') for mbun in mbundles]) - self.logger.info("") tbm = [(t, b) for t in self.tools for b in mbundles] for tool, bundle in tbm: try: @@ -380,19 +414,33 @@ class Frame: def CondDisplayState(self, phase): """Conditionally print tracing information.""" - self.logger.info('\nPhase: %s' % phase) - self.logger.info('Correct entries:\t%d' % list(self.states.values()).count(True)) - self.logger.info('Incorrect entries:\t%d' % list(self.states.values()).count(False)) + self.logger.info('Phase: %s' % phase) + self.logger.info('Correct entries: %d' % + list(self.states.values()).count(True)) + self.logger.info('Incorrect entries: %d' % + list(self.states.values()).count(False)) if phase == 'final' and list(self.states.values()).count(False): - self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for \ - entry in self.states if not self.states[entry]]) - self.logger.info('Total managed entries:\t%d' % len(list(self.states.values()))) - self.logger.info('Unmanaged entries:\t%d' % len(self.extra)) + for entry in self.states.keys(): + if not self.states[entry]: + etype = entry.get('type') + if etype: + self.logger.info( "%s:%s:%s" % (entry.tag, etype, + entry.get('name'))) + else: + self.logger.info(" %s:%s" % (entry.tag, + entry.get('name'))) + self.logger.info('Total managed entries: %d' % + len(list(self.states.values()))) + self.logger.info('Unmanaged entries: %d' % len(self.extra)) if phase == 'final' and self.setup['extra']: - self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) \ - for entry in self.extra]) - - 
self.logger.info("") + for entry in self.extra: + etype = entry.get('type') + if etype: + self.logger.info( "%s:%s:%s" % (entry.tag, etype, + entry.get('name'))) + else: + self.logger.info(" %s:%s" % (entry.tag, + entry.get('name'))) if ((list(self.states.values()).count(False) == 0) and not self.extra): self.logger.info('All entries correct.') @@ -428,7 +476,8 @@ class Frame: total=str(len(self.states)), version='2.0', revision=self.config.get('revision', '-1')) - good = len([key for key, val in list(self.states.items()) if val]) + good_entries = [key for key, val in list(self.states.items()) if val] + good = len(good_entries) stats.set('good', str(good)) if len([key for key, val in list(self.states.items()) if not val]) == 0: stats.set('state', 'clean') @@ -437,6 +486,7 @@ class Frame: # List bad elements of the configuration for (data, ename) in [(self.modified, 'Modified'), (self.extra, "Extra"), \ + (good_entries, "Good"), ([entry for entry in self.states if not \ self.states[entry]], "Bad")]: container = Bcfg2.Client.XML.SubElement(stats, ename) diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py index aaaf2472f..d70916792 100644 --- a/src/lib/Bcfg2/Client/Tools/APK.py +++ b/src/lib/Bcfg2/Client/Tools/APK.py @@ -24,8 +24,8 @@ class APK(Bcfg2.Client.Tools.PkgTool): for pkg in zip(names, nameversions): pkgname = pkg[0] version = pkg[1][len(pkgname) + 1:] - self.logger.debug(" pkgname: %s\n version: %s" % - (pkgname, version)) + self.logger.debug(" pkgname: %s" % pkgname) + self.logger.debug(" version: %s" % version) self.installed[pkgname] = version def VerifyPackage(self, entry, modlist): diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py index 6b839ffbc..ce7e9701f 100644 --- a/src/lib/Bcfg2/Client/Tools/APT.py +++ b/src/lib/Bcfg2/Client/Tools/APT.py @@ -6,22 +6,7 @@ warnings.filterwarnings("ignore", "apt API not stable yet", FutureWarning) import apt.cache import os - import Bcfg2.Client.Tools 
-import Bcfg2.Options - -# Options for tool locations -opts = {'install_path': Bcfg2.Options.CLIENT_APT_TOOLS_INSTALL_PATH, - 'var_path': Bcfg2.Options.CLIENT_APT_TOOLS_VAR_PATH, - 'etc_path': Bcfg2.Options.CLIENT_SYSTEM_ETC_PATH} -setup = Bcfg2.Options.OptionParser(opts) -setup.parse([]) -install_path = setup['install_path'] -var_path = setup['var_path'] -etc_path = setup['etc_path'] -DEBSUMS = '%s/bin/debsums' % install_path -APTGET = '%s/bin/apt-get' % install_path -DPKG = '%s/bin/dpkg' % install_path class APT(Bcfg2.Client.Tools.Tool): """The Debian toolset implements package and service operations and inherits @@ -29,18 +14,26 @@ class APT(Bcfg2.Client.Tools.Tool): """ name = 'APT' - __execs__ = [DEBSUMS, APTGET, DPKG] + __execs__ = [] __handles__ = [('Package', 'deb'), ('Path', 'ignore')] __req__ = {'Package': ['name', 'version'], 'Path': ['type']} def __init__(self, logger, setup, config): Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + + self.install_path = setup.get('apt_install_path', '/usr') + self.var_path = setup.get('apt_var_path', '/var') + self.etc_path = setup.get('apt_etc_path', '/etc') + self.debsums = '%s/bin/debsums' % self.install_path + self.aptget = '%s/bin/apt-get' % self.install_path + self.dpkg = '%s/bin/dpkg' % self.install_path + self.__execs__ = [self.debsums, self.aptget, self.dpkg] + path_entries = os.environ['PATH'].split(':') for reqdir in ['/sbin', '/usr/sbin']: if reqdir not in path_entries: os.environ['PATH'] = os.environ['PATH'] + ':' + reqdir - self.pkgcmd = '%s ' % APTGET + \ - '-o DPkg::Options::=--force-overwrite ' + \ + self.pkgcmd = '%s ' % self.aptget + \ '-o DPkg::Options::=--force-confold ' + \ '-o DPkg::Options::=--force-confmiss ' + \ '--reinstall ' + \ @@ -53,21 +46,21 @@ class APT(Bcfg2.Client.Tools.Tool): if entry.tag == 'Path' and \ entry.get('type') == 'ignore'] self.__important__ = self.__important__ + \ - ["%s/cache/debconf/config.dat" % var_path, - "%s/cache/debconf/templates.dat" % var_path, 
+ ["%s/cache/debconf/config.dat" % self.var_path, + "%s/cache/debconf/templates.dat" % self.var_path, '/etc/passwd', '/etc/group', - '%s/apt/apt.conf' % etc_path, - '%s/dpkg/dpkg.cfg' % etc_path] + \ + '%s/apt/apt.conf' % self.etc_path, + '%s/dpkg/dpkg.cfg' % self.etc_path] + \ [entry.get('name') for struct in config for entry in struct \ if entry.tag == 'Path' and \ - entry.get('name').startswith('%s/apt/sources.list' % etc_path)] + entry.get('name').startswith('%s/apt/sources.list' % self.etc_path)] self.nonexistent = [entry.get('name') for struct in config for entry in struct \ if entry.tag == 'Path' and entry.get('type') == 'nonexistent'] os.environ["DEBIAN_FRONTEND"] = 'noninteractive' self.actions = {} if self.setup['kevlar'] and not self.setup['dryrun']: - self.cmd.run("%s --force-confold --configure --pending" % DPKG) - self.cmd.run("%s clean" % APTGET) + self.cmd.run("%s --force-confold --configure --pending" % self.dpkg) + self.cmd.run("%s clean" % self.aptget) try: self.pkg_cache = apt.cache.Cache() except SystemError: @@ -95,7 +88,8 @@ class APT(Bcfg2.Client.Tools.Tool): for (name, version) in extras] def VerifyDebsums(self, entry, modlist): - output = self.cmd.run("%s -as %s" % (DEBSUMS, entry.get('name')))[1] + output = self.cmd.run("%s -as %s" % (self.debsums, + entry.get('name')))[1] if len(output) == 1 and "no md5sums for" in output[0]: self.logger.info("Package %s has no md5sums. 
Cannot verify" % \ entry.get('name')) diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py index dc49347e9..52d4e6a3f 100644 --- a/src/lib/Bcfg2/Client/Tools/Action.py +++ b/src/lib/Bcfg2/Client/Tools/Action.py @@ -2,6 +2,7 @@ import Bcfg2.Client.Tools from Bcfg2.Client.Frame import matches_white_list, passes_black_list +from Bcfg2.Bcfg2Py3k import input """ <Action timing='pre|post|both' @@ -44,11 +45,7 @@ class Action(Bcfg2.Client.Tools.Tool): if self.setup['interactive']: prompt = ('Run Action %s, %s: (y/N): ' % (entry.get('name'), entry.get('command'))) - # py3k compatibility - try: - ans = raw_input(prompt) - except NameError: - ans = input(prompt) + ans = input(prompt) if ans not in ['y', 'Y']: return False if self.setup['servicemode'] == 'build': @@ -64,7 +61,7 @@ class Action(Bcfg2.Client.Tools.Tool): else: return rc == 0 else: - self.logger.debug("In dryrun mode: not running action:\n %s" % + self.logger.debug("In dryrun mode: not running action: %s" % (entry.get('name'))) return False diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py index 12ea5f132..0169b12da 100644 --- a/src/lib/Bcfg2/Client/Tools/Chkconfig.py +++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py @@ -45,30 +45,14 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool): except IndexError: onlevels = [] + pstatus = self.check_service(entry) if entry.get('status') == 'on': - status = (len(onlevels) > 0) + status = (len(onlevels) > 0 and pstatus) command = 'start' else: - status = (len(onlevels) == 0) + status = (len(onlevels) == 0 and not pstatus) command = 'stop' - if entry.get('mode', 'default') == 'supervised': - # turn on or off the service in supervised mode - pstatus = self.cmd.run('/sbin/service %s status' % \ - entry.get('name'))[0] - needs_modification = ((command == 'start' and pstatus) or \ - (command == 'stop' and not pstatus)) - if (not self.setup.get('dryrun') and - self.setup['servicemode'] != 'disabled' and - 
needs_modification): - self.cmd.run(self.get_svc_command(entry, command)) - # service was modified, so it failed - pstatus = False - - # chkconfig/init.d service - if entry.get('status') == 'on': - status = status and not pstatus - if not status: if entry.get('status') == 'on': entry.set('current_status', 'off') @@ -78,22 +62,22 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool): def InstallService(self, entry): """Install Service entry.""" - # don't take any actions for mode='manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. Skipping " - "installation." % (entry.get('name'))) - return False rcmd = "/sbin/chkconfig %s %s" self.cmd.run("/sbin/chkconfig --add %s" % (entry.attrib['name'])) self.logger.info("Installing Service %s" % (entry.get('name'))) - pass1 = True + rv = True if entry.get('status') == 'off': - rc = self.cmd.run(rcmd % (entry.get('name'), - entry.get('status')) + \ - " --level 0123456")[0] - pass1 = rc == 0 - rc = self.cmd.run(rcmd % (entry.get('name'), entry.get('status')))[0] - return pass1 and rc == 0 + rv &= self.cmd.run(rcmd + " --level 0123456" % + (entry.get('name'), + entry.get('status')))[0] == 0 + if entry.get("current_status") == "on": + rv &= self.stop_service(entry) + else: + rv &= self.cmd.run(rcmd % (entry.get('name'), + entry.get('status')))[0] == 0 + if entry.get("current_status") == "off": + rv &= self.start_service(entry) + return rv def FindExtra(self): """Locate extra chkconfig Services.""" diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py index ca6fc439e..7d5af1127 100644 --- a/src/lib/Bcfg2/Client/Tools/DebInit.py +++ b/src/lib/Bcfg2/Client/Tools/DebInit.py @@ -76,11 +76,6 @@ class DebInit(Bcfg2.Client.Tools.SvcTool): def InstallService(self, entry): """Install Service for entry.""" - # don't take any actions for mode='manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. 
Skipping " - "installation." % (entry.get('name'))) - return False self.logger.info("Installing Service %s" % (entry.get('name'))) try: os.stat('/etc/init.d/%s' % entry.get('name')) diff --git a/src/lib/Bcfg2/Client/Tools/OpenCSW.py b/src/lib/Bcfg2/Client/Tools/OpenCSW.py new file mode 100644 index 000000000..6aafe316f --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/OpenCSW.py @@ -0,0 +1,33 @@ +# This is the bcfg2 support for opencsw packages (pkgutil) +"""This provides Bcfg2 support for OpenCSW packages.""" + +import tempfile +import Bcfg2.Client.Tools.SYSV + + +class OpenCSW(Bcfg2.Client.Tools.SYSV.SYSV): + """Support for OpenCSW packages.""" + pkgtype = 'opencsw' + pkgtool = ("/opt/csw/bin/pkgutil -y -i %s", ("%s", ["bname"])) + name = 'OpenCSW' + __execs__ = ['/opt/csw/bin/pkgutil', "/usr/bin/pkginfo"] + __handles__ = [('Package', 'opencsw')] + __ireq__ = {'Package': ['name', 'version', 'bname']} + + def __init__(self, logger, setup, config): + # dont use the sysv constructor + Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + noaskfile = tempfile.NamedTemporaryFile() + self.noaskname = noaskfile.name + try: + noaskfile.write(Bcfg2.Client.Tools.SYSV.noask) + except: + pass + + # VerifyPackage comes from Bcfg2.Client.Tools.SYSV + # Install comes from Bcfg2.Client.Tools.PkgTool + # Extra comes from Bcfg2.Client.Tools.Tool + # Remove comes from Bcfg2.Client.Tools.SYSV + def FindExtraPackages(self): + """Pass through to null FindExtra call.""" + return [] diff --git a/src/lib/Bcfg2/Client/Tools/POSIX.py b/src/lib/Bcfg2/Client/Tools/POSIX.py deleted file mode 100644 index 0d67dbbab..000000000 --- a/src/lib/Bcfg2/Client/Tools/POSIX.py +++ /dev/null @@ -1,943 +0,0 @@ -"""All POSIX Type client support for Bcfg2.""" - -import binascii -from datetime import datetime -import difflib -import errno -import grp -import logging -import os -import pwd -import shutil -import stat -import sys -import time -# py3k compatibility -if sys.hexversion >= 0x03000000: - 
unicode = str - -import Bcfg2.Client.Tools -import Bcfg2.Options -from Bcfg2.Client import XML - -log = logging.getLogger('POSIX') - -# map between dev_type attribute and stat constants -device_map = {'block': stat.S_IFBLK, - 'char': stat.S_IFCHR, - 'fifo': stat.S_IFIFO} - - -def calcPerms(initial, perms): - """This compares ondisk permissions with specified ones.""" - pdisp = [{1:stat.S_ISVTX, 2:stat.S_ISGID, 4:stat.S_ISUID}, - {1:stat.S_IXUSR, 2:stat.S_IWUSR, 4:stat.S_IRUSR}, - {1:stat.S_IXGRP, 2:stat.S_IWGRP, 4:stat.S_IRGRP}, - {1:stat.S_IXOTH, 2:stat.S_IWOTH, 4:stat.S_IROTH}] - tempperms = initial - if len(perms) == 3: - perms = '0%s' % (perms) - pdigits = [int(perms[digit]) for digit in range(4)] - for index in range(4): - for (num, perm) in list(pdisp[index].items()): - if pdigits[index] & num: - tempperms |= perm - return tempperms - - -def normGid(entry): - """ - This takes a group name or gid and - returns the corresponding gid or False. - """ - try: - try: - return int(entry.get('group')) - except: - return int(grp.getgrnam(entry.get('group'))[2]) - except (OSError, KeyError): - log.error('GID normalization failed for %s. Does group %s exist?' - % (entry.get('name'), entry.get('group'))) - return False - - -def normUid(entry): - """ - This takes a user name or uid and - returns the corresponding uid or False. - """ - try: - try: - return int(entry.get('owner')) - except: - return int(pwd.getpwnam(entry.get('owner'))[2]) - except (OSError, KeyError): - log.error('UID normalization failed for %s. Does owner %s exist?' - % (entry.get('name'), entry.get('owner'))) - return False - - -def isString(strng, encoding): - """ - Returns true if the string contains no ASCII control characters - and can be decoded from the specified encoding. 
- """ - for char in strng: - if ord(char) < 9 or ord(char) > 13 and ord(char) < 32: - return False - try: - strng.decode(encoding) - return True - except: - return False - - -class POSIX(Bcfg2.Client.Tools.Tool): - """POSIX File support code.""" - name = 'POSIX' - __handles__ = [('Path', 'device'), - ('Path', 'directory'), - ('Path', 'file'), - ('Path', 'hardlink'), - ('Path', 'nonexistent'), - ('Path', 'permissions'), - ('Path', 'symlink')] - __req__ = {'Path': ['name', 'type']} - - # grab paranoid options from /etc/bcfg2.conf - opts = {'ppath': Bcfg2.Options.PARANOID_PATH, - 'max_copies': Bcfg2.Options.PARANOID_MAX_COPIES} - setup = Bcfg2.Options.OptionParser(opts) - setup.parse([]) - ppath = setup['ppath'] - max_copies = setup['max_copies'] - - def canInstall(self, entry): - """Check if entry is complete for installation.""" - if Bcfg2.Client.Tools.Tool.canInstall(self, entry): - if (entry.tag, - entry.get('type'), - entry.text, - entry.get('empty', 'false')) == ('Path', - 'file', - None, - 'false'): - return False - return True - else: - return False - - def gatherCurrentData(self, entry): - if entry.tag == 'Path' and entry.get('type') == 'file': - try: - ondisk = os.stat(entry.get('name')) - except OSError: - entry.set('current_exists', 'false') - self.logger.debug("%s %s does not exist" % - (entry.tag, entry.get('name'))) - return False - try: - entry.set('current_owner', str(ondisk[stat.ST_UID])) - entry.set('current_group', str(ondisk[stat.ST_GID])) - except (OSError, KeyError): - pass - entry.set('perms', str(oct(ondisk[stat.ST_MODE])[-4:])) - - def Verifydevice(self, entry, _): - """Verify device entry.""" - if entry.get('dev_type') == None or \ - entry.get('owner') == None or \ - entry.get('group') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' 
% (entry.get('name'))) - return False - if entry.get('dev_type') in ['block', 'char']: - # check if major/minor are properly specified - if entry.get('major') == None or \ - entry.get('minor') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % (entry.get('name'))) - return False - try: - # check for file existence - filestat = os.stat(entry.get('name')) - except OSError: - entry.set('current_exists', 'false') - self.logger.debug("%s %s does not exist" % - (entry.tag, entry.get('name'))) - return False - - try: - # attempt to verify device properties as specified in config - dev_type = entry.get('dev_type') - mode = calcPerms(device_map[dev_type], - entry.get('mode', '0600')) - owner = normUid(entry) - group = normGid(entry) - if dev_type in ['block', 'char']: - # check for incompletely specified entries - if entry.get('major') == None or \ - entry.get('minor') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' 
% (entry.get('name'))) - return False - major = int(entry.get('major')) - minor = int(entry.get('minor')) - if major == os.major(filestat.st_rdev) and \ - minor == os.minor(filestat.st_rdev) and \ - mode == filestat.st_mode and \ - owner == filestat.st_uid and \ - group == filestat.st_gid: - return True - else: - return False - elif dev_type == 'fifo' and \ - mode == filestat.st_mode and \ - owner == filestat.st_uid and \ - group == filestat.st_gid: - return True - else: - self.logger.info('Device properties for %s incorrect' % \ - entry.get('name')) - return False - except OSError: - self.logger.debug("%s %s failed to verify" % - (entry.tag, entry.get('name'))) - return False - - def Installdevice(self, entry): - """Install device entries.""" - try: - # check for existing paths and remove them - os.lstat(entry.get('name')) - try: - os.unlink(entry.get('name')) - exists = False - except OSError: - self.logger.info('Failed to unlink %s' % \ - entry.get('name')) - return False - except OSError: - exists = False - - if not exists: - try: - dev_type = entry.get('dev_type') - mode = calcPerms(device_map[dev_type], - entry.get('mode', '0600')) - if dev_type in ['block', 'char']: - # check if major/minor are properly specified - if entry.get('major') == None or \ - entry.get('minor') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % (entry.get('name'))) - return False - major = int(entry.get('major')) - minor = int(entry.get('minor')) - device = os.makedev(major, minor) - os.mknod(entry.get('name'), mode, device) - else: - os.mknod(entry.get('name'), mode) - """ - Python uses the OS mknod(2) implementation which modifies the - mode based on the umask of the running process. Therefore, the - following chmod(2) call is needed to make sure the permissions - are set as specified by the user. 
- """ - os.chmod(entry.get('name'), mode) - os.chown(entry.get('name'), normUid(entry), normGid(entry)) - return True - except KeyError: - self.logger.error('Failed to install %s' % entry.get('name')) - except OSError: - self.logger.error('Failed to install %s' % entry.get('name')) - return False - - def Verifydirectory(self, entry, modlist): - """Verify Path type='directory' entry.""" - if entry.get('perms') == None or \ - entry.get('owner') == None or \ - entry.get('group') == None: - self.logger.error("POSIX: Entry %s not completely specified. " - "Try running bcfg2-lint." % (entry.get('name'))) - return False - while len(entry.get('perms', '')) < 4: - entry.set('perms', '0' + entry.get('perms', '')) - try: - ondisk = os.stat(entry.get('name')) - except OSError: - entry.set('current_exists', 'false') - self.logger.info("POSIX: %s %s does not exist" % - (entry.tag, entry.get('name'))) - return False - try: - owner = str(ondisk[stat.ST_UID]) - group = str(ondisk[stat.ST_GID]) - except (OSError, KeyError): - self.logger.info("POSIX: User/Group resolution failed " - "for path %s" % entry.get('name')) - owner = 'root' - group = '0' - finfo = os.stat(entry.get('name')) - perms = oct(finfo[stat.ST_MODE])[-4:] - if entry.get('mtime', '-1') != '-1': - mtime = str(finfo[stat.ST_MTIME]) - else: - mtime = '-1' - pTrue = ((owner == str(normUid(entry))) and - (group == str(normGid(entry))) and - (perms == entry.get('perms')) and - (mtime == entry.get('mtime', '-1'))) - - pruneTrue = True - ex_ents = [] - if entry.get('prune', 'false') == 'true' \ - and (entry.tag == 'Path' and entry.get('type') == 'directory'): - # check for any extra entries when prune='true' attribute is set - try: - entries = ['/'.join([entry.get('name'), ent]) \ - for ent in os.listdir(entry.get('name'))] - ex_ents = [e for e in entries if e not in modlist] - if ex_ents: - pruneTrue = False - self.logger.info("POSIX: Directory %s contains " - "extra entries:" % entry.get('name')) - 
self.logger.info(ex_ents) - nqtext = entry.get('qtext', '') + '\n' - nqtext += "Directory %s contains extra entries: " % \ - entry.get('name') - nqtext += ":".join(ex_ents) - entry.set('qtest', nqtext) - [entry.append(XML.Element('Prune', path=x)) \ - for x in ex_ents] - except OSError: - ex_ents = [] - pruneTrue = True - - if not pTrue: - if owner != str(normUid(entry)): - entry.set('current_owner', owner) - self.logger.debug("%s %s ownership wrong" % \ - (entry.tag, entry.get('name'))) - nqtext = entry.get('qtext', '') + '\n' - nqtext += "%s owner wrong. is %s should be %s" % \ - (entry.get('name'), owner, entry.get('owner')) - entry.set('qtext', nqtext) - if group != str(normGid(entry)): - entry.set('current_group', group) - self.logger.debug("%s %s group wrong" % \ - (entry.tag, entry.get('name'))) - nqtext = entry.get('qtext', '') + '\n' - nqtext += "%s group is %s should be %s" % \ - (entry.get('name'), group, entry.get('group')) - entry.set('qtext', nqtext) - if perms != entry.get('perms'): - entry.set('current_perms', perms) - self.logger.debug("%s %s permissions are %s should be %s" % - (entry.tag, - entry.get('name'), - perms, - entry.get('perms'))) - nqtext = entry.get('qtext', '') + '\n' - nqtext += "%s %s perms are %s should be %s" % \ - (entry.tag, - entry.get('name'), - perms, - entry.get('perms')) - entry.set('qtext', nqtext) - if mtime != entry.get('mtime', '-1'): - entry.set('current_mtime', mtime) - self.logger.debug("%s %s mtime is %s should be %s" \ - % (entry.tag, entry.get('name'), mtime, - entry.get('mtime'))) - nqtext = entry.get('qtext', '') + '\n' - nqtext += "%s mtime is %s should be %s" % \ - (entry.get('name'), mtime, entry.get('mtime')) - entry.set('qtext', nqtext) - if entry.get('type') != 'file': - nnqtext = entry.get('qtext') - nnqtext += '\nInstall %s %s: (y/N) ' % (entry.get('type'), - entry.get('name')) - entry.set('qtext', nnqtext) - return pTrue and pruneTrue - - def Installdirectory(self, entry): - """Install Path 
type='directory' entry.""" - if entry.get('perms') == None or \ - entry.get('owner') == None or \ - entry.get('group') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % \ - (entry.get('name'))) - return False - self.logger.info("Installing directory %s" % (entry.get('name'))) - try: - fmode = os.lstat(entry.get('name')) - if not stat.S_ISDIR(fmode[stat.ST_MODE]): - self.logger.debug("Found a non-directory entry at %s" % \ - (entry.get('name'))) - try: - os.unlink(entry.get('name')) - exists = False - except OSError: - self.logger.info("Failed to unlink %s" % \ - (entry.get('name'))) - return False - else: - self.logger.debug("Found a pre-existing directory at %s" % \ - (entry.get('name'))) - exists = True - except OSError: - # stat failed - exists = False - - if not exists: - parent = "/".join(entry.get('name').split('/')[:-1]) - if parent: - try: - os.stat(parent) - except: - self.logger.debug('Creating parent path for directory %s' % (entry.get('name'))) - for idx in range(len(parent.split('/')[:-1])): - current = '/'+'/'.join(parent.split('/')[1:2+idx]) - try: - sloc = os.stat(current) - except OSError: - try: - os.mkdir(current) - continue - except OSError: - return False - if not stat.S_ISDIR(sloc[stat.ST_MODE]): - try: - os.unlink(current) - os.mkdir(current) - except OSError: - return False - - try: - os.mkdir(entry.get('name')) - except OSError: - self.logger.error('Failed to create directory %s' % \ - (entry.get('name'))) - return False - if entry.get('prune', 'false') == 'true' and entry.get("qtest"): - for pent in entry.findall('Prune'): - pname = pent.get('path') - ulfailed = False - if os.path.isdir(pname): - self.logger.info("Not removing extra directory %s, " - "please check and remove manually" % pname) - continue - try: - self.logger.debug("Unlinking file %s" % pname) - os.unlink(pname) - except OSError: - self.logger.error("Failed to unlink path %s" % pname) - ulfailed = True - if ulfailed: - return 
False - return self.Installpermissions(entry) - - def Verifyfile(self, entry, _): - """Verify Path type='file' entry.""" - # permissions check + content check - permissionStatus = self.Verifydirectory(entry, _) - tbin = False - if entry.text == None and entry.get('empty', 'false') == 'false': - self.logger.error("Cannot verify incomplete Path type='%s' %s" % - (entry.get('type'), entry.get('name'))) - return False - if entry.get('encoding', 'ascii') == 'base64': - tempdata = binascii.a2b_base64(entry.text) - tbin = True - elif entry.get('empty', 'false') == 'true': - tempdata = '' - else: - tempdata = entry.text - if type(tempdata) == unicode: - try: - tempdata = tempdata.encode(self.setup['encoding']) - except UnicodeEncodeError: - e = sys.exc_info()[1] - self.logger.error("Error encoding file %s:\n %s" % \ - (entry.get('name'), e)) - - different = False - content = None - if not os.path.exists(entry.get("name")): - # first, see if the target file exists at all; if not, - # they're clearly different - different = True - content = "" - else: - # next, see if the size of the target file is different - # from the size of the desired content - try: - estat = os.stat(entry.get('name')) - except OSError: - err = sys.exc_info()[1] - self.logger.error("Failed to stat %s: %s" % - (err.filename, err)) - return False - if len(tempdata) != estat[stat.ST_SIZE]: - different = True - else: - # finally, read in the target file and compare them - # directly. 
comparison could be done with a checksum, - # which might be faster for big binary files, but - # slower for everything else - try: - content = open(entry.get('name')).read() - except IOError: - err = sys.exc_info()[1] - self.logger.error("Failed to read %s: %s" % - (err.filename, err)) - return False - different = content != tempdata - - if different: - if self.setup['interactive']: - prompt = [entry.get('qtext', '')] - if not tbin and content is None: - # it's possible that we figured out the files are - # different without reading in the local file. if - # the supplied version of the file is not binary, - # we now have to read in the local file to figure - # out if _it_ is binary, and either include that - # fact or the diff in our prompts for -I - try: - content = open(entry.get('name')).read() - except IOError: - err = sys.exc_info()[1] - self.logger.error("Failed to read %s: %s" % - (err.filename, err)) - return False - if tbin or not isString(content, self.setup['encoding']): - # don't compute diffs if the file is binary - prompt.append('Binary file, no printable diff') - else: - diff = self._diff(content, tempdata, - difflib.unified_diff, - filename=entry.get("name")) - if diff: - udiff = '\n'.join(diff) - try: - prompt.append(udiff.decode(self.setup['encoding'])) - except UnicodeDecodeError: - prompt.append("Binary file, no printable diff") - else: - prompt.append("Diff took too long to compute, no " - "printable diff") - prompt.append("Install %s %s: (y/N): " % (entry.tag, - entry.get('name'))) - entry.set("qtext", "\n".join(prompt)) - - if entry.get('sensitive', 'false').lower() != 'true': - if content is None: - # it's possible that we figured out the files are - # different without reading in the local file. 
we - # now have to read in the local file to figure out - # if _it_ is binary, and either include the whole - # file or the diff for reports - try: - content = open(entry.get('name')).read() - except IOError: - err = sys.exc_info()[1] - self.logger.error("Failed to read %s: %s" % - (err.filename, err)) - return False - - if tbin or not isString(content, self.setup['encoding']): - # don't compute diffs if the file is binary - entry.set('current_bfile', binascii.b2a_base64(content)) - else: - diff = self._diff(content, tempdata, difflib.ndiff, - filename=entry.get("name")) - if diff: - entry.set("current_bdiff", - binascii.b2a_base64("\n".join(diff))) - elif not tbin and isString(content, self.setup['encoding']): - entry.set('current_bfile', binascii.b2a_base64(content)) - elif permissionStatus == False and self.setup['interactive']: - prompt = [entry.get('qtext', '')] - prompt.append("Install %s %s: (y/N): " % (entry.tag, - entry.get('name'))) - entry.set("qtext", "\n".join(prompt)) - - - return permissionStatus and not different - - def Installfile(self, entry): - """Install Path type='file' entry.""" - self.logger.info("Installing file %s" % (entry.get('name'))) - - parent = "/".join(entry.get('name').split('/')[:-1]) - if parent: - try: - os.stat(parent) - except: - self.logger.debug('Creating parent path for config file %s' % \ - (entry.get('name'))) - current = '/' - for next in parent.split('/')[1:]: - current += next + '/' - try: - sloc = os.stat(current) - try: - if not stat.S_ISDIR(sloc[stat.ST_MODE]): - self.logger.debug('%s is not a directory; recreating' \ - % (current)) - os.unlink(current) - os.mkdir(current) - except OSError: - return False - except OSError: - try: - self.logger.debug("Creating non-existent path %s" % current) - os.mkdir(current) - except OSError: - return False - - # If we get here, then the parent directory should exist - if (entry.get("paranoid", False) in ['true', 'True']) and \ - self.setup.get("paranoid", False) and not \ - 
(entry.get('current_exists', 'true') == 'false'): - bkupnam = entry.get('name').replace('/', '_') - # current list of backups for this file - try: - bkuplist = [f for f in os.listdir(self.ppath) if - f.startswith(bkupnam)] - except OSError: - e = sys.exc_info()[1] - self.logger.error("Failed to create backup list in %s: %s" % - (self.ppath, e.strerror)) - return False - bkuplist.sort() - while len(bkuplist) >= int(self.max_copies): - # remove the oldest backup available - oldest = bkuplist.pop(0) - self.logger.info("Removing %s" % oldest) - try: - os.remove("%s/%s" % (self.ppath, oldest)) - except: - self.logger.error("Failed to remove %s/%s" % \ - (self.ppath, oldest)) - return False - try: - # backup existing file - shutil.copy(entry.get('name'), - "%s/%s_%s" % (self.ppath, bkupnam, - datetime.isoformat(datetime.now()))) - self.logger.info("Backup of %s saved to %s" % - (entry.get('name'), self.ppath)) - except IOError: - e = sys.exc_info()[1] - self.logger.error("Failed to create backup file for %s" % \ - (entry.get('name'))) - self.logger.error(e) - return False - try: - newfile = open("%s.new"%(entry.get('name')), 'w') - if entry.get('encoding', 'ascii') == 'base64': - filedata = binascii.a2b_base64(entry.text) - elif entry.get('empty', 'false') == 'true': - filedata = '' - else: - if type(entry.text) == unicode: - filedata = entry.text.encode(self.setup['encoding']) - else: - filedata = entry.text - newfile.write(filedata) - newfile.close() - try: - os.chown(newfile.name, normUid(entry), normGid(entry)) - except KeyError: - self.logger.error("Failed to chown %s to %s:%s" % - (newfile.name, entry.get('owner'), - entry.get('group'))) - os.chown(newfile.name, 0, 0) - except OSError: - err = sys.exc_info()[1] - self.logger.error("Could not chown %s: %s" % (newfile.name, - err)) - os.chmod(newfile.name, calcPerms(stat.S_IFREG, entry.get('perms'))) - os.rename(newfile.name, entry.get('name')) - if entry.get('mtime', '-1') != '-1': - try: - 
os.utime(entry.get('name'), (int(entry.get('mtime')), - int(entry.get('mtime')))) - except: - self.logger.error("File %s mtime fix failed" \ - % (entry.get('name'))) - return False - return True - except (OSError, IOError): - err = sys.exc_info()[1] - if err.errno == errno.EACCES: - self.logger.info("Failed to open %s for writing" % (entry.get('name'))) - else: - print(err) - return False - - def Verifyhardlink(self, entry, _): - """Verify HardLink entry.""" - if entry.get('to') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % \ - (entry.get('name'))) - return False - try: - if os.path.samefile(entry.get('name'), entry.get('to')): - return True - self.logger.debug("Hardlink %s is incorrect" % \ - entry.get('name')) - entry.set('qtext', "Link %s to %s? [y/N] " % \ - (entry.get('name'), - entry.get('to'))) - return False - except OSError: - entry.set('current_exists', 'false') - entry.set('qtext', "Link %s to %s? [y/N] " % \ - (entry.get('name'), - entry.get('to'))) - return False - - def Installhardlink(self, entry): - """Install HardLink entry.""" - if entry.get('to') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % \ - (entry.get('name'))) - return False - self.logger.info("Installing Hardlink %s" % (entry.get('name'))) - if os.path.lexists(entry.get('name')): - try: - fmode = os.lstat(entry.get('name'))[stat.ST_MODE] - if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode): - self.logger.debug("Non-directory entry already exists at " - "%s. Unlinking entry." 
% (entry.get('name'))) - os.unlink(entry.get('name')) - elif stat.S_ISDIR(fmode): - self.logger.debug("Directory already exists at %s" % \ - (entry.get('name'))) - self.cmd.run("mv %s/ %s.bak" % \ - (entry.get('name'), - entry.get('name'))) - else: - os.unlink(entry.get('name')) - except OSError: - self.logger.info("Hardlink %s cleanup failed" % \ - (entry.get('name'))) - try: - os.link(entry.get('to'), entry.get('name')) - return True - except OSError: - return False - - def Verifynonexistent(self, entry, _): - """Verify nonexistent entry.""" - # return true if path does _not_ exist - return not os.path.lexists(entry.get('name')) - - def Installnonexistent(self, entry): - '''Remove nonexistent entries''' - ename = entry.get('name') - if entry.get('recursive') in ['True', 'true']: - # ensure that configuration spec is consistent first - if [e for e in self.buildModlist() \ - if e.startswith(ename) and e != ename]: - self.logger.error('Not installing %s. One or more files ' - 'in this directory are specified in ' - 'your configuration.' % ename) - return False - try: - shutil.rmtree(ename) - except OSError: - e = sys.exc_info()[1] - self.logger.error('Failed to remove %s: %s' % (ename, - e.strerror)) - else: - if os.path.isdir(ename): - try: - os.rmdir(ename) - return True - except OSError: - e = sys.exc_info()[1] - self.logger.error('Failed to remove %s: %s' % (ename, - e.strerror)) - return False - try: - os.remove(ename) - return True - except OSError: - e = sys.exc_info()[1] - self.logger.error('Failed to remove %s: %s' % (ename, - e.strerror)) - return False - - def Verifypermissions(self, entry, _): - """Verify Path type='permissions' entry""" - if entry.get('perms') == None or \ - entry.get('owner') == None or \ - entry.get('group') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' 
% (entry.get('name'))) - return False - if entry.get('recursive') in ['True', 'true']: - # verify ownership information recursively - owner = normUid(entry) - group = normGid(entry) - - for root, dirs, files in os.walk(entry.get('name')): - for p in dirs + files: - path = os.path.join(root, p) - pstat = os.stat(path) - if owner != pstat.st_uid: - # owner mismatch for path - entry.set('current_owner', str(pstat.st_uid)) - self.logger.debug("%s %s ownership wrong" % \ - (entry.tag, path)) - nqtext = entry.get('qtext', '') + '\n' - nqtext += ("Owner for path %s is incorrect. " - "Current owner is %s but should be %s\n" % \ - (path, pstat.st_uid, entry.get('owner'))) - nqtext += ("\nInstall %s %s: (y/N): " % - (entry.tag, entry.get('name'))) - entry.set('qtext', nqtext) - return False - if group != pstat.st_gid: - # group mismatch for path - entry.set('current_group', str(pstat.st_gid)) - self.logger.debug("%s %s group wrong" % \ - (entry.tag, path)) - nqtext = entry.get('qtext', '') + '\n' - nqtext += ("Group for path %s is incorrect. 
" - "Current group is %s but should be %s\n" % \ - (path, pstat.st_gid, entry.get('group'))) - nqtext += ("\nInstall %s %s: (y/N): " % - (entry.tag, entry.get('name'))) - entry.set('qtext', nqtext) - return False - return self.Verifydirectory(entry, _) - - def _diff(self, content1, content2, difffunc, filename=None): - rv = [] - start = time.time() - longtime = False - for diffline in difffunc(content1.split('\n'), - content2.split('\n')): - now = time.time() - rv.append(diffline) - if now - start > 5 and not longtime: - if filename: - self.logger.info("Diff of %s taking a long time" % - filename) - else: - self.logger.info("Diff taking a long time") - longtime = True - elif now - start > 30: - if filename: - self.logger.error("Diff of %s took too long; giving up" % - filename) - else: - self.logger.error("Diff took too long; giving up") - return False - return rv - - def Installpermissions(self, entry): - """Install POSIX permissions""" - if entry.get('perms') == None or \ - entry.get('owner') == None or \ - entry.get('group') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' 
% (entry.get('name'))) - return False - plist = [entry.get('name')] - if entry.get('recursive') in ['True', 'true']: - # verify ownership information recursively - owner = normUid(entry) - group = normGid(entry) - - for root, dirs, files in os.walk(entry.get('name')): - for p in dirs + files: - path = os.path.join(root, p) - pstat = os.stat(path) - if owner != pstat.st_uid or group != pstat.st_gid: - # owner mismatch for path - plist.append(path) - try: - for p in plist: - os.chown(p, normUid(entry), normGid(entry)) - os.chmod(p, calcPerms(stat.S_IFDIR, entry.get('perms'))) - return True - except (OSError, KeyError): - self.logger.error('Permission fixup failed for %s' % \ - (entry.get('name'))) - return False - - def Verifysymlink(self, entry, _): - """Verify Path type='symlink' entry.""" - if entry.get('to') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % \ - (entry.get('name'))) - return False - try: - sloc = os.readlink(entry.get('name')) - if sloc == entry.get('to'): - return True - self.logger.debug("Symlink %s points to %s, should be %s" % \ - (entry.get('name'), sloc, entry.get('to'))) - entry.set('current_to', sloc) - entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'), - entry.get('to'))) - return False - except OSError: - entry.set('current_exists', 'false') - entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'), - entry.get('to'))) - return False - - def Installsymlink(self, entry): - """Install Path type='symlink' entry.""" - if entry.get('to') == None: - self.logger.error('Entry %s not completely specified. ' - 'Try running bcfg2-lint.' % \ - (entry.get('name'))) - return False - self.logger.info("Installing symlink %s" % (entry.get('name'))) - if os.path.lexists(entry.get('name')): - try: - fmode = os.lstat(entry.get('name'))[stat.ST_MODE] - if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode): - self.logger.debug("Non-directory entry already exists at " - "%s. Unlinking entry." 
% \ - (entry.get('name'))) - os.unlink(entry.get('name')) - elif stat.S_ISDIR(fmode): - self.logger.debug("Directory already exists at %s" %\ - (entry.get('name'))) - self.cmd.run("mv %s/ %s.bak" % \ - (entry.get('name'), - entry.get('name'))) - else: - os.unlink(entry.get('name')) - except OSError: - self.logger.info("Symlink %s cleanup failed" %\ - (entry.get('name'))) - try: - os.symlink(entry.get('to'), entry.get('name')) - return True - except OSError: - return False - - def InstallPath(self, entry): - """Dispatch install to the proper method according to type""" - ret = getattr(self, 'Install%s' % entry.get('type')) - return ret(entry) - - def VerifyPath(self, entry, _): - """Dispatch verify to the proper method according to type""" - ret = getattr(self, 'Verify%s' % entry.get('type')) - return ret(entry, _) diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py new file mode 100644 index 000000000..0ea4128f7 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py @@ -0,0 +1,66 @@ +import os +import sys +try: + from base import POSIXTool, device_map +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool, device_map") + +class POSIXDevice(POSIXTool): + __req__ = ['name', 'dev_type', 'perms', 'owner', 'group'] + + def fully_specified(self, entry): + if entry.get('dev_type') in ['block', 'char']: + # check if major/minor are properly specified + if (entry.get('major') == None or + entry.get('minor') == None): + return False + return True + + def verify(self, entry, modlist): + """Verify device entry.""" + ondisk = self._exists(entry) + if not ondisk: + return False + + # attempt to verify device properties as specified in config + rv = True + dev_type = entry.get('dev_type') + if dev_type in ['block', 'char']: + major = int(entry.get('major')) + minor = int(entry.get('minor')) + if major != os.major(ondisk.st_rdev): + msg = ("Major number for device %s is incorrect. 
" + "Current major is %s but should be %s" % + (entry.get("name"), os.major(ondisk.st_rdev), major)) + self.logger.debug('POSIX: ' + msg) + entry.set('qtext', entry.get('qtext', '') + "\n" + msg) + rv = False + + if minor != os.minor(ondisk.st_rdev): + msg = ("Minor number for device %s is incorrect. " + "Current minor is %s but should be %s" % + (entry.get("name"), os.minor(ondisk.st_rdev), minor)) + self.logger.debug('POSIX: ' + msg) + entry.set('qtext', entry.get('qtext', '') + "\n" + msg) + rv = False + return POSIXTool.verify(self, entry, modlist) and rv + + def install(self, entry): + if not self._exists(entry, remove=True): + try: + dev_type = entry.get('dev_type') + mode = device_map[dev_type] | int(entry.get('perms'), 8) + if dev_type in ['block', 'char']: + major = int(entry.get('major')) + minor = int(entry.get('minor')) + device = os.makedev(major, minor) + os.mknod(entry.get('name'), mode, device) + else: + os.mknod(entry.get('name'), mode) + except (KeyError, OSError, ValueError): + err = sys.exc_info()[1] + self.logger.error('POSIX: Failed to install %s: %s' % + (entry.get('name'), err)) + return False + return POSIXTool.install(self, entry) diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Directory.py b/src/lib/Bcfg2/Client/Tools/POSIX/Directory.py new file mode 100644 index 000000000..d2d383f66 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Directory.py @@ -0,0 +1,90 @@ +import os +import sys +import stat +import shutil +import Bcfg2.Client.XML +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") + +class POSIXDirectory(POSIXTool): + __req__ = ['name', 'perms', 'owner', 'group'] + + def verify(self, entry, modlist): + ondisk = self._exists(entry) + if not ondisk: + return False + + if not stat.S_ISDIR(ondisk[stat.ST_MODE]): + self.logger.info("POSIX: %s is not a directory" % entry.get('name')) + return False + + pruneTrue = True + if entry.get('prune', 
'false').lower() == 'true': + # check for any extra entries when prune='true' attribute is set + try: + extras = [os.path.join(entry.get('name'), ent) + for ent in os.listdir(entry.get('name')) + if os.path.join(entry.get('name'), + ent) not in modlist] + if extras: + pruneTrue = False + msg = "Directory %s contains extra entries: %s" % \ + (entry.get('name'), "; ".join(extras)) + self.logger.info("POSIX: " + msg) + entry.set('qtext', entry.get('qtext', '') + '\n' + msg) + for extra in extras: + Bcfg2.Client.XML.SubElement(entry, 'Prune', path=extra) + except OSError: + pruneTrue = True + + return POSIXTool.verify(self, entry, modlist) and pruneTrue + + def install(self, entry): + """Install device entries.""" + fmode = self._exists(entry) + + if fmode and not stat.S_ISDIR(fmode[stat.ST_MODE]): + self.logger.info("POSIX: Found a non-directory entry at %s, " + "removing" % entry.get('name')) + try: + os.unlink(entry.get('name')) + fmode = False + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to unlink %s: %s" % + (entry.get('name'), err)) + return False + elif fmode: + self.logger.debug("POSIX: Found a pre-existing directory at %s" % + entry.get('name')) + + rv = True + if not fmode: + rv &= self._makedirs(entry) + + if entry.get('prune', 'false') == 'true': + ulfailed = False + for pent in entry.findall('Prune'): + pname = pent.get('path') + ulfailed = False + if os.path.isdir(pname): + rm = shutil.rmtree + else: + rm = os.unlink + try: + self.logger.debug("POSIX: Removing %s" % pname) + rm(pname) + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to unlink %s: %s" % + (pname, err)) + ulfailed = True + if ulfailed: + # even if prune failed, we still want to install the + # entry to make sure that we get permissions and + # whatnot set + rv = False + return POSIXTool.install(self, entry) and rv diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/File.py b/src/lib/Bcfg2/Client/Tools/POSIX/File.py new file mode 
100644 index 000000000..26550078e --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/File.py @@ -0,0 +1,225 @@ +import os +import sys +import stat +import time +import difflib +import tempfile +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") +from Bcfg2.Bcfg2Py3k import unicode, b64encode, b64decode + +class POSIXFile(POSIXTool): + __req__ = ['name', 'perms', 'owner', 'group'] + + def fully_specified(self, entry): + return entry.text is not None or entry.get('empty', 'false') == 'true' + + def _is_string(self, strng, encoding): + """ Returns true if the string contains no ASCII control + characters and can be decoded from the specified encoding. """ + for char in strng: + if ord(char) < 9 or ord(char) > 13 and ord(char) < 32: + return False + if not hasattr(strng, "decode"): + # py3k + return True + try: + strng.decode(encoding) + return True + except: + return False + + def _get_data(self, entry): + is_binary = False + if entry.get('encoding', 'ascii') == 'base64': + tempdata = b64decode(entry.text) + is_binary = True + + elif entry.get('empty', 'false') == 'true': + tempdata = '' + else: + tempdata = entry.text + if isinstance(tempdata, unicode) and unicode != str: + try: + tempdata = tempdata.encode(self.setup['encoding']) + except UnicodeEncodeError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Error encoding file %s: %s" % + (entry.get('name'), err)) + return (tempdata, is_binary) + + def verify(self, entry, modlist): + ondisk = self._exists(entry) + tempdata, is_binary = self._get_data(entry) + + different = False + content = None + if not ondisk: + # first, see if the target file exists at all; if not, + # they're clearly different + different = True + content = "" + elif len(tempdata) != ondisk[stat.ST_SIZE]: + # next, see if the size of the target file is different + # from the size of the desired content + different = True + else: + # finally, read in the 
target file and compare them + # directly. comparison could be done with a checksum, + # which might be faster for big binary files, but slower + # for everything else + try: + content = open(entry.get('name')).read() + except IOError: + self.logger.error("POSIX: Failed to read %s: %s" % + (entry.get("name"), sys.exc_info()[1])) + return False + different = content != tempdata + + if different: + self.logger.debug("POSIX: %s has incorrect contents" % + entry.get("name")) + self._get_diffs( + entry, interactive=self.setup['interactive'], + sensitive=entry.get('sensitive', 'false').lower() == 'true', + is_binary=is_binary, content=content) + return POSIXTool.verify(self, entry, modlist) and not different + + def _write_tmpfile(self, entry): + filedata, _ = self._get_data(entry) + # get a temp file to write to that is in the same directory as + # the existing file in order to preserve any permissions + # protections on that directory, and also to avoid issues with + # /tmp set nosetuid while creating files that are supposed to + # be setuid + try: + (newfd, newfile) = \ + tempfile.mkstemp(prefix=os.path.basename(entry.get("name")), + dir=os.path.dirname(entry.get("name"))) + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to create temp file in %s: %s" % + (os.path.dirname(entry.get('name')), err)) + return False + try: + os.fdopen(newfd, 'w').write(filedata) + except (OSError, IOError): + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to open temp file %s for writing " + "%s: %s" % + (newfile, entry.get("name"), err)) + return False + return newfile + + def _rename_tmpfile(self, newfile, entry): + try: + os.rename(newfile, entry.get('name')) + return True + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to rename temp file %s to %s: %s" % + (newfile, entry.get('name'), err)) + try: + os.unlink(newfile) + except: + err = sys.exc_info()[1] + self.logger.error("POSIX: Could not remove temp file %s: 
%s" % + (newfile, err)) + return False + + def install(self, entry): + """Install device entries.""" + if not os.path.exists(os.path.dirname(entry.get('name'))): + if not self._makedirs(entry, + path=os.path.dirname(entry.get('name'))): + return False + newfile = self._write_tmpfile(entry) + if not newfile: + return False + rv = self._set_perms(entry, path=newfile) + if not self._rename_tmpfile(newfile, entry): + return False + + return POSIXTool.install(self, entry) and rv + + def _get_diffs(self, entry, interactive=False, sensitive=False, + is_binary=False, content=None): + if not interactive and sensitive: + return + + prompt = [entry.get('qtext', '')] + attrs = dict() + if content is None: + # it's possible that we figured out the files are + # different without reading in the local file. if the + # supplied version of the file is not binary, we now have + # to read in the local file to figure out if _it_ is + # binary, and either include that fact or the diff in our + # prompts for -I and the reports + try: + content = open(entry.get('name')).read() + except IOError: + self.logger.error("POSIX: Failed to read %s: %s" % + (entry.get("name"), sys.exc_info()[1])) + return False + if not is_binary: + is_binary |= not self._is_string(content, self.setup['encoding']) + if is_binary: + # don't compute diffs if the file is binary + prompt.append('Binary file, no printable diff') + attrs['current_bfile'] = b64encode(content) + else: + if interactive: + diff = self._diff(content, self._get_data(entry)[0], + difflib.unified_diff, + filename=entry.get("name")) + if diff: + udiff = ''.join(diff) + if hasattr(udiff, "decode"): + udiff = udiff.decode(self.setup['encoding']) + try: + prompt.append(udiff) + except UnicodeEncodeError: + prompt.append("Could not encode diff") + else: + prompt.append("Diff took too long to compute, no " + "printable diff") + if not sensitive: + diff = self._diff(content, self._get_data(entry)[0], + difflib.ndiff, filename=entry.get("name")) + if 
diff: + attrs["current_bdiff"] = b64encode("\n".join(diff)) + else: + attrs['current_bfile'] = b64encode(content) + if interactive: + entry.set("qtext", "\n".join(prompt)) + if not sensitive: + for attr, val in attrs.items(): + entry.set(attr, val) + + def _diff(self, content1, content2, difffunc, filename=None): + rv = [] + start = time.time() + longtime = False + for diffline in difffunc(content1.split('\n'), + content2.split('\n')): + now = time.time() + rv.append(diffline) + if now - start > 5 and not longtime: + if filename: + self.logger.info("POSIX: Diff of %s taking a long time" % + filename) + else: + self.logger.info("POSIX: Diff taking a long time") + longtime = True + elif now - start > 30: + if filename: + self.logger.error("POSIX: Diff of %s took too long; giving " + "up" % filename) + else: + self.logger.error("POSIX: Diff took too long; giving up") + return False + return rv diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py b/src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py new file mode 100644 index 000000000..ca7a23717 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py @@ -0,0 +1,43 @@ +import os +import sys +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") + +class POSIXHardlink(POSIXTool): + __req__ = ['name', 'to'] + + def verify(self, entry, modlist): + rv = True + + try: + if not os.path.samefile(entry.get('name'), entry.get('to')): + msg = "Hardlink %s is incorrect" % entry.get('name') + self.logger.debug("POSIX: " + msg) + entry.set('qtext', "\n".join([entry.get('qtext', ''), msg])) + rv = False + except OSError: + self.logger.debug("POSIX: %s %s does not exist" % + (entry.tag, entry.get("name"))) + entry.set('current_exists', 'false') + return False + + return POSIXTool.verify(self, entry, modlist) and rv + + def install(self, entry): + ondisk = self._exists(entry, remove=True) + if ondisk: + self.logger.info("POSIX: Hardlink %s cleanup 
failed" % + entry.get('name')) + try: + os.link(entry.get('to'), entry.get('name')) + rv = True + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to create hardlink %s to %s: %s" % + (entry.get('name'), entry.get('to'), err)) + rv = False + return POSIXTool.install(self, entry) and rv + diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py new file mode 100644 index 000000000..c870ca0ed --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py @@ -0,0 +1,45 @@ +import os +import sys +import shutil +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") + +class POSIXNonexistent(POSIXTool): + __req__ = ['name'] + + def verify(self, entry, _): + if os.path.lexists(entry.get('name')): + self.logger.debug("POSIX: %s exists but should not" % + entry.get("name")) + return False + return True + + def install(self, entry): + ename = entry.get('name') + if entry.get('recursive', '').lower() == 'true': + # ensure that configuration spec is consistent first + for struct in self.config.getchildren(): + for entry in struct.getchildren(): + if (entry.tag == 'Path' and + entry.get('type') != 'nonexistent' and + entry.get('name').startswith(ename)): + self.logger.error('POSIX: Not removing %s. One or ' + 'more files in this directory are ' + 'specified in your configuration.' 
% + ename) + return False + rm = shutil.rmtree + elif os.path.isdir(ename): + rm = os.rmdir + else: + rm = os.remove + try: + rm(ename) + return True + except OSError: + err = sys.exc_info()[1] + self.logger.error('POSIX: Failed to remove %s: %s' % (ename, err)) + return False diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py b/src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py new file mode 100644 index 000000000..321376b98 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py @@ -0,0 +1,11 @@ +import os +import sys +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") + +class POSIXPermissions(POSIXTool): + __req__ = ['name', 'perms', 'owner', 'group'] + diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py b/src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py new file mode 100644 index 000000000..fb303bdbe --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py @@ -0,0 +1,46 @@ +import os +import sys +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") + +class POSIXSymlink(POSIXTool): + __req__ = ['name', 'to'] + + def verify(self, entry, modlist): + rv = True + + try: + sloc = os.readlink(entry.get('name')) + if sloc != entry.get('to'): + entry.set('current_to', sloc) + msg = ("Symlink %s points to %s, should be %s" % + (entry.get('name'), sloc, entry.get('to'))) + self.logger.debug("POSIX: " + msg) + entry.set('qtext', "\n".join([entry.get('qtext', ''), msg])) + rv = False + except OSError: + self.logger.debug("POSIX: %s %s does not exist" % + (entry.tag, entry.get("name"))) + entry.set('current_exists', 'false') + return False + + return POSIXTool.verify(self, entry, modlist) and rv + + def install(self, entry): + ondisk = self._exists(entry, remove=True) + if ondisk: + self.logger.info("POSIX: Symlink %s cleanup failed" % + entry.get('name')) + try: + 
os.symlink(entry.get('to'), entry.get('name')) + rv = True + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to create symlink %s to %s: %s" % + (entry.get('name'), entry.get('to'), err)) + rv = False + return POSIXTool.install(self, entry) and rv + diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py new file mode 100644 index 000000000..46631eb06 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py @@ -0,0 +1,151 @@ +"""All POSIX Type client support for Bcfg2.""" + +import os +import re +import sys +import shutil +import pkgutil +from datetime import datetime +import Bcfg2.Client.Tools +try: + from base import POSIXTool +except ImportError: + # py3k, incompatible syntax with py2.4 + exec("from .base import POSIXTool") + +class POSIX(Bcfg2.Client.Tools.Tool): + """POSIX File support code.""" + name = 'POSIX' + + def __init__(self, logger, setup, config): + Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + self.ppath = setup['ppath'] + self.max_copies = setup['max_copies'] + self._load_handlers() + self.logger.debug("POSIX: Handlers loaded: %s" % + (", ".join(self._handlers.keys()))) + self.__req__ = dict(Path=dict()) + for etype, hdlr in self._handlers.items(): + self.__req__['Path'][etype] = hdlr.__req__ + self.__handles__.append(('Path', etype)) + # Tool.__init__() sets up the list of handled entries, but we + # need to do it again after __handles__ has been populated. we + # can't populate __handles__ when the class is created because + # _load_handlers() _must_ be called at run-time, not at + # compile-time. + for struct in config: + self.handled = [e for e in struct if self.handlesEntry(e)] + + def _load_handlers(self): + # this must be called at run-time, not at compile-time, or we + # get wierd circular import issues. 
+ self._handlers = dict() + if hasattr(pkgutil, 'walk_packages'): + submodules = pkgutil.walk_packages(path=__path__) + else: + # python 2.4 + import glob + submodules = [] + for path in __path__: + for submodule in glob.glob(os.path.join(path, "*.py")): + mod = os.path.splitext(os.path.basename(submodule))[0] + if mod not in ['__init__']: + submodules.append((None, mod, True)) + + for submodule in submodules: + if submodule[1] == 'base': + continue + module = getattr(__import__("%s.%s" % + (__name__, + submodule[1])).Client.Tools.POSIX, + submodule[1]) + hdlr = getattr(module, "POSIX" + submodule[1]) + if POSIXTool in hdlr.__mro__: + # figure out what entry type this handler handles + etype = hdlr.__name__[5:].lower() + self._handlers[etype] = hdlr(self.logger, + self.setup, + self.config) + + def canVerify(self, entry): + if not Bcfg2.Client.Tools.Tool.canVerify(self, entry): + return False + if not self._handlers[entry.get("type")].fully_specified(entry): + self.logger.error('POSIX: Cannot verify incomplete entry %s. ' + 'Try running bcfg2-lint.' % + entry.get('name')) + return False + return True + + def canInstall(self, entry): + """Check if entry is complete for installation.""" + if not Bcfg2.Client.Tools.Tool.canInstall(self, entry): + return False + if not self._handlers[entry.get("type")].fully_specified(entry): + self.logger.error('POSIX: Cannot install incomplete entry %s. ' + 'Try running bcfg2-lint.' 
% + entry.get('name')) + return False + return True + + def InstallPath(self, entry): + """Dispatch install to the proper method according to type""" + self.logger.debug("POSIX: Installing entry %s:%s:%s" % + (entry.tag, entry.get("type"), entry.get("name"))) + self._paranoid_backup(entry) + return self._handlers[entry.get("type")].install(entry) + + def VerifyPath(self, entry, modlist): + """Dispatch verify to the proper method according to type""" + self.logger.debug("POSIX: Verifying entry %s:%s:%s" % + (entry.tag, entry.get("type"), entry.get("name"))) + ret = self._handlers[entry.get("type")].verify(entry, modlist) + if self.setup['interactive'] and not ret: + entry.set('qtext', + '%s\nInstall %s %s: (y/N) ' % + (entry.get('qtext', ''), + entry.get('type'), entry.get('name'))) + return ret + + def _prune_old_backups(self, entry): + bkupnam = entry.get('name').replace('/', '_') + bkup_re = re.compile(bkupnam + \ + r'_\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}$') + # current list of backups for this file + try: + bkuplist = [f for f in os.listdir(self.ppath) if + bkup_re.match(f)] + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to create backup list in %s: %s" % + (self.ppath, err)) + return + bkuplist.sort() + while len(bkuplist) >= int(self.max_copies): + # remove the oldest backup available + oldest = bkuplist.pop(0) + self.logger.info("POSIX: Removing old backup %s" % oldest) + try: + os.remove(os.path.join(self.ppath, oldest)) + except OSError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to remove old backup %s: %s" % + (os.path.join(self.ppath, oldest), err)) + + def _paranoid_backup(self, entry): + if (entry.get("paranoid", 'false').lower() == 'true' and + self.setup.get("paranoid", False) and + entry.get('current_exists', 'true') == 'true' and + not os.path.isdir(entry.get("name"))): + self._prune_old_backups(entry) + bkupnam = "%s_%s" % (entry.get('name').replace('/', '_'), + 
datetime.isoformat(datetime.now())) + bfile = os.path.join(self.ppath, bkupnam) + try: + shutil.copy(entry.get('name'), bfile) + self.logger.info("POSIX: Backup of %s saved to %s" % + (entry.get('name'), bfile)) + except IOError: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to create backup file for %s: " + "%s" % (entry.get('name'), err)) diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py new file mode 100644 index 000000000..6952d0f7b --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py @@ -0,0 +1,642 @@ +import os +import sys +import pwd +import grp +import stat +import shutil +import Bcfg2.Client.Tools +import Bcfg2.Client.XML + +try: + import selinux + has_selinux = True +except ImportError: + has_selinux = False + +try: + import posix1e + has_acls = True + + # map between permissions characters and numeric ACL constants + acl_map = dict(r=posix1e.ACL_READ, + w=posix1e.ACL_WRITE, + x=posix1e.ACL_EXECUTE) +except ImportError: + has_acls = False + acl_map = dict(r=4, w=2, x=1) + +# map between dev_type attribute and stat constants +device_map = dict(block=stat.S_IFBLK, + char=stat.S_IFCHR, + fifo=stat.S_IFIFO) + + +class POSIXTool(Bcfg2.Client.Tools.Tool): + def fully_specified(self, entry): + # checking is done by __req__ + return True + + def verify(self, entry, modlist): + if not self._verify_metadata(entry): + return False + if entry.get('recursive', 'false').lower() == 'true': + # verify ownership information recursively + for root, dirs, files in os.walk(entry.get('name')): + for p in dirs + files: + if not self._verify_metadata(entry, + path=os.path.join(root, p)): + return False + return True + + def install(self, entry): + plist = [entry.get('name')] + rv = True + rv &= self._set_perms(entry) + if entry.get('recursive', 'false').lower() == 'true': + # set metadata recursively + for root, dirs, files in os.walk(entry.get('name')): + for path in dirs + files: + rv &= 
self._set_perms(entry, path=os.path.join(root, path)) + return rv + + def _exists(self, entry, remove=False): + try: + # check for existing paths and optionally remove them + ondisk = os.lstat(entry.get('name')) + if remove: + if os.path.isdir(entry.get('name')): + rm = shutil.rmtree + else: + rm = os.unlink + try: + rm(entry.get('name')) + return False + except OSError: + err = sys.exc_info()[1] + self.logger.warning('POSIX: Failed to unlink %s: %s' % + (entry.get('name'), err)) + return ondisk # probably still exists + else: + return ondisk + except OSError: + return False + + def _set_perms(self, entry, path=None): + if path is None: + path = entry.get("name") + + rv = True + if entry.get("owner") and entry.get("group"): + try: + self.logger.debug("POSIX: Setting ownership of %s to %s:%s" % + (path, + self._norm_entry_uid(entry), + self._norm_entry_gid(entry))) + os.chown(path, self._norm_entry_uid(entry), + self._norm_entry_gid(entry)) + except KeyError: + self.logger.error('POSIX: Failed to change ownership of %s' % + path) + rv = False + os.chown(path, 0, 0) + except OSError: + self.logger.error('POSIX: Failed to change ownership of %s' % + path) + rv = False + + if entry.get("perms"): + configPerms = int(entry.get('perms'), 8) + if entry.get('dev_type'): + configPerms |= device_map[entry.get('dev_type')] + try: + self.logger.debug("POSIX: Setting permissions on %s to %s" % + (path, oct(configPerms))) + os.chmod(path, configPerms) + except (OSError, KeyError): + self.logger.error('POSIX: Failed to change permissions on %s' % + path) + rv = False + + if entry.get('mtime'): + try: + os.utime(entry.get('name'), (int(entry.get('mtime')), + int(entry.get('mtime')))) + except OSError: + self.logger.error("POSIX: Failed to set mtime of %s" % path) + rv = False + + rv &= self._set_secontext(entry, path=path) + rv &= self._set_acls(entry, path=path) + return rv + + + def _set_acls(self, entry, path=None): + """ set POSIX ACLs on the file on disk according to the 
config """ + if not has_acls: + if entry.findall("ACL"): + self.logger.debug("POSIX: ACLs listed for %s but no pylibacl " + "library installed" % entry.get('name')) + return True + + if path is None: + path = entry.get("name") + + try: + acl = posix1e.ACL(file=path) + except IOError: + err = sys.exc_info()[1] + if err.errno == 95: + # fs is mounted noacl + self.logger.error("POSIX: Cannot set ACLs on filesystem " + "mounted without ACL support: %s" % path) + else: + self.logger.error("POSIX: Error getting current ACLS on %s: %s" + % (path, err)) + return False + # clear ACLs out so we start fresh -- way easier than trying + # to add/remove/modify ACLs + for aclentry in acl: + if aclentry.tag_type in [posix1e.ACL_USER, posix1e.ACL_GROUP]: + acl.delete_entry(aclentry) + if os.path.isdir(path): + defacl = posix1e.ACL(filedef=path) + if not defacl.valid(): + # when a default ACL is queried on a directory that + # has no default ACL entries at all, you get an empty + # ACL, which is not valid. in this circumstance, we + # just copy the access ACL to get a base valid ACL + # that we can add things to. 
+ defacl = posix1e.ACL(acl=acl) + else: + for aclentry in defacl: + if aclentry.tag_type in [posix1e.ACL_USER, + posix1e.ACL_GROUP]: + defacl.delete_entry(aclentry) + else: + defacl = None + + for aclkey, perms in self._list_entry_acls(entry).items(): + atype, scope, qualifier = aclkey + if atype == "default": + if defacl is None: + self.logger.warning("POSIX: Cannot set default ACLs on " + "non-directory %s" % path) + continue + entry = posix1e.Entry(defacl) + else: + entry = posix1e.Entry(acl) + for perm in acl_map.values(): + if perm & perms: + entry.permset.add(perm) + entry.tag_type = scope + try: + if scope == posix1e.ACL_USER: + scopename = "user" + entry.qualifier = self._norm_uid(qualifier) + elif scope == posix1e.ACL_GROUP: + scopename = "group" + entry.qualifier = self._norm_gid(qualifier) + except (OSError, KeyError): + err = sys.exc_info()[1] + self.logger.error("POSIX: Could not resolve %s %s: %s" % + (scopename, qualifier, err)) + continue + acl.calc_mask() + + def _apply_acl(acl, path, atype=posix1e.ACL_TYPE_ACCESS): + if atype == posix1e.ACL_TYPE_ACCESS: + atype_str = "access" + else: + atype_str = "default" + if acl.valid(): + self.logger.debug("POSIX: Applying %s ACL to %s:" % (atype_str, + path)) + for line in str(acl).splitlines(): + self.logger.debug(" " + line) + try: + acl.applyto(path, atype) + return True + except: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to set ACLs on %s: %s" % + (path, err)) + return False + else: + self.logger.warning("POSIX: %s ACL created for %s was invalid:" + % (atype_str.title(), path)) + for line in str(acl).splitlines(): + self.logger.warning(" " + line) + return False + + rv = _apply_acl(acl, path) + if defacl: + defacl.calc_mask() + rv &= _apply_acl(defacl, path, posix1e.ACL_TYPE_DEFAULT) + return rv + + def _set_secontext(self, entry, path=None): + """ set the SELinux context of the file on disk according to the + config""" + if not has_selinux: + return True + + if path is None: + path = 
entry.get("name") + context = entry.get("secontext") + if context is None: + # no context listed + return True + + if context == '__default__': + try: + selinux.restorecon(path) + rv = True + except: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to restore SELinux context " + "for %s: %s" % (path, err)) + rv = False + else: + try: + rv = selinux.lsetfilecon(path, context) == 0 + except: + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to restore SELinux context " + "for %s: %s" % (path, err)) + rv = False + return rv + + def _norm_gid(self, gid): + """ This takes a group name or gid and returns the + corresponding gid. """ + try: + return int(gid) + except ValueError: + return int(grp.getgrnam(gid)[2]) + + def _norm_entry_gid(self, entry): + try: + return self._norm_gid(entry.get('group')) + except (OSError, KeyError): + err = sys.exc_info()[1] + self.logger.error('POSIX: GID normalization failed for %s on %s: %s' + % (entry.get('group'), entry.get('name'), err)) + return 0 + + def _norm_uid(self, uid): + """ This takes a username or uid and returns the + corresponding uid. """ + try: + return int(uid) + except ValueError: + return int(pwd.getpwnam(uid)[2]) + + def _norm_entry_uid(self, entry): + try: + return self._norm_uid(entry.get("owner")) + except (OSError, KeyError): + err = sys.exc_info()[1] + self.logger.error('POSIX: UID normalization failed for %s on %s: %s' + % (entry.get('owner'), entry.get('name'), err)) + return 0 + + def _norm_acl_perms(self, perms): + """ takes a representation of an ACL permset and returns a digit + representing the permissions entailed by it. 
representations can + either be a single octal digit, a string of up to three 'r', + 'w', 'x', or '-' characters, or a posix1e.Permset object""" + if hasattr(perms, 'test'): + # Permset object + return sum([p for p in acl_map.values() + if perms.test(p)]) + + try: + # single octal digit + rv = int(perms) + if rv > 0 and rv < 8: + return rv + else: + self.logger.error("POSIX: Permissions digit out of range in " + "ACL: %s" % perms) + return 0 + except ValueError: + # couldn't be converted to an int; process as a string + if len(perms) > 3: + self.logger.error("POSIX: Permissions string too long in ACL: " + "%s" % perms) + return 0 + rv = 0 + for char in perms: + if char == '-': + continue + elif char not in acl_map: + self.logger.warning("POSIX: Unknown permissions character " + "in ACL: %s" % char) + elif rv & acl_map[char]: + self.logger.warning("POSIX: Duplicate permissions " + "character in ACL: %s" % perms) + else: + rv |= acl_map[char] + return rv + + def _acl2string(self, aclkey, perms): + atype, scope, qualifier = aclkey + acl_str = [] + if atype == 'default': + acl_str.append(atype) + if scope == posix1e.ACL_USER: + acl_str.append("user") + elif scope == posix1e.ACL_GROUP: + acl_str.append("group") + acl_str.append(qualifier) + acl_str.append(self._acl_perm2string(perms)) + return ":".join(acl_str) + + def _acl_perm2string(self, perm): + rv = [] + for char in 'rwx': + if acl_map[char] & perm: + rv.append(char) + else: + rv.append('-') + return ''.join(rv) + + def _gather_data(self, path): + try: + ondisk = os.stat(path) + except OSError: + self.logger.debug("POSIX: %s does not exist" % path) + return (False, None, None, None, None, None) + + try: + owner = str(ondisk[stat.ST_UID]) + except OSError: + err = sys.exc_info()[1] + self.logger.debug("POSIX: Could not get current owner of %s: %s" % + (path, err)) + owner = None + except KeyError: + self.logger.error('POSIX: User resolution failed for %s' % path) + owner = None + + try: + group = 
str(ondisk[stat.ST_GID]) + except (OSError, KeyError): + err = sys.exc_info()[1] + self.logger.debug("POSIX: Could not get current group of %s: %s" % + (path, err)) + group = None + except KeyError: + self.logger.error('POSIX: Group resolution failed for %s' % path) + group = None + + try: + perms = oct(ondisk[stat.ST_MODE])[-4:] + except (OSError, KeyError, TypeError): + err = sys.exc_info()[1] + self.logger.debug("POSIX: Could not get current permissions of %s: " + "%s" % (path, err)) + perms = None + + if has_selinux: + try: + secontext = selinux.getfilecon(path)[1].split(":")[2] + except (OSError, KeyError): + err = sys.exc_info()[1] + self.logger.debug("POSIX: Could not get current SELinux " + "context of %s: %s" % (path, err)) + secontext = None + else: + secontext = None + + if has_acls: + acls = self._list_file_acls(path) + else: + acls = None + return (ondisk, owner, group, perms, secontext, acls) + + def _verify_metadata(self, entry, path=None): + """ generic method to verify perms, owner, group, secontext, acls, + and mtime """ + # allow setting an alternate path for recursive permissions checking + if path is None: + path = entry.get('name') + attrib = dict() + ondisk, attrib['current_owner'], attrib['current_group'], \ + attrib['current_perms'], attrib['current_secontext'], acls = \ + self._gather_data(path) + + if not ondisk: + entry.set('current_exists', 'false') + return False + + # we conditionally verify every bit of metadata only if it's + # specified on the entry. consequently, canVerify() and + # fully_specified() are preconditions of _verify_metadata(), + # since they will ensure that everything that needs to be + # specified actually is. this lets us gracefully handle + # symlink and hardlink entries, which have SELinux contexts + # but not other permissions, optional secontext and mtime + # attrs, and so on. 
+ configOwner, configGroup, configPerms, mtime = None, None, None, -1 + if entry.get('mtime', '-1') != '-1': + mtime = str(ondisk[stat.ST_MTIME]) + if entry.get("owner"): + configOwner = str(self._norm_entry_uid(entry)) + if entry.get("group"): + configGroup = str(self._norm_entry_gid(entry)) + if entry.get("perms"): + while len(entry.get('perms', '')) < 4: + entry.set('perms', '0' + entry.get('perms', '')) + configPerms = int(entry.get('perms'), 8) + + errors = [] + if configOwner and attrib['current_owner'] != configOwner: + errors.append("Owner for path %s is incorrect. " + "Current owner is %s but should be %s" % + (path, attrib['current_owner'], entry.get('owner'))) + + if configGroup and attrib['current_group'] != configGroup: + errors.append("Group for path %s is incorrect. " + "Current group is %s but should be %s" % + (path, attrib['current_group'], entry.get('group'))) + + if (configPerms and + oct(int(attrib['current_perms'], 8)) != oct(configPerms)): + errors.append("Permissions for path %s are incorrect. " + "Current permissions are %s but should be %s" % + (path, attrib['current_perms'], entry.get('perms'))) + + if entry.get('mtime'): + attrib['current_mtime'] = mtime + if mtime != entry.get('mtime', '-1'): + errors.append("mtime for path %s is incorrect. " + "Current mtime is %s but should be %s" % + (path, mtime, entry.get('mtime'))) + + if has_selinux and entry.get("secontext"): + if entry.get("secontext") == "__default__": + configContext = selinux.matchpathcon(path, 0)[1].split(":")[2] + else: + configContext = entry.get("secontext") + if attrib['current_secontext'] != configContext: + errors.append("SELinux context for path %s is incorrect. 
" + "Current context is %s but should be %s" % + (path, attrib['current_secontext'], + configContext)) + + if errors: + for error in errors: + self.logger.debug("POSIX: " + error) + entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors)) + if path == entry.get("name"): + for attr, val in attrib.items(): + if val is not None: + entry.set(attr, str(val)) + + aclVerifies = self._verify_acls(entry, path=path) + return aclVerifies and len(errors) == 0 + + def _list_entry_acls(self, entry): + wanted = dict() + for acl in entry.findall("ACL"): + if acl.get("scope") == "user": + scope = posix1e.ACL_USER + elif acl.get("scope") == "group": + scope = posix1e.ACL_GROUP + else: + self.logger.error("POSIX: Unknown ACL scope %s" % + acl.get("scope")) + continue + wanted[(acl.get("type"), scope, acl.get(acl.get("scope")))] = \ + self._norm_acl_perms(acl.get('perms')) + return wanted + + def _list_file_acls(self, path): + def _process_acl(acl, atype): + try: + if acl.tag_type == posix1e.ACL_USER: + qual = pwd.getpwuid(acl.qualifier)[0] + elif acl.tag_type == posix1e.ACL_GROUP: + qual = grp.getgrgid(acl.qualifier)[0] + else: + return + except (OSError, KeyError): + err = sys.exc_info()[1] + self.logger.error("POSIX: Lookup of %s %s failed: %s" % + (scope, acl.qualifier, err)) + qual = acl.qualifier + existing[(atype, acl.tag_type, qual)] = \ + self._norm_acl_perms(acl.permset) + + existing = dict() + try: + for acl in posix1e.ACL(file=path): + _process_acl(acl, "access") + except IOError: + err = sys.exc_info()[1] + if err.errno == 95: + # fs is mounted noacl + self.logger.debug("POSIX: Filesystem mounted without ACL " + "support: %s" % path) + else: + self.logger.error("POSIX: Error getting current ACLS on %s: %s" + % (path, err)) + return existing + + if os.path.isdir(path): + for acl in posix1e.ACL(filedef=path): + _process_acl(acl, "default") + return existing + + def _verify_acls(self, entry, path=None): + if not has_acls: + if entry.findall("ACL"): + 
self.logger.debug("POSIX: ACLs listed for %s but no pylibacl " + "library installed" % entry.get('name')) + return True + + if path is None: + path = entry.get("name") + + # create lists of normalized representations of the ACLs we want + # and the ACLs we have. this will make them easier to compare + # than trying to mine that data out of the ACL objects and XML + # objects and compare it at the same time. + wanted = self._list_entry_acls(entry) + existing = self._list_file_acls(path) + + missing = [] + extra = [] + wrong = [] + for aclkey, perms in wanted.items(): + if aclkey not in existing: + missing.append(self._acl2string(aclkey, perms)) + elif existing[aclkey] != perms: + wrong.append((self._acl2string(aclkey, perms), + self._acl2string(aclkey, existing[aclkey]))) + if path == entry.get("name"): + atype, scope, qual = aclkey + aclentry = Bcfg2.Client.XML.Element("ACL", type=atype, + perms=str(perms)) + if scope == posix1e.ACL_USER: + aclentry.set("scope", "user") + elif scope == posix1e.ACL_GROUP: + aclentry.set("scope", "group") + else: + self.logger.debug("POSIX: Unknown ACL scope %s on %s" % + (scope, path)) + continue + aclentry.set(aclentry.get("scope"), qual) + entry.append(aclentry) + + for aclkey, perms in existing.items(): + if aclkey not in wanted: + extra.append(self._acl2string(aclkey, perms)) + + msg = [] + if missing: + msg.append("%s ACLs are missing: %s" % (len(missing), + ", ".join(missing))) + if wrong: + msg.append("%s ACLs are wrong: %s" % + (len(wrong), + "; ".join(["%s should be %s" % (e, w) + for w, e in wrong]))) + if extra: + msg.append("%s extra ACLs: %s" % (len(extra), ", ".join(extra))) + + if msg: + msg.insert(0, "POSIX: ACLs for %s are incorrect." 
% path) + self.logger.debug(msg[0]) + for line in msg[1:]: + self.logger.debug(" " + line) + entry.set('qtext', "\n".join([entry.get("qtext", '')] + msg)) + return False + return True + + def _makedirs(self, entry, path=None): + """ os.makedirs helpfully creates all parent directories for + us, but it sets permissions according to umask, which is + probably wrong. we need to find out which directories were + created and set permissions on those + (http://trac.mcs.anl.gov/projects/bcfg2/ticket/1125) """ + created = [] + if path is None: + path = entry.get("name") + cur = path + while cur != '/': + if not os.path.exists(cur): + created.append(cur) + cur = os.path.dirname(cur) + rv = True + try: + os.makedirs(path) + except OSError: + err = sys.exc_info()[1] + self.logger.error('POSIX: Failed to create directory %s: %s' % + (path, err)) + rv = False + for cpath in created: + rv &= self._set_perms(entry, path=cpath) + return rv diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py index 4516f419d..36d48b8d3 100644 --- a/src/lib/Bcfg2/Client/Tools/Portage.py +++ b/src/lib/Bcfg2/Client/Tools/Portage.py @@ -2,8 +2,6 @@ import re import Bcfg2.Client.Tools -from Bcfg2.Bcfg2Py3k import ConfigParser - class Portage(Bcfg2.Client.Tools.PkgTool): """The Gentoo toolset implements package and service operations and @@ -27,30 +25,11 @@ class Portage(Bcfg2.Client.Tools.PkgTool): self._ebuild_pattern = re.compile('(ebuild|binary)') self.cfg = cfg self.installed = {} - self._binpkgonly = True - - # Used to get options from configuration file - parser = ConfigParser.ConfigParser() - parser.read(self.setup.get('setup')) - for opt in ['binpkgonly']: - if parser.has_option(self.name, opt): - setattr(self, ('_%s' % opt), - self._StrToBoolIfBool(parser.get(self.name, opt))) - + self._binpkgonly = self.setup.get('portage_binpkgonly', False) if self._binpkgonly: self.pkgtool = self._binpkgtool self.RefreshPackages() - def _StrToBoolIfBool(self, s): - 
"""Returns a boolean if the string specifies a boolean value. - Returns a string otherwise""" - if s.lower() in ('true', 'yes', 't', 'y', '1'): - return True - elif s.lower() in ('false', 'no', 'f', 'n', '0'): - return False - else: - return s - def RefreshPackages(self): """Refresh memory hashes of packages.""" if not self._initialised: @@ -83,8 +62,8 @@ class Portage(Bcfg2.Client.Tools.PkgTool): entry.set('current_version', version) if not self.setup['quick']: - if ('verify' not in entry.attrib) or \ - self._StrToBoolIfBool(entry.get('verify')): + if ('verify' not in entry.attrib or + entry.get('verify').lower == 'true'): # Check the package if: # - Not running in quick mode diff --git a/src/lib/Bcfg2/Client/Tools/RPMng.py b/src/lib/Bcfg2/Client/Tools/RPMng.py index 00dd00d71..91e2180ae 100644 --- a/src/lib/Bcfg2/Client/Tools/RPMng.py +++ b/src/lib/Bcfg2/Client/Tools/RPMng.py @@ -4,8 +4,6 @@ import os.path import rpm import rpmtools import Bcfg2.Client.Tools -# Compatibility import -from Bcfg2.Bcfg2Py3k import ConfigParser class RPMng(Bcfg2.Client.Tools.PkgTool): """Support for RPM packages.""" @@ -44,82 +42,42 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): self.modlists = {} self.gpg_keyids = self.getinstalledgpg() - # Process thee RPMng section from the config file. 
- RPMng_CP = ConfigParser.ConfigParser() - RPMng_CP.read(self.setup.get('setup')) - - # installonlypackages - self.installOnlyPkgs = [] - if RPMng_CP.has_option(self.name, 'installonlypackages'): - for i in RPMng_CP.get(self.name, 'installonlypackages').split(','): - self.installOnlyPkgs.append(i.strip()) - if self.installOnlyPkgs == []: - self.installOnlyPkgs = ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', - 'kernel-modules', 'kernel-debug', 'kernel-unsupported', - 'kernel-source', 'kernel-devel', 'kernel-default', - 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', - 'gpg-pubkey'] + opt_prefix = self.name.lower() + self.installOnlyPkgs = self.setup["%s_installonly" % opt_prefix] if 'gpg-pubkey' not in self.installOnlyPkgs: self.installOnlyPkgs.append('gpg-pubkey') - self.logger.debug('installOnlyPackages = %s' % self.installOnlyPkgs) - - # erase_flags - self.erase_flags = [] - if RPMng_CP.has_option(self.name, 'erase_flags'): - for i in RPMng_CP.get(self.name, 'erase_flags').split(','): - self.erase_flags.append(i.strip()) - if self.erase_flags == []: - self.erase_flags = ['allmatches'] - self.logger.debug('erase_flags = %s' % self.erase_flags) - - # pkg_checks - if RPMng_CP.has_option(self.name, 'pkg_checks'): - self.pkg_checks = RPMng_CP.get(self.name, 'pkg_checks').lower() - else: - self.pkg_checks = 'true' - self.logger.debug('pkg_checks = %s' % self.pkg_checks) - - # pkg_verify - if RPMng_CP.has_option(self.name, 'pkg_verify'): - self.pkg_verify = RPMng_CP.get(self.name, 'pkg_verify').lower() - else: - self.pkg_verify = 'true' - self.logger.debug('pkg_verify = %s' % self.pkg_verify) - - # installed_action - if RPMng_CP.has_option(self.name, 'installed_action'): - self.installed_action = RPMng_CP.get(self.name, 'installed_action').lower() - else: - self.installed_action = 'install' - self.logger.debug('installed_action = %s' % self.installed_action) - - # version_fail_action - if RPMng_CP.has_option(self.name, 'version_fail_action'): 
- self.version_fail_action = RPMng_CP.get(self.name, 'version_fail_action').lower() - else: - self.version_fail_action = 'upgrade' - self.logger.debug('version_fail_action = %s' % self.version_fail_action) - - # verify_fail_action - if self.name == "RPMng": - if RPMng_CP.has_option(self.name, 'verify_fail_action'): - self.verify_fail_action = RPMng_CP.get(self.name, 'verify_fail_action').lower() - else: - self.verify_fail_action = 'reinstall' - else: # yum can't reinstall packages. - self.verify_fail_action = 'none' - self.logger.debug('verify_fail_action = %s' % self.verify_fail_action) - - # version_fail_action - if RPMng_CP.has_option(self.name, 'verify_flags'): - self.verify_flags = RPMng_CP.get(self.name, 'verify_flags').lower().split(',') - else: - self.verify_flags = [] + self.erase_flags = self.setup['%s_erase_flags' % opt_prefix] + self.pkg_checks = self.setup['%s_pkg_checks' % opt_prefix] + self.pkg_verify = self.setup['%s_pkg_verify' % opt_prefix] + self.installed_action = self.setup['%s_installed_action' % opt_prefix] + self.version_fail_action = self.setup['%s_version_fail_action' % + opt_prefix] + self.verify_fail_action = self.setup['%s_verify_fail_action' % + opt_prefix] + self.verify_flags = self.setup['%s_verify_flags' % opt_prefix] if '' in self.verify_flags: self.verify_flags.remove('') - self.logger.debug('version_fail_action = %s' % self.version_fail_action) + + self.logger.debug('%s: installOnlyPackages = %s' % + (self.name, self.installOnlyPkgs)) + self.logger.debug('%s: erase_flags = %s' % + (self.name, self.erase_flags)) + self.logger.debug('%s: pkg_checks = %s' % + (self.name, self.pkg_checks)) + self.logger.debug('%s: pkg_verify = %s' % + (self.name, self.pkg_verify)) + self.logger.debug('%s: installed_action = %s' % + (self.name, self.installed_action)) + self.logger.debug('%s: version_fail_action = %s' % + (self.name, self.version_fail_action)) + self.logger.debug('%s: verify_fail_action = %s' % + (self.name, self.verify_fail_action)) 
+ self.logger.debug('%s: verify_flags = %s' % + (self.name, self.verify_flags)) + # Force a re- prelink of all packages if prelink exists. - # Many, if not most package verifies can be caused by out of date prelinking. + # Many, if not most package verifies can be caused by out of + # date prelinking. if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']: cmdrc, output = self.cmd.run('/usr/sbin/prelink -a -mR') if cmdrc == 0: @@ -193,7 +151,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): instance = Bcfg2.Client.XML.SubElement(entry, 'Package') for attrib in list(entry.attrib.keys()): instance.attrib[attrib] = entry.attrib[attrib] - if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true': + if (self.pkg_checks and + entry.get('pkg_checks', 'true').lower() == 'true'): if 'any' in [entry.get('version'), pinned_version]: version, release = 'any', 'any' elif entry.get('version') == 'auto': @@ -215,7 +174,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): if entry.get('name') in self.installed: # There is at least one instance installed. 
- if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true': + if (self.pkg_checks and + entry.get('pkg_checks', 'true').lower() == 'true'): rpmTs = rpm.TransactionSet() rpmHeader = None for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')): @@ -243,8 +203,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): self.logger.debug(" %s" % self.str_evra(inst)) self.instance_status[inst]['installed'] = True - if self.pkg_verify == 'true' and \ - inst.get('pkg_verify', 'true') == 'true': + if (self.pkg_verify and + inst.get('pkg_verify', 'true').lower() == 'true'): flags = inst.get('verify_flags', '').split(',') + self.verify_flags if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ entry.get('name') != 'gpg-pubkey': @@ -302,8 +262,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): self.logger.debug(" %s" % self.str_evra(inst)) self.instance_status[inst]['installed'] = True - if self.pkg_verify == 'true' and \ - inst.get('pkg_verify', 'true') == 'true': + if (self.pkg_verify and + inst.get('pkg_verify', 'true').lower() == 'true'): flags = inst.get('verify_flags', '').split(',') + self.verify_flags if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ 'nosignature' not in flags: @@ -520,7 +480,7 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): self.extra = self.FindExtraPackages() def FixInstance(self, instance, inst_status): - """" + """ Control if a reinstall of a package happens or not based on the results from RPMng.VerifyPackage(). @@ -824,8 +784,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool): return False # We don't want to do any checks so we don't care what the entry has in it. 
- if self.pkg_checks == 'false' or \ - entry.get('pkg_checks', 'true').lower() == 'false': + if (not self.pkg_checks or + entry.get('pkg_checks', 'true').lower() == 'false'): return True instances = entry.findall('Instance') diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py index 1b9a29478..ddf9c1f2d 100644 --- a/src/lib/Bcfg2/Client/Tools/RcUpdate.py +++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py @@ -23,22 +23,18 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): rc = self.cmd.run(cmd % entry.get('name'))[0] is_enabled = (rc == 0) - if entry.get('mode', 'default') == 'supervised': - # check if init script exists - try: - os.stat('/etc/init.d/%s' % entry.get('name')) - except OSError: - self.logger.debug('Init script for service %s does not exist' % - entry.get('name')) - return False + # check if init script exists + try: + os.stat('/etc/init.d/%s' % entry.get('name')) + except OSError: + self.logger.debug('Init script for service %s does not exist' % + entry.get('name')) + return False - # check if service is enabled - cmd = '/etc/init.d/%s status | grep started' - rc = self.cmd.run(cmd % entry.attrib['name'])[0] - is_running = (rc == 0) - else: - # we don't care - is_running = is_enabled + # check if service is enabled + cmd = '/etc/init.d/%s status | grep started' + rc = self.cmd.run(cmd % entry.attrib['name'])[0] + is_running = (rc == 0) if entry.get('status') == 'on' and not (is_enabled and is_running): entry.set('current_status', 'off') @@ -53,19 +49,11 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): def InstallService(self, entry): """ Install Service entry - In supervised mode we also take care it's (not) running. """ - # don't take any actions for mode='manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. Skipping " - "installation." 
% (entry.get('name'))) - return False self.logger.info('Installing Service %s' % entry.get('name')) if entry.get('status') == 'on': - # make sure it's running if in supervised mode - if entry.get('mode', 'default') == 'supervised' \ - and entry.get('current_status') == 'off': + if entry.get('current_status') == 'off': self.start_service(entry) # make sure it's enabled cmd = '/sbin/rc-update add %s default' @@ -73,9 +61,7 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): return (rc == 0) elif entry.get('status') == 'off': - # make sure it's not running if in supervised mode - if entry.get('mode', 'default') == 'supervised' \ - and entry.get('current_status') == 'on': + if entry.get('current_status') == 'on': self.stop_service(entry) # make sure it's disabled cmd = '/sbin/rc-update del %s default' diff --git a/src/lib/Bcfg2/Client/Tools/SELinux.py b/src/lib/Bcfg2/Client/Tools/SELinux.py new file mode 100644 index 000000000..1c0db904b --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/SELinux.py @@ -0,0 +1,716 @@ +import os +import re +import sys +import copy +import glob +import struct +import socket +import selinux +import seobject +import Bcfg2.Client.XML +import Bcfg2.Client.Tools +import Bcfg2.Client.Tools.POSIX + +def pack128(int_val): + """ pack a 128-bit integer in big-endian format """ + max_int = 2 ** (128) - 1 + max_word_size = 2 ** 32 - 1 + + if int_val <= max_word_size: + return struct.pack('>L', int_val) + + words = [] + for i in range(4): + word = int_val & max_word_size + words.append(int(word)) + int_val >>= 32 + words.reverse() + return struct.pack('>4I', *words) + +def netmask_itoa(netmask, proto="ipv4"): + """ convert an integer netmask (e.g., /16) to dotted-quad + notation (255.255.0.0) or IPv6 prefix notation (ffff::) """ + if proto == "ipv4": + size = 32 + family = socket.AF_INET + else: # ipv6 + size = 128 + family = socket.AF_INET6 + try: + int(netmask) + except ValueError: + return netmask + + if netmask > size: + raise ValueError("Netmask too 
large: %s" % netmask) + + res = 0L + for n in range(netmask): + res |= 1 << (size - n - 1) + netmask = socket.inet_ntop(family, pack128(res)) + return netmask + + +class SELinux(Bcfg2.Client.Tools.Tool): + """ SELinux boolean and module support """ + name = 'SELinux' + __handles__ = [('SELinux', 'boolean'), + ('SELinux', 'port'), + ('SELinux', 'fcontext'), + ('SELinux', 'node'), + ('SELinux', 'login'), + ('SELinux', 'user'), + ('SELinux', 'interface'), + ('SELinux', 'permissive'), + ('SELinux', 'module')] + __req__ = dict(SELinux=dict(boolean=['name', 'value'], + module=['name'], + port=['name', 'selinuxtype'], + fcontext=['name', 'selinuxtype'], + node=['name', 'selinuxtype', 'proto'], + login=['name', 'selinuxuser'], + user=['name', 'roles', 'prefix'], + interface=['name', 'selinuxtype'], + permissive=['name'])) + + def __init__(self, logger, setup, config): + Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + self.handlers = {} + for handles in self.__handles__: + etype = handles[1] + self.handlers[etype] = \ + globals()["SELinux%sHandler" % etype.title()](self, logger, + setup, config) + + def BundleUpdated(self, _, states): + for handler in self.handlers.values(): + handler.BundleUpdated(states) + + def FindExtra(self): + extra = [] + for handler in self.handlers.values(): + extra.extend(handler.FindExtra()) + return extra + + def canInstall(self, entry): + return (Bcfg2.Client.Tools.Tool.canInstall(self, entry) and + self.handlers[entry.get('type')].canInstall(entry)) + + def primarykey(self, entry): + """ return a string that should be unique amongst all entries + in the specification """ + return self.handlers[entry.get('type')].primarykey(entry) + + def Install(self, entries, states): + # start a transaction + sr = seobject.semanageRecords("") + if hasattr(sr, "start"): + self.logger.debug("Starting SELinux transaction") + sr.start() + else: + self.logger.debug("SELinux transactions not supported; this may " + "slow things down considerably") 
+ Bcfg2.Client.Tools.Tool.Install(self, entries, states) + if hasattr(sr, "finish"): + self.logger.debug("Committing SELinux transaction") + sr.finish() + + def InstallSELinux(self, entry): + """Dispatch install to the proper method according to type""" + return self.handlers[entry.get('type')].Install(entry) + + def VerifySELinux(self, entry, _): + """Dispatch verify to the proper method according to type""" + rv = self.handlers[entry.get('type')].Verify(entry) + if entry.get('qtext') and self.setup['interactive']: + entry.set('qtext', + '%s\nInstall SELinux %s %s: (y/N) ' % + (entry.get('qtext'), + entry.get('type'), + self.handlers[entry.get('type')].tostring(entry))) + return rv + + def Remove(self, entries): + """Dispatch verify to the proper removal method according to type""" + # sort by type + types = list() + for entry in entries: + if entry.get('type') not in types: + types.append(entry.get('type')) + + for etype in types: + self.handlers[entry.get('type')].Remove([e for e in entries + if e.get('type') == etype]) + + +class SELinuxEntryHandler(object): + etype = None + key_format = ("name",) + value_format = () + str_format = '%(name)s' + custom_re = re.compile(' (?P<name>\S+)$') + custom_format = None + + def __init__(self, tool, logger, setup, config): + self.tool = tool + self.logger = logger + self._records = None + self._all = None + if not self.custom_format: + self.custom_format = self.key_format + + @property + def records(self): + if self._records is None: + self._records = getattr(seobject, "%sRecords" % self.etype)("") + return self._records + + @property + def all_records(self): + if self._all is None: + self._all = self.records.get_all() + return self._all + + @property + def custom_records(self): + if hasattr(self.records, "customized") and self.custom_re: + return dict([(k, self.all_records[k]) for k in self.custom_keys]) + else: + # ValueError is really a pretty dumb exception to raise, + # but that's what the seobject customized() method 
raises + # if it's defined but not implemented. yeah, i know, wtf. + raise ValueError("custom_records") + + @property + def custom_keys(self): + keys = [] + for cmd in self.records.customized(): + match = self.custom_re.search(cmd) + if match: + if (len(self.custom_format) == 1 and + self.custom_format[0] == "name"): + keys.append(match.group("name")) + else: + keys.append(tuple([match.group(k) + for k in self.custom_format])) + return keys + + def tostring(self, entry): + return self.str_format % entry.attrib + + def keytostring(self, key): + return self.str_format % self._key2attrs(key) + + def _key(self, entry): + if len(self.key_format) == 1 and self.key_format[0] == "name": + return entry.get("name") + else: + rv = [] + for key in self.key_format: + rv.append(entry.get(key)) + return tuple(rv) + + def _key2attrs(self, key): + if isinstance(key, tuple): + rv = dict((self.key_format[i], key[i]) + for i in range(len(self.key_format)) + if self.key_format[i]) + else: + rv = dict(name=key) + if self.value_format: + vals = self.all_records[key] + rv.update(dict((self.value_format[i], vals[i]) + for i in range(len(self.value_format)) + if self.value_format[i])) + return rv + + def key2entry(self, key): + attrs = self._key2attrs(key) + attrs["type"] = self.etype + return Bcfg2.Client.XML.Element("SELinux", **attrs) + + def _args(self, entry, method): + if hasattr(self, "_%sargs" % method): + return getattr(self, "_%sargs" % method)(entry) + elif hasattr(self, "_defaultargs"): + # default args + return self._defaultargs(entry) + else: + raise NotImplementedError + + def _deleteargs(self, entry): + return (self._key(entry)) + + def canInstall(self, entry): + return bool(self._key(entry)) + + def primarykey(self, entry): + return ":".join([entry.tag, entry.get("type"), entry.get("name")]) + + def exists(self, entry): + if self._key(entry) not in self.all_records: + self.logger.debug("SELinux %s %s does not exist" % + (self.etype, self.tostring(entry))) + return False + 
return True + + def Verify(self, entry): + if not self.exists(entry): + entry.set('current_exists', 'false') + return False + + errors = [] + current_attrs = self._key2attrs(self._key(entry)) + desired_attrs = entry.attrib + for attr in self.value_format: + if not attr: + continue + if current_attrs[attr] != desired_attrs[attr]: + entry.set('current_%s' % attr, current_attrs[attr]) + errors.append("SELinux %s %s has wrong %s: %s, should be %s" % + (self.etype, self.tostring(entry), attr, + current_attrs[attr], desired_attrs[attr])) + + if errors: + for error in errors: + self.logger.debug(error) + entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors)) + return False + else: + return True + + def Install(self, entry, method=None): + if not method: + if self.exists(entry): + method = "modify" + else: + method = "add" + self.logger.debug("%s SELinux %s %s" % + (method.title(), self.etype, self.tostring(entry))) + + try: + getattr(self.records, method)(*self._args(entry, method)) + self._all = None + return True + except ValueError: + err = sys.exc_info()[1] + self.logger.debug("Failed to %s SELinux %s %s: %s" % + (method, self.etype, self.tostring(entry), err)) + return False + + def Remove(self, entries): + for entry in entries: + try: + self.records.delete(*self._args(entry, "delete")) + self._all = None + except ValueError: + err = sys.exc_info()[1] + self.logger.info("Failed to remove SELinux %s %s: %s" % + (self.etype, self.tostring(entry), err)) + + def FindExtra(self): + specified = [self._key(e) + for e in self.tool.getSupportedEntries() + if e.get("type") == self.etype] + try: + records = self.custom_records + except ValueError: + records = self.all_records + return [self.key2entry(key) + for key in records.keys() + if key not in specified] + + def BundleUpdated(self, states): + pass + + +class SELinuxBooleanHandler(SELinuxEntryHandler): + etype = "boolean" + value_format = ("value",) + + @property + def all_records(self): + # older versions of 
selinux return a single 0/1 value for each + # bool, while newer versions return a list of three 0/1 values + # representing various states. we don't care about the latter + # two values, but it's easier to coerce the older format into + # the newer format as far as interoperation with the rest of + # SELinuxEntryHandler goes + rv = SELinuxEntryHandler.all_records.fget(self) + if rv.values()[0] in [0, 1]: + for key, val in rv.items(): + rv[key] = [val, val, val] + return rv + + def _key2attrs(self, key): + rv = SELinuxEntryHandler._key2attrs(self, key) + status = self.all_records[key][0] + if status: + rv['value'] = "on" + else: + rv['value'] = "off" + return rv + + def _defaultargs(self, entry): + # the only values recognized by both new and old versions of + # selinux are the strings "0" and "1". old selinux accepts + # ints or bools as well, new selinux accepts "on"/"off" + if entry.get("value").lower() == "on": + value = "1" + else: + value = "0" + return (entry.get("name"), value) + + def canInstall(self, entry): + if entry.get("value").lower() not in ["on", "off"]: + self.logger.debug("SELinux %s %s has a bad value: %s" % + (self.etype, self.tostring(entry), + entry.get("value"))) + return False + return (self.exists(entry) and + SELinuxEntryHandler.canInstall(self, entry)) + + +class SELinuxPortHandler(SELinuxEntryHandler): + etype = "port" + value_format = ('selinuxtype', None) + custom_re = re.compile(r'-p (?P<proto>tcp|udp).*? 
(?P<start>\d+)(?:-(?P<end>\d+))?$') + + @property + def custom_keys(self): + keys = [] + for cmd in self.records.customized(): + match = self.custom_re.search(cmd) + if match: + if match.group('end'): + keys.append((int(match.group('start')), + int(match.group('end')), + match.group('proto'))) + else: + keys.append((int(match.group('start')), + int(match.group('start')), + match.group('proto'))) + return keys + + @property + def all_records(self): + if self._all is None: + # older versions of selinux use (startport, endport) as + # they key for the ports.get_all() dict, and (type, proto, + # level) as the value; this is obviously broken, so newer + # versions use (startport, endport, proto) as the key, and + # (type, level) as the value. abstracting around this + # sucks. + ports = self.records.get_all() + if len(ports.keys()[0]) == 3: + self._all = ports + else: + # uglist list comprehension ever? + self._all = dict([((k[0], k[1], v[1]), (v[0], v[2])) + for k, v in ports.items()]) + return self._all + + def _key(self, entry): + try: + (port, proto) = entry.get("name").split("/") + except ValueError: + self.logger.error("Invalid SELinux node %s: no protocol specified" % + entry.get("name")) + return + if "-" in port: + start, end = port.split("-") + else: + start = port + end = port + return (int(start), int(end), proto) + + def _key2attrs(self, key): + if key[0] == key[1]: + port = str(key[0]) + else: + port = "%s-%s" % (key[0], key[1]) + vals = self.all_records[key] + return dict(name="%s/%s" % (port, key[2]), selinuxtype=vals[0]) + + def _defaultargs(self, entry): + (port, proto) = entry.get("name").split("/") + return (port, proto, '', entry.get("selinuxtype")) + + def _deleteargs(self, entry): + return tuple(entry.get("name").split("/")) + + +class SELinuxFcontextHandler(SELinuxEntryHandler): + etype = "fcontext" + key_format = ("name", "filetype") + value_format = (None, None, "selinuxtype", None) + filetypeargs = dict(all="", + regular="--", + 
directory="-d", + symlink="-l", + pipe="-p", + socket="-s", + block="-b", + char="-c", + door="-D") + filetypenames = dict(all="all files", + regular="regular file", + directory="directory", + symlink="symbolic link", + pipe="named pipe", + socket="socket", + block="block device", + char="character device", + door="door") + filetypeattrs = dict([v, k] for k, v in filetypenames.iteritems()) + custom_re = re.compile(r'-f \'(?P<filetype>[a-z ]+)\'.*? \'(?P<name>.*)\'') + + @property + def all_records(self): + if self._all is None: + # on older selinux, fcontextRecords.get_all() returns a + # list of tuples of (filespec, filetype, seuser, serole, + # setype, level); on newer selinux, get_all() returns a + # dict of (filespec, filetype) => (seuser, serole, setype, + # level). + fcontexts = self.records.get_all() + if isinstance(fcontexts, dict): + self._all = fcontexts + else: + self._all = dict([(f[0:2], f[2:]) for f in fcontexts]) + return self._all + + def _key(self, entry): + ftype = entry.get("filetype", "all") + return (entry.get("name"), + self.filetypenames.get(ftype, ftype)) + + def _key2attrs(self, key): + rv = dict(name=key[0], filetype=self.filetypeattrs[key[1]]) + vals = self.all_records[key] + # in older versions of selinux, an fcontext with no selinux + # type is the single value None; in newer versions, it's a + # tuple whose 0th (and only) value is None. 
+ if vals and vals[0]: + rv["selinuxtype"] = vals[2] + else: + rv["selinuxtype"] = "<<none>>" + return rv + + def canInstall(self, entry): + return (entry.get("filetype", "all") in self.filetypeargs and + SELinuxEntryHandler.canInstall(self, entry)) + + def _defaultargs(self, entry): + return (entry.get("name"), entry.get("selinuxtype"), + self.filetypeargs[entry.get("filetype", "all")], + '', '') + + def primarykey(self, entry): + return ":".join([entry.tag, entry.get("type"), entry.get("name"), + entry.get("filetype", "all")]) + + +class SELinuxNodeHandler(SELinuxEntryHandler): + etype = "node" + value_format = (None, None, "selinuxtype", None) + str_format = '%(name)s (%(proto)s)' + custom_re = re.compile(r'-M (?P<netmask>\S+).*?-p (?P<proto>ipv\d).*? (?P<addr>\S+)$') + custom_format = ('addr', 'netmask', 'proto') + + def _key(self, entry): + try: + (addr, netmask) = entry.get("name").split("/") + except ValueError: + self.logger.error("Invalid SELinux node %s: no netmask specified" % + entry.get("name")) + return + netmask = netmask_itoa(netmask, proto=entry.get("proto")) + return (addr, netmask, entry.get("proto")) + + def _key2attrs(self, key): + vals = self.all_records[key] + return dict(name="%s/%s" % (key[0], key[1]), proto=key[2], + selinuxtype=vals[2]) + + def _defaultargs(self, entry): + (addr, netmask) = entry.get("name").split("/") + return (addr, netmask, entry.get("proto"), "", entry.get("selinuxtype")) + + +class SELinuxLoginHandler(SELinuxEntryHandler): + etype = "login" + value_format = ("selinuxuser", None) + + def _defaultargs(self, entry): + return (entry.get("name"), entry.get("selinuxuser"), "") + + +class SELinuxUserHandler(SELinuxEntryHandler): + etype = "user" + value_format = ("prefix", None, None, "roles") + + def __init__(self, tool, logger, setup, config): + SELinuxEntryHandler.__init__(self, tool, logger, setup, config) + self.needs_prefix = False + + @property + def records(self): + if self._records is None: + self._records = 
seobject.seluserRecords() + return self._records + + def Install(self, entry): + # in older versions of selinux, modify() is broken if you + # provide a prefix _at all_, so we try to avoid giving the + # prefix. however, in newer versions, prefix is _required_, + # so we a) try without a prefix; b) catch TypeError, which + # indicates that we had the wrong number of args (ValueError + # is thrown by the bug in older versions of selinux); and c) + # try with prefix. + try: + SELinuxEntryHandler.Install(self, entry) + except TypeError: + self.needs_prefix = True + SELinuxEntryHandler.Install(self, entry) + + def _defaultargs(self, entry): + # in older versions of selinux, modify() is broken if you + # provide a prefix _at all_, so we try to avoid giving the + # prefix. see the comment in Install() above for more + # details. + rv = [entry.get("name"), + entry.get("roles", "").replace(" ", ",").split(",")] + if self.needs_prefix: + rv.extend(['', '', entry.get("prefix")]) + else: + key = self._key(entry) + if key in self.all_records: + attrs = self._key2attrs(key) + if attrs['prefix'] != entry.get("prefix"): + rv.extend(['', '', entry.get("prefix")]) + return tuple(rv) + + +class SELinuxInterfaceHandler(SELinuxEntryHandler): + etype = "interface" + value_format = (None, None, "selinuxtype", None) + + def _defaultargs(self, entry): + return (entry.get("name"), '', entry.get("selinuxtype")) + + +class SELinuxPermissiveHandler(SELinuxEntryHandler): + etype = "permissive" + + @property + def records(self): + try: + return SELinuxEntryHandler.records.fget(self) + except AttributeError: + self.logger.info("Permissive domains not supported by this version " + "of SELinux") + self._records = False + return self._records + + @property + def all_records(self): + if self._all is None: + if self.records == False: + self._all = dict() + else: + # permissiveRecords.get_all() returns a list, so we just + # make it into a dict so that the rest of + # SELinuxEntryHandler works + 
self._all = dict([(d, d) for d in self.records.get_all()]) + return self._all + + def _defaultargs(self, entry): + return (entry.get("name"),) + + +class SELinuxModuleHandler(SELinuxEntryHandler): + etype = "module" + value_format = (None, "disabled") + + def __init__(self, tool, logger, setup, config): + SELinuxEntryHandler.__init__(self, tool, logger, setup, config) + self.posixtool = Bcfg2.Client.Tools.POSIX.POSIX(logger, setup, config) + try: + self.setype = selinux.selinux_getpolicytype()[1] + except IndexError: + self.logger.error("Unable to determine SELinux policy type") + self.setype = None + + @property + def all_records(self): + if self._all is None: + # we get a list of tuples back; coerce it into a dict + self._all = dict([(m[0], (m[1], m[2])) + for m in self.records.get_all()]) + return self._all + + def _key2attrs(self, key): + rv = SELinuxEntryHandler._key2attrs(self, key) + status = self.all_records[key][1] + if status: + rv['disabled'] = "false" + else: + rv['disabled'] = "true" + return rv + + def _filepath(self, entry): + return os.path.join("/usr/share/selinux", self.setype, + "%s.pp" % entry.get("name")) + + def _pathentry(self, entry): + pathentry = copy.deepcopy(entry) + pathentry.set("name", self._filepath(pathentry)) + pathentry.set("perms", "0644") + pathentry.set("owner", "root") + pathentry.set("group", "root") + pathentry.set("secontext", "__default__") + return pathentry + + def Verify(self, entry): + if not entry.get("disabled"): + entry.set("disabled", "false") + return (SELinuxEntryHandler.Verify(self, entry) and + self.posixtool.Verifyfile(self._pathentry(entry), None)) + + def canInstall(self, entry): + return (entry.text and self.setype and + SELinuxEntryHandler.canInstall(self, entry)) + + def Install(self, entry): + rv = self.posixtool.Installfile(self._pathentry(entry)) + try: + rv = rv and SELinuxEntryHandler.Install(self, entry) + except NameError: + # some versions of selinux have a bug in seobject that + # makes modify() 
calls fail. add() seems to have the same + # effect as modify, but without the bug + if self.exists(entry): + rv = rv and SELinuxEntryHandler.Install(self, entry, + method="add") + + if entry.get("disabled", "false").lower() == "true": + method = "disable" + else: + method = "enable" + return rv and SELinuxEntryHandler.Install(self, entry, method=method) + + def _addargs(self, entry): + return (self._filepath(entry),) + + def _defaultargs(self, entry): + return (entry.get("name"),) + + def FindExtra(self): + specified = [self._key(e) + for e in self.tool.getSupportedEntries() + if e.get("type") == self.etype] + return [self.key2entry(os.path.basename(f)[:-3]) + for f in glob.glob(os.path.join("/usr/share/selinux", + self.setype, "*.pp")) + if f not in specified] diff --git a/src/lib/Bcfg2/Client/Tools/SMF.py b/src/lib/Bcfg2/Client/Tools/SMF.py index f824410ad..3e0a9da13 100644 --- a/src/lib/Bcfg2/Client/Tools/SMF.py +++ b/src/lib/Bcfg2/Client/Tools/SMF.py @@ -73,11 +73,6 @@ class SMF(Bcfg2.Client.Tools.SvcTool): def InstallService(self, entry): """Install SMF Service entry.""" - # don't take any actions for mode='manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. Skipping " - "installation." % (entry.get('name'))) - return False self.logger.info("Installing Service %s" % (entry.get('name'))) if entry.get('status') == 'off': if entry.get("FMRI").startswith('lrc'): diff --git a/src/lib/Bcfg2/Client/Tools/Systemd.py b/src/lib/Bcfg2/Client/Tools/Systemd.py index e3f6a4169..a295bc608 100644 --- a/src/lib/Bcfg2/Client/Tools/Systemd.py +++ b/src/lib/Bcfg2/Client/Tools/Systemd.py @@ -42,18 +42,11 @@ class Systemd(Bcfg2.Client.Tools.SvcTool): def InstallService(self, entry): """Install Service entry.""" - # don't take any actions for mode = 'manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. Skipping " - "installation." 
% (entry.get('name'))) - return True - if entry.get('status') == 'on': - pstatus = self.cmd.run(self.get_svc_command(entry, 'enable'))[0] - pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0] - + rv = self.cmd.run(self.get_svc_command(entry, 'enable'))[0] == 0 + rv &= self.cmd.run(self.get_svc_command(entry, 'start'))[0] == 0 else: - pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0] - pstatus = self.cmd.run(self.get_svc_command(entry, 'disable'))[0] + rv = self.cmd.run(self.get_svc_command(entry, 'stop'))[0] == 0 + rv &= self.cmd.run(self.get_svc_command(entry, 'disable'))[0] == 0 - return not pstatus + return rv diff --git a/src/lib/Bcfg2/Client/Tools/Upstart.py b/src/lib/Bcfg2/Client/Tools/Upstart.py index 7afc8edd7..aa5a921a6 100644 --- a/src/lib/Bcfg2/Client/Tools/Upstart.py +++ b/src/lib/Bcfg2/Client/Tools/Upstart.py @@ -69,11 +69,6 @@ class Upstart(Bcfg2.Client.Tools.SvcTool): def InstallService(self, entry): """Install Service for entry.""" - # don't take any actions for mode='manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. Skipping " - "installation." 
% (entry.get('name'))) - return False if entry.get('status') == 'on': pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0] elif entry.get('status') == 'off': diff --git a/src/lib/Bcfg2/Client/Tools/YUM24.py b/src/lib/Bcfg2/Client/Tools/YUM24.py index 4e488b9da..2bc821db3 100644 --- a/src/lib/Bcfg2/Client/Tools/YUM24.py +++ b/src/lib/Bcfg2/Client/Tools/YUM24.py @@ -6,20 +6,6 @@ import sys import yum import Bcfg2.Client.XML import Bcfg2.Client.Tools.RPMng -# Compatibility import -from Bcfg2.Bcfg2Py3k import ConfigParser - -YAD = True -CP = ConfigParser.ConfigParser() -try: - if '-C' in sys.argv: - CP.read([sys.argv[sys.argv.index('-C') + 1]]) - else: - CP.read(['/etc/bcfg2.conf']) - if CP.get('YUMng', 'autodep').lower() == 'false': - YAD = False -except: - pass if not hasattr(Bcfg2.Client.Tools.RPMng, 'RPMng'): raise ImportError @@ -79,6 +65,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng): (entry.get('name').startswith('/etc/yum.d') \ or entry.get('name').startswith('/etc/yum.repos.d')) \ or entry.get('name') == '/etc/yum.conf'] + self.autodep = setup.get("yum24_autodep") self.yum_avail = dict() self.yum_installed = dict() self.yb = yum.YumBase() @@ -273,7 +260,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng): if len(install_pkgs) > 0: self.logger.info("Attempting to install packages") - if YAD: + if self.autodep: pkgtool = "/usr/bin/yum -d0 -y install %s" else: pkgtool = "/usr/bin/yum -d0 install %s" @@ -309,7 +296,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng): if len(upgrade_pkgs) > 0: self.logger.info("Attempting to upgrade packages") - if YAD: + if self.autodep: pkgtool = "/usr/bin/yum -d0 -y update %s" else: pkgtool = "/usr/bin/yum -d0 update %s" @@ -359,7 +346,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng): """ self.logger.debug('Running YUMng.RemovePackages()') - if YAD: + if self.autodep: pkgtool = "/usr/bin/yum -d0 -y erase %s" else: pkgtool = "/usr/bin/yum -d0 erase %s" diff --git a/src/lib/Bcfg2/Client/Tools/YUMng.py 
b/src/lib/Bcfg2/Client/Tools/YUMng.py index 244b66cf4..34029b9fe 100644 --- a/src/lib/Bcfg2/Client/Tools/YUMng.py +++ b/src/lib/Bcfg2/Client/Tools/YUMng.py @@ -12,9 +12,6 @@ import yum.misc import rpmUtils.arch import Bcfg2.Client.XML import Bcfg2.Client.Tools -# Compatibility import -from Bcfg2.Bcfg2Py3k import ConfigParser - def build_yname(pkgname, inst): """Build yum appropriate package name.""" @@ -58,20 +55,6 @@ def nevraString(p): return ret -class Parser(ConfigParser.ConfigParser): - - def get(self, section, option, default): - """ - Override ConfigParser.get: If the request option is not in the - config file then return the value of default rather than raise - an exception. We still raise exceptions on missing sections. - """ - try: - return ConfigParser.ConfigParser.get(self, section, option) - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - return default - - class RPMDisplay(yum.rpmtrans.RPMBaseCallback): """We subclass the default RPM transaction callback so that we can control Yum's verbosity and pipe it through the right logger.""" @@ -224,38 +207,24 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): def _loadConfig(self): # Process the YUMng section from the config file. 
- CP = Parser() - CP.read(self.setup.get('setup')) - truth = ['true', 'yes', '1'] - # These are all boolean flags, either we do stuff or we don't - self.pkg_checks = CP.get(self.name, "pkg_checks", "true").lower() \ - in truth - self.pkg_verify = CP.get(self.name, "pkg_verify", "true").lower() \ - in truth - self.doInstall = CP.get(self.name, "installed_action", - "install").lower() == "install" - self.doUpgrade = CP.get(self.name, - "version_fail_action", "upgrade").lower() == "upgrade" - self.doReinst = CP.get(self.name, "verify_fail_action", - "reinstall").lower() == "reinstall" - self.verifyFlags = CP.get(self.name, "verify_flags", - "").lower().replace(' ', ',') + self.pkg_checks = self.setup["yumng_pkg_checks"] + self.pkg_verify = self.setup["yumng_pkg_verify"] + self.doInstall = self.setup["yumng_installed_action"] == "install" + self.doUpgrade = self.setup["yumng_version_fail_action"] == "upgrade" + self.doReinst = self.setup["yumng_verify_fail_action"] == "reinstall" + self.verifyFlags = self.setup["yumng_verify_flags"] self.installOnlyPkgs = self.yb.conf.installonlypkgs if 'gpg-pubkey' not in self.installOnlyPkgs: self.installOnlyPkgs.append('gpg-pubkey') - self.logger.debug("YUMng: Install missing: %s" \ - % self.doInstall) + self.logger.debug("YUMng: Install missing: %s" % self.doInstall) self.logger.debug("YUMng: pkg_checks: %s" % self.pkg_checks) self.logger.debug("YUMng: pkg_verify: %s" % self.pkg_verify) - self.logger.debug("YUMng: Upgrade on version fail: %s" \ - % self.doUpgrade) - self.logger.debug("YUMng: Reinstall on verify fail: %s" \ - % self.doReinst) - self.logger.debug("YUMng: installOnlyPkgs: %s" \ - % str(self.installOnlyPkgs)) + self.logger.debug("YUMng: Upgrade on version fail: %s" % self.doUpgrade) + self.logger.debug("YUMng: Reinstall on verify fail: %s" % self.doReinst) + self.logger.debug("YUMng: installOnlyPkgs: %s" % self.installOnlyPkgs) self.logger.debug("YUMng: verify_flags: %s" % self.verifyFlags) def _fixAutoVersion(self, 
entry): @@ -425,8 +394,8 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): if entry.get('version', False) == 'auto': self._fixAutoVersion(entry) - self.logger.debug("Verifying package instances for %s" \ - % entry.get('name')) + self.logger.debug("Verifying package instances for %s" % + entry.get('name')) self.verifyCache = {} # Used for checking multilib packages self.modlists[entry] = modlist @@ -450,8 +419,8 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): POs = self.yb.rpmdb.searchProvides(entry.get('name')) if len(POs) > 0: virtPkg = True - self.logger.info("%s appears to be provided by:" \ - % entry.get('name')) + self.logger.info("%s appears to be provided by:" % + entry.get('name')) for p in POs: self.logger.info(" %s" % p) @@ -473,8 +442,13 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): stat['verify_fail'] = False stat['pkg'] = entry stat['modlist'] = modlist - verify_flags = inst.get('verify_flags', self.verifyFlags) - verify_flags = verify_flags.lower().replace(' ', ',').split(',') + if inst.get('verify_flags'): + # this splits on either space or comma + verify_flags = \ + inst.get('verify_flags').lower().replace(' ', + ',').split(',') + else: + verify_flags = self.verifyFlags if 'arch' in nevra: # If arch is specified use it to select the package @@ -483,6 +457,7 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): _POs = POs if len(_POs) == 0: # Package (name, arch) not installed + entry.set('current_exists', 'false') self.logger.debug(" %s is not installed" % nevraString(nevra)) stat['installed'] = False package_fail = True @@ -494,8 +469,23 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): # Check EVR if virtPkg: - self.logger.debug(" Not checking version for virtual package") - _POs = [po for po in POs] # Make a copy + # we need to make sure that the version of the symbol + # provided matches the one required in the + # configuration + vlist = [] + for attr in ["epoch", "version", "release"]: + vlist.append(nevra.get(attr)) + if tuple(vlist) == (None, None, None): + # we just 
require the package name, no particular + # version, so just make a copy of POs since every + # package that provides this symbol satisfies the + # requirement + _POs = [po for po in POs] + else: + _POs = [po for po in POs + if po.checkPrco('provides', + (nevra["name"], 'EQ', + tuple(vlist)))] elif entry.get('name') == 'gpg-pubkey': if 'version' not in nevra: m = "Skipping verify: gpg-pubkey without an RPM version." @@ -513,10 +503,33 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): package_fail = True stat['version_fail'] = True # Just chose the first pkg for the error message - self.logger.info(" %s: Wrong version installed. " - "Want %s, but have %s" % (entry.get("name"), - nevraString(nevra), - nevraString(POs[0]))) + if virtPkg: + provTuple = \ + [p for p in POs[0].provides + if p[0] == entry.get("name")][0] + entry.set('current_version', "%s:%s-%s" % provTuple[2]) + self.logger.info(" %s: Wrong version installed. " + "Want %s, but %s provides %s" % + (entry.get("name"), + nevraString(nevra), + nevraString(POs[0]), + yum.misc.prco_tuple_to_string(provTuple))) + else: + entry.set('current_version', "%s:%s-%s.%s" % + (POs[0].epoch, + POs[0].version, + POs[0].release, + POs[0].arch)) + self.logger.info(" %s: Wrong version installed. 
" + "Want %s, but have %s" % + (entry.get("name"), + nevraString(nevra), + nevraString(POs[0]))) + entry.set('version', "%s:%s-%s.%s" % + (nevra.get('epoch', 'any'), + nevra.get('version', 'any'), + nevra.get('release', 'any'), + nevra.get('arch', 'any'))) qtext_versions.append("U(%s)" % str(POs[0])) continue @@ -547,7 +560,7 @@ class YUMng(Bcfg2.Client.Tools.PkgTool): package_fail = True continue - # Now take out the Yum specific objects / modlists / unproblmes + # Now take out the Yum specific objects / modlists / unproblems ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \ [ig.get('name') for ig in inst.findall('Ignore')] + \ self.ignores diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py index c6cb6e239..026c7ade0 100644 --- a/src/lib/Bcfg2/Client/Tools/__init__.py +++ b/src/lib/Bcfg2/Client/Tools/__init__.py @@ -1,16 +1,27 @@ """This contains all Bcfg2 Tool modules""" import os -import stat import sys -from subprocess import Popen, PIPE +import stat import time +import pkgutil +from subprocess import Popen, PIPE import Bcfg2.Client.XML - -__all__ = [tool.split('.')[0] \ - for tool in os.listdir(os.path.dirname(__file__)) \ - if tool.endswith(".py") and tool != "__init__.py"] - +from Bcfg2.Bcfg2Py3k import input + +if hasattr(pkgutil, 'walk_packages'): + submodules = pkgutil.walk_packages(path=__path__) +else: + # python 2.4 + import glob + submodules = [] + for path in __path__: + for submodule in glob.glob(os.path.join(path, "*.py")): + mod = os.path.splitext(os.path.basename(submodule))[0] + if mod not in ['__init__']: + submodules.append((None, mod, True)) + +__all__ = [m[1] for m in submodules] drivers = [item for item in __all__ if item not in ['rpmtools']] default = [item for item in drivers if item not in ['RPM', 'Yum']] @@ -36,7 +47,7 @@ class executor: return (p.returncode, output.splitlines()) -class Tool: +class Tool(object): """ All tools subclass this. 
It defines all interfaces that need to be defined. """ @@ -47,10 +58,6 @@ class Tool: __important__ = [] def __init__(self, logger, setup, config): - self.__important__ = [entry.get('name') \ - for struct in config for entry in struct \ - if entry.tag == 'Path' and \ - entry.get('important') in ['true', 'True']] self.setup = setup self.logger = logger if not hasattr(self, '__ireq__'): @@ -59,8 +66,15 @@ class Tool: self.cmd = executor(logger) self.modified = [] self.extra = [] - self.handled = [entry for struct in self.config for entry in struct \ - if self.handlesEntry(entry)] + self.__important__ = [] + self.handled = [] + for struct in config: + for entry in struct: + if (entry.tag == 'Path' and + entry.get('important', 'false').lower() == 'true'): + self.__important__.append(entry.get('name')) + if self.handlesEntry(entry): + self.handled.append(entry) for filename in self.__execs__: try: mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE]) @@ -130,12 +144,24 @@ class Tool: '''Build a list of potentially modified POSIX paths for this entry''' return [entry.get('name') for struct in self.config.getchildren() \ for entry in struct.getchildren() \ - if entry.tag in ['Ignore', 'Path']] + if entry.tag == 'Path'] def gatherCurrentData(self, entry): """Default implementation of the information gathering routines.""" pass + def missing_attrs(self, entry): + required = self.__req__[entry.tag] + if isinstance(required, dict): + required = ["type"] + try: + required.extend(self.__req__[entry.tag][entry.get("type")]) + except KeyError: + pass + + return [attr for attr in required + if attr not in entry.attrib or not entry.attrib[attr]] + def canVerify(self, entry): """Test if entry has enough information to be verified.""" if not self.handlesEntry(entry): @@ -148,13 +174,12 @@ class Tool: entry.get('failure'))) return False - missing = [attr for attr in self.__req__[entry.tag] \ - if attr not in entry.attrib] + missing = self.missing_attrs(entry) if missing: - 
self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (entry.tag, entry.get('name'))) - self.logger.error("\t... due to absence of %s attribute(s)" % \ - (":".join(missing))) + self.logger.error("Cannot verify entry %s:%s due to missing " + "required attribute(s): %s" % + (entry.tag, entry.get('name'), + ", ".join(missing))) try: self.gatherCurrentData(entry) except: @@ -167,6 +192,11 @@ class Tool: """Return a list of extra entries.""" return [] + def primarykey(self, entry): + """ return a string that should be unique amongst all entries + in the specification """ + return "%s:%s" % (entry.tag, entry.get("name")) + def canInstall(self, entry): """Test if entry has enough information to be installed.""" if not self.handlesEntry(entry): @@ -177,13 +207,12 @@ class Tool: (entry.tag, entry.get('name'))) return False - missing = [attr for attr in self.__ireq__[entry.tag] \ - if attr not in entry.attrib or not entry.attrib[attr]] + missing = self.missing_attrs(entry) if missing: - self.logger.error("Incomplete information for entry %s:%s; cannot install" \ - % (entry.tag, entry.get('name'))) - self.logger.error("\t... 
due to absence of %s attribute" % \ - (":".join(missing))) + self.logger.error("Incomplete information for entry %s:%s; cannot " + "install due to absence of attribute(s): %s" % + (entry.tag, entry.get('name'), + ", ".join(missing))) return False return True @@ -305,8 +334,7 @@ class SvcTool(Tool): return self.cmd.run(self.get_svc_command(service, restart_target))[0] def check_service(self, service): - # not supported for this driver - return 0 + return self.cmd.run(self.get_svc_command(service, 'status'))[0] == 0 def Remove(self, services): """ Dummy implementation of service removal method """ @@ -321,13 +349,12 @@ class SvcTool(Tool): return for entry in [ent for ent in bundle if self.handlesEntry(ent)]: - mode = entry.get('mode', 'default') - if (mode == 'manual' or - (mode == 'interactive_only' and + restart = entry.get("restart", "true") + if (restart.lower() == "false" or + (restart.lower == "interactive" and not self.setup['interactive'])): continue - # need to handle servicemode = (build|default) - # need to handle mode = (default|supervised) + rc = None if entry.get('status') == 'on': if self.setup['servicemode'] == 'build': @@ -336,11 +363,7 @@ class SvcTool(Tool): if self.setup['interactive']: prompt = ('Restart service %s?: (y/N): ' % entry.get('name')) - # py3k compatibility - try: - ans = raw_input(prompt) - except NameError: - ans = input(prompt) + ans = input(prompt) if ans not in ['y', 'Y']: continue rc = self.restart_service(entry) @@ -351,3 +374,19 @@ class SvcTool(Tool): if rc: self.logger.error("Failed to manipulate service %s" % (entry.get('name'))) + + def Install(self, entries, states): + """Install all entries in sublist.""" + for entry in entries: + if entry.get('install', 'true').lower() == 'false': + self.logger.info("Service %s installation is false. Skipping " + "installation." 
% (entry.get('name'))) + continue + try: + func = getattr(self, "Install%s" % (entry.tag)) + states[entry] = func(entry) + if states[entry]: + self.modified.append(entry) + except: + self.logger.error("Unexpected failure of install method for entry type %s" + % (entry.tag), exc_info=1) diff --git a/src/lib/Bcfg2/Client/Tools/launchd.py b/src/lib/Bcfg2/Client/Tools/launchd.py index c022d32ae..6f08559a2 100644 --- a/src/lib/Bcfg2/Client/Tools/launchd.py +++ b/src/lib/Bcfg2/Client/Tools/launchd.py @@ -88,11 +88,6 @@ class launchd(Bcfg2.Client.Tools.Tool): def InstallService(self, entry): """Enable or disable launchd item.""" - # don't take any actions for mode='manual' - if entry.get('mode', 'default') == 'manual': - self.logger.info("Service %s mode set to manual. Skipping " - "installation." % (entry.get('name'))) - return False name = entry.get('name') if entry.get('status') == 'on': self.logger.error("Installing service %s" % name) diff --git a/src/lib/Bcfg2/Client/Tools/rpmtools.py b/src/lib/Bcfg2/Client/Tools/rpmtools.py index 7441b2c06..32a04262d 100755 --- a/src/lib/Bcfg2/Client/Tools/rpmtools.py +++ b/src/lib/Bcfg2/Client/Tools/rpmtools.py @@ -43,7 +43,6 @@ try: isprelink_imported = True except ImportError: isprelink_imported = False - #print '*********************** isprelink not loaded ***********************' # If the prelink command is installed on the system then we need to do # prelink -y on files. 
@@ -333,7 +332,6 @@ def prelink_size_check(filename): fsize += len(data) elif whitelist_re.search(filename) and not blacklist_re.search(filename): - # print "***** Warning isprelink extension failed to import ******" plf.close() cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ % (re.escape(filename)) @@ -601,7 +599,6 @@ def rpm_verify_package(vp_ts, header, verify_options): omitmask |= VERIFY_RDEV omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS) - #print 'omitmask =', omitmask package_results = {} @@ -754,58 +751,41 @@ class Rpmtscallback(object): """ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE: pass - #print 'rpm.RPMCALLBACK_INST_OPEN_FILE' elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE: pass - #print 'rpm.RPMCALLBACK_INST_CLOSE_FILE' elif reason == rpm.RPMCALLBACK_INST_START: pass - #print 'rpm.RPMCALLBACK_INST_START' elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \ reason == rpm.RPMCALLBACK_INST_PROGRESS: pass - #print 'rpm.RPMCALLBACK_TRANS_PROGRESS or \ # rpm.RPMCALLBACK_INST_PROGRESS' elif reason == rpm.RPMCALLBACK_TRANS_START: pass - #print 'rpm.RPMCALLBACK_TRANS_START' elif reason == rpm.RPMCALLBACK_TRANS_STOP: pass - #print 'rpm.RPMCALLBACK_TRANS_STOP' elif reason == rpm.RPMCALLBACK_REPACKAGE_START: pass - #print 'rpm.RPMCALLBACK_REPACKAGE_START' elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS: pass - #print 'rpm.RPMCALLBACK_REPACKAGE_PROGRESS' elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP: pass - #print 'rpm.RPMCALLBACK_REPACKAGE_STOP' elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS: pass - #print 'rpm.RPMCALLBACK_UNINST_PROGRESS' elif reason == rpm.RPMCALLBACK_UNINST_START: pass - #print 'rpm.RPMCALLBACK_UNINST_START' elif reason == rpm.RPMCALLBACK_UNINST_STOP: pass - #print 'rpm.RPMCALLBACK_UNINST_STOP' - #print '***Package ', key, ' deleted ***' # How do we get at this? 
# RPM.modified += key elif reason == rpm.RPMCALLBACK_UNPACK_ERROR: pass - #print 'rpm.RPMCALLBACK_UNPACK_ERROR' elif reason == rpm.RPMCALLBACK_CPIO_ERROR: pass - #print 'rpm.RPMCALLBACK_CPIO_ERROR' elif reason == rpm.RPMCALLBACK_UNKNOWN: pass - #print 'rpm.RPMCALLBACK_UNKNOWN' else: print('ERROR - Fell through callBack') - #print reason, amount, total, key, client_data def rpm_erase(erase_pkgspecs, erase_flags): """ @@ -836,7 +816,6 @@ def rpm_erase(erase_pkgspecs, erase_flags): erase_ts.addErase(idx) #for te in erase_ts: - # print "%s %s:%s-%s.%s" % (te.N(), te.E(), te.V(), te.R(), te.A()) erase_problems = [] if 'nodeps' not in erase_flags: @@ -847,8 +826,6 @@ def rpm_erase(erase_pkgspecs, erase_flags): erase_callback = Rpmtscallback() erase_ts.run(erase_callback.callback, 'Erase') #else: - # print 'ERROR - Dependency failures on package erase' - # print erase_problems erase_ts.closeDB() del erase_ts diff --git a/src/lib/Bcfg2/Component.py b/src/lib/Bcfg2/Component.py index eb9ea166a..bb0e64102 100644 --- a/src/lib/Bcfg2/Component.py +++ b/src/lib/Bcfg2/Component.py @@ -6,6 +6,7 @@ import inspect import logging import os import pydoc +import socket import sys import time import threading @@ -59,12 +60,14 @@ def run_component(component_cls, listen_all, location, daemon, pidfile_name, pidfile.close() component = component_cls(cfile=cfile, **cls_kwargs) - up = urlparse(location) - port = tuple(up[1].split(':')) - port = (port[0], int(port[1])) + hostname, port = urlparse(location)[1].split(':') + server_address = socket.getaddrinfo(hostname, + port, + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0][4] try: server = XMLRPCServer(listen_all, - port, + server_address, keyfile=keyfile, certfile=certfile, register=register, @@ -82,23 +85,6 @@ def run_component(component_cls, listen_all, location, daemon, pidfile_name, server.server_close() component.shutdown() -def exposed(func): - """Mark a method to be exposed publically. 
- - Examples: - class MyComponent (Component): - @expose - def my_method (self, param1, param2): - do_stuff() - - class MyComponent (Component): - def my_method (self, param1, param2): - do_stuff() - my_method = expose(my_method) - - """ - func.exposed = True - return func def automatic(func, period=10): """Mark a method to be run periodically.""" @@ -150,6 +136,11 @@ class Component (object): self.lock = threading.Lock() self.instance_statistics = Statistics() + def critical_error(self, operation): + """Log and err, traceback and return an xmlrpc fault to client.""" + logger.error(operation, exc_info=1) + raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR, "Critical unexpected failure: %s" % (operation)) + def do_tasks(self): """Perform automatic tasks for the component. @@ -213,7 +204,8 @@ class Component (object): method_func = self._resolve_exposed_method(method) except NoExposedMethod: self.logger.error("Unknown method %s" % (method)) - raise xmlrpclib.Fault(7, "Unknown method %s" % method) + raise xmlrpclib.Fault(xmlrpclib.METHOD_NOT_FOUND, + "Unknown method %s" % method) except Exception: e = sys.exc_info()[1] if getattr(e, "log", True): @@ -246,14 +238,7 @@ class Component (object): raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e)) return result - def listMethods(self): - """Custom XML-RPC introspective method list.""" - return [ - name for name, func in inspect.getmembers(self, callable) - if getattr(func, "exposed", False) - ] - listMethods = exposed(listMethods) - + @exposed def methodHelp(self, method_name): """Custom XML-RPC introspective method help. 
@@ -266,19 +251,18 @@ class Component (object): except NoExposedMethod: return "" return pydoc.getdoc(func) - methodHelp = exposed(methodHelp) + @exposed def get_name(self): """The name of the component.""" return self.name - get_name = exposed(get_name) + @exposed def get_implementation(self): """The implementation of the component.""" return self.implementation - get_implementation = exposed(get_implementation) + @exposed def get_statistics(self, _): """Get current statistics about component execution""" return self.instance_statistics.display() - get_statistics = exposed(get_statistics) diff --git a/src/lib/Bcfg2/Encryption.py b/src/lib/Bcfg2/Encryption.py new file mode 100755 index 000000000..62b22d7de --- /dev/null +++ b/src/lib/Bcfg2/Encryption.py @@ -0,0 +1,75 @@ +#!/usr/bin/python -Ott + +import os +import base64 +from M2Crypto import Rand +from M2Crypto.EVP import Cipher, EVPError +from Bcfg2.Bcfg2Py3k import StringIO + +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +ENCRYPT = 1 +DECRYPT = 0 +ALGORITHM = "aes_256_cbc" +IV = '\0' * 16 + +Rand.rand_seed(os.urandom(1024)) + +def _cipher_filter(cipher, instr): + inbuf = StringIO(instr) + outbuf = StringIO() + while 1: + buf = inbuf.read() + if not buf: + break + outbuf.write(cipher.update(buf)) + outbuf.write(cipher.final()) + rv = outbuf.getvalue() + inbuf.close() + outbuf.close() + return rv + +def str_encrypt(plaintext, key, iv=IV, algorithm=ALGORITHM, salt=None): + """ encrypt a string """ + cipher = Cipher(alg=algorithm, key=key, iv=iv, op=ENCRYPT, salt=salt) + return _cipher_filter(cipher, plaintext) + +def str_decrypt(crypted, key, iv=IV, algorithm=ALGORITHM): + """ decrypt a string """ + cipher = Cipher(alg=algorithm, key=key, iv=iv, op=DECRYPT) + return _cipher_filter(cipher, crypted) + +def ssl_decrypt(data, passwd, algorithm=ALGORITHM): + """ decrypt openssl-encrypted data """ + # base64-decode the data if necessary + try: + data = base64.b64decode(data) + except 
TypeError: + # already decoded + pass + + salt = data[8:16] + hashes = [md5(passwd + salt).digest()] + for i in range(1,3): + hashes.append(md5(hashes[i-1] + passwd + salt).digest()) + key = hashes[0] + hashes[1] + iv = hashes[2] + + return str_decrypt(data[16:], key=key, iv=iv) + +def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None): + """ encrypt data in a format that is openssl compatible """ + if salt is None: + salt = Rand.rand_bytes(8) + + hashes = [md5(passwd + salt).digest()] + for i in range(1,3): + hashes.append(md5(hashes[i-1] + passwd + salt).digest()) + key = hashes[0] + hashes[1] + iv = hashes[2] + + crypted = str_encrypt(plaintext, key=key, salt=salt, iv=iv) + return base64.b64encode("Salted__" + salt + crypted) + "\n" diff --git a/src/lib/Bcfg2/Logger.py b/src/lib/Bcfg2/Logger.py index 81b45550f..26c1d52f6 100644 --- a/src/lib/Bcfg2/Logger.py +++ b/src/lib/Bcfg2/Logger.py @@ -57,9 +57,11 @@ class TermiosFormatter(logging.Formatter): lines = int(math.ceil(float(len(record.msg)) / columns)) for lineNumber in range(lines): indices = [idx for idx in [(colNum * lines) + lineNumber - for colNum in range(columns)] if idx < len(record.msg)] - format = (len(indices) * (" %%-%ds " % columnWidth)) - returns.append(format % tuple([record.msg[idx] for idx in indices])) + for colNum in range(columns)] + if idx < len(record.msg)] + retformat = (len(indices) * (" %%-%ds " % columnWidth)) + returns.append(retformat % tuple([record.msg[idx] + for idx in indices])) else: returns.append(str(record.msg)) if record.exc_info: @@ -86,6 +88,8 @@ class FragmentingSysLogHandler(logging.handlers.SysLogHandler): error = record.exc_info record.exc_info = None msgdata = record.msg + if len(msgdata) == 0: + return while msgdata: newrec = copy.copy(record) newrec.msg = msgdata[:250] @@ -122,20 +126,15 @@ class FragmentingSysLogHandler(logging.handlers.SysLogHandler): """ pass - -def add_console_handler(level): +def add_console_handler(level=logging.DEBUG): """Add a 
logging handler that logs at a level to sys.stdout.""" console = logging.StreamHandler(sys.stdout) - if level is True: - console.setLevel(logging.DEBUG) - else: - console.setLevel(level) + console.setLevel(level) # tell the handler to use this format console.setFormatter(TermiosFormatter()) logging.root.addHandler(console) - -def add_syslog_handler(procname, syslog_facility): +def add_syslog_handler(procname, syslog_facility, level=logging.DEBUG): """Add a logging handler that logs as procname to syslog_facility.""" try: try: @@ -146,7 +145,7 @@ def add_syslog_handler(procname, syslog_facility): syslog = FragmentingSysLogHandler(procname, ('localhost', 514), syslog_facility) - syslog.setLevel(logging.DEBUG) + syslog.setLevel(level) syslog.setFormatter(logging.Formatter('%(name)s[%(process)d]: %(message)s')) logging.root.addHandler(syslog) except socket.error: @@ -154,15 +153,13 @@ def add_syslog_handler(procname, syslog_facility): except: print("Failed to activate syslogging") - -def add_file_handler(to_file): +def add_file_handler(to_file, level=logging.DEBUG): """Add a logging handler that logs to to_file.""" filelog = logging.FileHandler(to_file) - filelog.setLevel(logging.DEBUG) + filelog.setLevel(level) filelog.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s')) logging.root.addHandler(filelog) - def setup_logging(procname, to_console=True, to_syslog=True, syslog_facility='daemon', level=0, to_file=None): """Setup logging for Bcfg2 software.""" @@ -170,11 +167,16 @@ def setup_logging(procname, to_console=True, to_syslog=True, return if to_console: - add_console_handler(to_console) + if to_console == True: + clvl = min(logging.WARNING, level) + else: + clvl = min(to_console, level) + add_console_handler(clvl) if to_syslog: - add_syslog_handler(procname, syslog_facility) + slvl = min(level, logging.INFO) + add_syslog_handler(procname, syslog_facility, level=slvl) if to_file is not None: - add_file_handler(to_file) + 
add_file_handler(to_file, level=level) - logging.root.setLevel(level) + logging.root.setLevel(logging.DEBUG) logging.already_setup = True diff --git a/src/lib/Bcfg2/Options.py b/src/lib/Bcfg2/Options.py index dfb062341..1883bc222 100644 --- a/src/lib/Bcfg2/Options.py +++ b/src/lib/Bcfg2/Options.py @@ -1,25 +1,24 @@ """Option parsing library for utilities.""" +import copy import getopt -import re +import inspect import os -import sys +import re import shlex +import sys import Bcfg2.Client.Tools # Compatibility imports from Bcfg2.Bcfg2Py3k import ConfigParser +from Bcfg2.version import __version__ -def bool_cook(x): - if x: - return True - else: - return False class OptionFailure(Exception): pass -DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf' #/etc/bcfg2.conf -DEFAULT_INSTALL_PREFIX = '/usr' #/usr +DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf' +DEFAULT_INSTALL_PREFIX = '/usr' + class DefaultConfigParser(ConfigParser.ConfigParser): def get(self, section, option, **kwargs): @@ -55,16 +54,9 @@ class DefaultConfigParser(ConfigParser.ConfigParser): class Option(object): - def get_cooked_value(self, value): - if self.boolean: - return True - if self.cook: - return self.cook(value) - else: - return value - def __init__(self, desc, default, cmd=False, odesc=False, - env=False, cf=False, cook=False, long_arg=False): + env=False, cf=False, cook=False, long_arg=False, + deprecated_cf=None): self.desc = desc self.default = default self.cmd = cmd @@ -72,33 +64,38 @@ class Option(object): if not self.long: if cmd and (cmd[0] != '-' or len(cmd) != 2): raise OptionFailure("Poorly formed command %s" % cmd) - else: - if cmd and (not cmd.startswith('--')): - raise OptionFailure("Poorly formed command %s" % cmd) + elif cmd and (not cmd.startswith('--')): + raise OptionFailure("Poorly formed command %s" % cmd) self.odesc = odesc self.env = env self.cf = cf + self.deprecated_cf = deprecated_cf self.boolean = False - if not odesc and not cook: + if not odesc and not cook and 
isinstance(self.default, bool): self.boolean = True self.cook = cook + def get_cooked_value(self, value): + if self.boolean: + return True + if self.cook: + return self.cook(value) + else: + return value + def buildHelpMessage(self): - msg = '' - if self.cmd: - if not self.long: - msg = self.cmd.ljust(3) - else: - msg = self.cmd - if self.odesc: - if self.long: - msg = "%-28s" % ("%s=%s" % (self.cmd, self.odesc)) - else: - msg += '%-25s' % (self.odesc) + vals = [] + if not self.cmd: + return '' + if self.odesc: + if self.long: + vals.append("%s=%s" % (self.cmd, self.odesc)) else: - msg += '%-25s' % ('') - msg += "%s\n" % self.desc - return msg + vals.append("%s %s" % (self.cmd, self.odesc)) + else: + vals.append(self.cmd) + vals.append(self.desc) + return " %-28s %s\n" % tuple(vals) def buildGetopt(self): gstr = '' @@ -112,7 +109,7 @@ class Option(object): def buildLongGetopt(self): if self.odesc: - return self.cmd[2:]+'=' + return self.cmd[2:] + '=' else: return self.cmd[2:] @@ -127,7 +124,10 @@ class Option(object): self.value = True return if self.cmd and self.cmd in rawopts: - data = rawopts[rawopts.index(self.cmd) + 1] + if self.odesc: + data = rawopts[rawopts.index(self.cmd) + 1] + else: + data = True self.value = self.get_cooked_value(data) return # No command line option found @@ -140,9 +140,20 @@ class Option(object): return except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): pass + if self.deprecated_cf: + try: + self.value = self.get_cooked_value(configparser.get(*self.deprecated_cf)) + print("Warning: [%s] %s is deprecated, use [%s] %s instead" + % (self.deprecated_cf[0], self.deprecated_cf[1], + self.cf[0], self.cf[1])) + return + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + pass + # Default value not cooked self.value = self.default + class OptionSet(dict): def __init__(self, *args, **kwargs): dict.__init__(self, *args) @@ -154,8 +165,14 @@ class OptionSet(dict): self.cfp = DefaultConfigParser() if 
(len(self.cfp.read(self.cfile)) == 0 and ('quiet' not in kwargs or not kwargs['quiet'])): - print("Warning! Unable to read specified configuration file: %s" % - self.cfile) + # suppress warnings if called from bcfg2-admin init + caller = inspect.stack()[-1][1].split('/')[-1] + if caller == 'bcfg2-admin' and len(sys.argv) > 1: + if sys.argv[1] == 'init': + return + else: + print("Warning! Unable to read specified configuration file: %s" % + self.cfile) def buildGetopt(self): return ''.join([opt.buildGetopt() for opt in list(self.values())]) @@ -170,18 +187,28 @@ class OptionSet(dict): hlist = [] # list of _non-empty_ help messages for opt in list(self.values()): hm = opt.buildHelpMessage() - if hm != '': + if hm: hlist.append(hm) - return ' '.join(hlist) + return ''.join(hlist) def helpExit(self, msg='', code=1): if msg: print(msg) - print("Usage:\n %s" % self.buildHelpMessage()) + print("Usage:") + print(self.buildHelpMessage()) + raise SystemExit(code) + + def versionExit(self, code=0): + print("%s %s on Python %s" % + (os.path.basename(sys.argv[0]), + __version__, + ".".join(str(v) for v in sys.version_info[0:3]))) raise SystemExit(code) def parse(self, argv, do_getopt=True): '''Parse options from command line.''' + if VERSION not in self.values(): + self['__version__'] = VERSION if do_getopt: try: opts, args = getopt.getopt(argv, self.buildGetopt(), @@ -191,6 +218,8 @@ class OptionSet(dict): self.helpExit(err) if '-h' in argv: self.helpExit('', 0) + if '--version' in argv: + self.versionExit() self['args'] = args for key in list(self.keys()): if key == 'args': @@ -203,17 +232,22 @@ class OptionSet(dict): if hasattr(option, 'value'): val = option.value self[key] = val + if "__version__" in self: + del self['__version__'] + def list_split(c_string): if c_string: return re.split("\s*,\s*", c_string) return [] + def colon_split(c_string): if c_string: return c_string.split(':') return [] + def get_bool(s): # these values copied from 
ConfigParser.RawConfigParser.getboolean # with the addition of True and False @@ -225,210 +259,784 @@ def get_bool(s): return False else: raise ValueError + + +""" +Options: + + Accepts keyword argument list with the following values: + default: default value for the option + cmd: command line switch + odesc: option description + cf: tuple containing section/option + cook: method for parsing option + long_arg: (True|False) specifies whether cmd is a long argument +""" # General options -CFILE = Option('Specify configuration file', DEFAULT_CONFIG_LOCATION, cmd='-C', - odesc='<conffile>') -LOCKFILE = Option('Specify lockfile', - "/var/lock/bcfg2.run", - cf=('components', 'lockfile'), - odesc='<Path to lockfile>') -HELP = Option('Print this usage message', False, cmd='-h') -DEBUG = Option("Enable debugging output", False, cmd='-d') -VERBOSE = Option("Enable verbose output", False, cmd='-v') -DAEMON = Option("Daemonize process, storing pid", False, - cmd='-D', odesc="<pidfile>") -INSTALL_PREFIX = Option('Installation location', cf=('server', 'prefix'), - default=DEFAULT_INSTALL_PREFIX, odesc='</path>') -SENDMAIL_PATH = Option('Path to sendmail', cf=('reports', 'sendmailpath'), - default='/usr/lib/sendmail') -INTERACTIVE = Option('Run interactively, prompting the user for each change', - default=False, - cmd='-I', ) -ENCODING = Option('Encoding of cfg files', - default='UTF-8', - cmd='-E', - odesc='<encoding>', - cf=('components', 'encoding')) -PARANOID_PATH = Option('Specify path for paranoid file backups', - default='/var/cache/bcfg2', cf=('paranoid', 'path'), - odesc='<paranoid backup path>') -PARANOID_MAX_COPIES = Option('Specify the number of paranoid copies you want', - default=1, cf=('paranoid', 'max_copies'), - odesc='<max paranoid copies>') -OMIT_LOCK_CHECK = Option('Omit lock check', default=False, cmd='-O') -CORE_PROFILE = Option('profile', - default=False, cmd='-p', ) -FILES_ON_STDIN = Option('Operate on a list of files supplied on stdin', - cmd='--stdin', 
default=False, long_arg=True) -SCHEMA_PATH = Option('Path to XML Schema files', cmd='--schema', - odesc='<schema path>', - default="%s/share/bcfg2/schemas" % DEFAULT_INSTALL_PREFIX, - long_arg=True) -REQUIRE_SCHEMA = Option("Require property files to have matching schema files", - cmd="--require-schema", default=False, long_arg=True) - -# Metadata options -MDATA_OWNER = Option('Default Path owner', - default='root', cf=('mdata', 'owner'), - odesc='owner permissions') -MDATA_GROUP = Option('Default Path group', - default='root', cf=('mdata', 'group'), - odesc='group permissions') -MDATA_IMPORTANT = Option('Default Path priority (importance)', - default='False', cf=('mdata', 'important'), - odesc='Important entries are installed first') -MDATA_PERMS = Option('Default Path permissions', - '644', cf=('mdata', 'perms'), - odesc='octal permissions') -MDATA_PARANOID = Option('Default Path paranoid setting', - 'true', cf=('mdata', 'paranoid'), - odesc='Path paranoid setting') -MDATA_SENSITIVE = Option('Default Path sensitive setting', - 'false', cf=('mdata', 'sensitive'), - odesc='Path sensitive setting') +CFILE = \ + Option('Specify configuration file', + default=DEFAULT_CONFIG_LOCATION, + cmd='-C', + odesc='<conffile>') +LOCKFILE = \ + Option('Specify lockfile', + default='/var/lock/bcfg2.run', + odesc='<Path to lockfile>', + cf=('components', 'lockfile')) +HELP = \ + Option('Print this usage message', + default=False, + cmd='-h') +VERSION = \ + Option('Print the version and exit', + default=False, + cmd='--version', long_arg=True) +DAEMON = \ + Option("Daemonize process, storing pid", + default=None, + cmd='-D', + odesc='<pidfile>') +INSTALL_PREFIX = \ + Option('Installation location', + default=DEFAULT_INSTALL_PREFIX, + odesc='</path>', + cf=('server', 'prefix')) +SENDMAIL_PATH = \ + Option('Path to sendmail', + default='/usr/lib/sendmail', + cf=('reports', 'sendmailpath')) +INTERACTIVE = \ + Option('Run interactively, prompting the user for each change', + 
default=False, + cmd='-I', ) +ENCODING = \ + Option('Encoding of cfg files', + default='UTF-8', + cmd='-E', + odesc='<encoding>', + cf=('components', 'encoding')) +PARANOID_PATH = \ + Option('Specify path for paranoid file backups', + default='/var/cache/bcfg2', + odesc='<paranoid backup path>', + cf=('paranoid', 'path')) +PARANOID_MAX_COPIES = \ + Option('Specify the number of paranoid copies you want', + default=1, + odesc='<max paranoid copies>', + cf=('paranoid', 'max_copies')) +OMIT_LOCK_CHECK = \ + Option('Omit lock check', + default=False, + cmd='-O') +CORE_PROFILE = \ + Option('profile', + default=False, + cmd='-p') +SCHEMA_PATH = \ + Option('Path to XML Schema files', + default='%s/share/bcfg2/schemas' % DEFAULT_INSTALL_PREFIX, + cmd='--schema', + odesc='<schema path>', + cf=('lint', 'schema'), + long_arg=True) +INTERPRETER = \ + Option("Python interpreter to use", + default='best', + cmd="--interpreter", + odesc='<python|bpython|ipython|best>', + cf=('bcfg2-info', 'interpreter'), + long_arg=True) + +# Metadata options (mdata section) +MDATA_OWNER = \ + Option('Default Path owner', + default='root', + odesc='owner permissions', + cf=('mdata', 'owner')) +MDATA_GROUP = \ + Option('Default Path group', + default='root', + odesc='group permissions', + cf=('mdata', 'group')) +MDATA_IMPORTANT = \ + Option('Default Path priority (importance)', + default='False', + odesc='Important entries are installed first', + cf=('mdata', 'important')) +MDATA_PERMS = \ + Option('Default Path permissions', + default='644', + odesc='octal permissions', + cf=('mdata', 'perms')) +MDATA_SECONTEXT = \ + Option('Default SELinux context', + default='__default__', + odesc='SELinux context', + cf=('mdata', 'secontext')) +MDATA_PARANOID = \ + Option('Default Path paranoid setting', + default='true', + odesc='Path paranoid setting', + cf=('mdata', 'paranoid')) +MDATA_SENSITIVE = \ + Option('Default Path sensitive setting', + default='false', + odesc='Path sensitive setting', + 
cf=('mdata', 'sensitive')) # Server options -SERVER_REPOSITORY = Option('Server repository path', '/var/lib/bcfg2', - cf=('server', 'repository'), cmd='-Q', - odesc='<repository path>') -SERVER_PLUGINS = Option('Server plugin list', cf=('server', 'plugins'), - # default server plugins - default=[ - 'Bundler', - 'Cfg', - 'Metadata', - 'Pkgmgr', - 'Rules', - 'SSHbase', - ], - cook=list_split) -SERVER_MCONNECT = Option('Server Metadata Connector list', cook=list_split, - cf=('server', 'connectors'), default=['Probes'], ) -SERVER_FILEMONITOR = Option('Server file monitor', cf=('server', 'filemonitor'), - default='default', odesc='File monitoring driver') -SERVER_LISTEN_ALL = Option('Listen on all interfaces', - cf=('server', 'listen_all'), - cmd='--listen-all', - default=False, - long_arg=True, - cook=get_bool, - odesc='True|False') -SERVER_LOCATION = Option('Server Location', cf=('components', 'bcfg2'), - default='https://localhost:6789', cmd='-S', - odesc='https://server:port') -SERVER_STATIC = Option('Server runs on static port', cf=('components', 'bcfg2'), - default=False, cook=bool_cook) -SERVER_KEY = Option('Path to SSL key', cf=('communication', 'key'), - default=False, cmd='--ssl-key', odesc='<ssl key>', - long_arg=True) -SERVER_CERT = Option('Path to SSL certificate', default='/etc/bcfg2.key', - cf=('communication', 'certificate'), odesc='<ssl cert>') -SERVER_CA = Option('Path to SSL CA Cert', default=None, - cf=('communication', 'ca'), odesc='<ca cert>') -SERVER_PASSWORD = Option('Communication Password', cmd='-x', odesc='<password>', - cf=('communication', 'password'), default=False) -SERVER_PROTOCOL = Option('Server Protocol', cf=('communication', 'procotol'), - default='xmlrpc/ssl') +SERVER_REPOSITORY = \ + Option('Server repository path', + default='/var/lib/bcfg2', + cmd='-Q', + odesc='<repository path>', + cf=('server', 'repository')) +SERVER_PLUGINS = \ + Option('Server plugin list', + # default server plugins + default=['Bundler', 'Cfg', 'Metadata', 
'Pkgmgr', 'Rules', 'SSHbase'], + cf=('server', 'plugins'), + cook=list_split) +SERVER_MCONNECT = \ + Option('Server Metadata Connector list', + default=['Probes'], + cf=('server', 'connectors'), + cook=list_split) +SERVER_FILEMONITOR = \ + Option('Server file monitor', + default='default', + odesc='File monitoring driver', + cf=('server', 'filemonitor')) +SERVER_FAM_IGNORE = \ + Option('File globs to ignore', + default=['*~', '*#', '.#*', '*.swp', '.*.swx', 'SCCS', '.svn', + '4913', '.gitignore',], + cf=('server', 'ignore_files'), + cook=list_split) +SERVER_LISTEN_ALL = \ + Option('Listen on all interfaces', + default=False, + cmd='--listen-all', + cf=('server', 'listen_all'), + cook=get_bool, + long_arg=True) +SERVER_LOCATION = \ + Option('Server Location', + default='https://localhost:6789', + cmd='-S', + odesc='https://server:port', + cf=('components', 'bcfg2')) +SERVER_STATIC = \ + Option('Server runs on static port', + default=False, + cf=('components', 'bcfg2')) +SERVER_KEY = \ + Option('Path to SSL key', + default=None, + cmd='--ssl-key', + odesc='<ssl key>', + cf=('communication', 'key'), + long_arg=True) +SERVER_CERT = \ + Option('Path to SSL certificate', + default='/etc/bcfg2.key', + odesc='<ssl cert>', + cf=('communication', 'certificate')) +SERVER_CA = \ + Option('Path to SSL CA Cert', + default=None, + odesc='<ca cert>', + cf=('communication', 'ca')) +SERVER_PASSWORD = \ + Option('Communication Password', + default=None, + cmd='-x', + odesc='<password>', + cf=('communication', 'password')) +SERVER_PROTOCOL = \ + Option('Server Protocol', + default='xmlrpc/ssl', + cf=('communication', 'procotol')) +SERVER_BACKEND = \ + Option('Server Backend', + default='best', + cf=('server', 'backend')) + +# database options +DB_ENGINE = \ + Option('Database engine', + default='sqlite3', + cf=('database', 'engine'), + deprecated_cf=('statistics', 'database_engine')) +DB_NAME = \ + Option('Database name', + default=os.path.join(SERVER_REPOSITORY.default, 
"bcfg2.sqlite"), + cf=('database', 'name'), + deprecated_cf=('statistics', 'database_name')) +DB_USER = \ + Option('Database username', + default=None, + cf=('database', 'user'), + deprecated_cf=('statistics', 'database_user')) +DB_PASSWORD = \ + Option('Database password', + default=None, + cf=('database', 'password'), + deprecated_cf=('statistics', 'database_password')) +DB_HOST = \ + Option('Database host', + default='localhost', + cf=('database', 'host'), + deprecated_cf=('statistics', 'database_host')) +DB_PORT = \ + Option('Database port', + default='', + cf=('database', 'port'), + deprecated_cf=('statistics', 'database_port')) + +# Django options +WEB_CFILE = \ + Option('Web interface configuration file', + default="/etc/bcfg2-web.conf", + cmd='-W', + cf=('statistics', 'config'),) +DJANGO_TIME_ZONE = \ + Option('Django timezone', + default=None, + cf=('statistics', 'time_zone'),) +DJANGO_DEBUG = \ + Option('Django debug', + default=None, + cf=('statistics', 'web_debug'), + cook=get_bool,) +# Django options +DJANGO_WEB_PREFIX = \ + Option('Web prefix', + default=None, + cf=('statistics', 'web_prefix'),) + + # Client options -CLIENT_KEY = Option('Path to SSL key', cf=('communication', 'key'), - default=None, cmd="--ssl-key", odesc='<ssl key>', - long_arg=True) -CLIENT_CERT = Option('Path to SSL certificate', default=None, cmd="--ssl-cert", - cf=('communication', 'certificate'), odesc='<ssl cert>', - long_arg=True) -CLIENT_CA = Option('Path to SSL CA Cert', default=None, cmd="--ca-cert", - cf=('communication', 'ca'), odesc='<ca cert>', - long_arg=True) -CLIENT_SCNS = Option('List of server commonNames', default=None, cmd="--ssl-cns", - cf=('communication', 'serverCommonNames'), - odesc='<commonName1:commonName2>', cook=list_split, - long_arg=True) -CLIENT_PROFILE = Option('Assert the given profile for the host', - default=False, cmd='-p', odesc="<profile>") -CLIENT_RETRIES = Option('The number of times to retry network communication', - default='3', cmd='-R', 
cf=('communication', 'retries'), - odesc="<retry count>") -CLIENT_DRYRUN = Option('Do not actually change the system', - default=False, cmd='-n', ) -CLIENT_EXTRA_DISPLAY = Option('enable extra entry output', - default=False, cmd='-e', ) -CLIENT_PARANOID = Option('Make automatic backups of config files', - default=False, - cmd='-P', - cook=get_bool, - cf=('client', 'paranoid')) -CLIENT_DRIVERS = Option('Specify tool driver set', cmd='-D', - cf=('client', 'drivers'), - odesc="<driver1,driver2>", cook=list_split, - default=Bcfg2.Client.Tools.default) -CLIENT_CACHE = Option('Store the configuration in a file', - default=False, cmd='-c', odesc="<cache path>") -CLIENT_REMOVE = Option('Force removal of additional configuration items', - default=False, cmd='-r', odesc="<entry type|all>") -CLIENT_BUNDLE = Option('Only configure the given bundle(s)', default=[], - cmd='-b', odesc='<bundle:bundle>', cook=colon_split) -CLIENT_BUNDLEQUICK = Option('only verify/configure the given bundle(s)', default=False, - cmd='-Q') -CLIENT_INDEP = Option('Only configure independent entries, ignore bundles', default=False, - cmd='-z') -CLIENT_KEVLAR = Option('Run in kevlar (bulletproof) mode', default=False, - cmd='-k', ) -CLIENT_DLIST = Option('Run client in server decision list mode', default='none', - cf=('client', 'decision'), - cmd='-l', odesc='<whitelist|blacklist|none>') -CLIENT_FILE = Option('Configure from a file rather than querying the server', - default=False, cmd='-f', odesc='<specification path>') -CLIENT_QUICK = Option('Disable some checksum verification', default=False, - cmd='-q', ) -CLIENT_USER = Option('The user to provide for authentication', default='root', - cmd='-u', cf=('communication', 'user'), odesc='<user>') -CLIENT_SERVICE_MODE = Option('Set client service mode', default='default', - cmd='-s', odesc='<default|disabled|build>') -CLIENT_TIMEOUT = Option('Set the client XML-RPC timeout', default=90, - cmd='-t', cf=('communication', 'timeout'), - odesc='<timeout>') - 
-# bcfg2-test options -TEST_NOSEOPTS = Option('Options to pass to nosetests', default=[], - cmd='--nose-options', cf=('bcfg2_test', 'nose_options'), - odesc='<opts>', long_arg=True, cook=shlex.split) -TEST_IGNORE = Option('Ignore these entries if they fail to build.', default=[], - cmd='--ignore', - cf=('bcfg2_test', 'ignore_entries'), long_arg=True, - odesc='<Type>:<name>,<Type>:<name>', cook=list_split) - -# APT client tool options -CLIENT_APT_TOOLS_INSTALL_PATH = Option('Apt tools install path', - cf=('APT', 'install_path'), - default='/usr') -CLIENT_APT_TOOLS_VAR_PATH = Option('Apt tools var path', - cf=('APT', 'var_path'), default='/var') -CLIENT_SYSTEM_ETC_PATH = Option('System etc path', cf=('APT', 'etc_path'), - default='/etc') +CLIENT_KEY = \ + Option('Path to SSL key', + default=None, + cmd='--ssl-key', + odesc='<ssl key>', + cf=('communication', 'key'), + long_arg=True) +CLIENT_CERT = \ + Option('Path to SSL certificate', + default=None, + cmd='--ssl-cert', + odesc='<ssl cert>', + cf=('communication', 'certificate'), + long_arg=True) +CLIENT_CA = \ + Option('Path to SSL CA Cert', + default=None, + cmd='--ca-cert', + odesc='<ca cert>', + cf=('communication', 'ca'), + long_arg=True) +CLIENT_SCNS = \ + Option('List of server commonNames', + default=None, + cmd='--ssl-cns', + odesc='<CN1:CN2>', + cf=('communication', 'serverCommonNames'), + cook=list_split, + long_arg=True) +CLIENT_PROFILE = \ + Option('Assert the given profile for the host', + default=None, + cmd='-p', + odesc='<profile>') +CLIENT_RETRIES = \ + Option('The number of times to retry network communication', + default='3', + cmd='-R', + odesc='<retry count>', + cf=('communication', 'retries')) +CLIENT_RETRY_DELAY = \ + Option('The time in seconds to wait between retries', + default='1', + cmd='-y', + odesc='<retry delay>', + cf=('communication', 'retry_delay')) +CLIENT_DRYRUN = \ + Option('Do not actually change the system', + default=False, + cmd='-n') +CLIENT_EXTRA_DISPLAY = \ + 
Option('enable extra entry output', + default=False, + cmd='-e') +CLIENT_PARANOID = \ + Option('Make automatic backups of config files', + default=False, + cmd='-P', + cf=('client', 'paranoid'), + cook=get_bool) +CLIENT_DRIVERS = \ + Option('Specify tool driver set', + default=Bcfg2.Client.Tools.default, + cmd='-D', + odesc='<driver1,driver2>', + cf=('client', 'drivers'), + cook=list_split) +CLIENT_CACHE = \ + Option('Store the configuration in a file', + default=None, + cmd='-c', + odesc='<cache path>') +CLIENT_REMOVE = \ + Option('Force removal of additional configuration items', + default=None, + cmd='-r', + odesc='<entry type|all>') +CLIENT_BUNDLE = \ + Option('Only configure the given bundle(s)', + default=[], + cmd='-b', + odesc='<bundle:bundle>', + cook=colon_split) +CLIENT_SKIPBUNDLE = \ + Option('Configure everything except the given bundle(s)', + default=[], + cmd='-B', + odesc='<bundle:bundle>', + cook=colon_split) +CLIENT_BUNDLEQUICK = \ + Option('Only verify/configure the given bundle(s)', + default=False, + cmd='-Q') +CLIENT_INDEP = \ + Option('Only configure independent entries, ignore bundles', + default=False, + cmd='-z') +CLIENT_SKIPINDEP = \ + Option('Do not configure independent entries', + default=False, + cmd='-Z') +CLIENT_KEVLAR = \ + Option('Run in kevlar (bulletproof) mode', + default=False, + cmd='-k', ) +CLIENT_FILE = \ + Option('Configure from a file rather than querying the server', + default=None, + cmd='-f', + odesc='<specification path>') +CLIENT_QUICK = \ + Option('Disable some checksum verification', + default=False, + cmd='-q') +CLIENT_USER = \ + Option('The user to provide for authentication', + default='root', + cmd='-u', + odesc='<user>', + cf=('communication', 'user')) +CLIENT_SERVICE_MODE = \ + Option('Set client service mode', + default='default', + cmd='-s', + odesc='<default|disabled|build>') +CLIENT_TIMEOUT = \ + Option('Set the client XML-RPC timeout', + default=90, + cmd='-t', + odesc='<timeout>', + cf=('communication', 
'timeout')) +CLIENT_DLIST = \ + Option('Run client in server decision list mode', + default='none', + cmd='-l', + odesc='<whitelist|blacklist|none>', + cf=('client', 'decision')) +CLIENT_DECISION_LIST = \ + Option('Decision List', + default=False, + cmd='--decision-list', + odesc='<file>', + long_arg=True) + +# bcfg2-test and bcfg2-lint options +TEST_NOSEOPTS = \ + Option('Options to pass to nosetests', + default=[], + cmd='--nose-options', + odesc='<opts>', + cf=('bcfg2_test', 'nose_options'), + cook=shlex.split, + long_arg=True) +TEST_IGNORE = \ + Option('Ignore these entries if they fail to build.', + default=[], + cmd='--ignore', + odesc='<Type>:<name>,<Type>:<name>', + cf=('bcfg2_test', 'ignore_entries'), + cook=list_split, + long_arg=True) +LINT_CONFIG = \ + Option('Specify bcfg2-lint configuration file', + default='/etc/bcfg2-lint.conf', + cmd='--lint-config', + odesc='<conffile>', + long_arg=True) +LINT_SHOW_ERRORS = \ + Option('Show error handling', + default=False, + cmd='--list-errors', + long_arg=True) +LINT_FILES_ON_STDIN = \ + Option('Operate on a list of files supplied on stdin', + default=False, + cmd='--stdin', + long_arg=True) + +# individual client tool options +CLIENT_APT_TOOLS_INSTALL_PATH = \ + Option('Apt tools install path', + default='/usr', + cf=('APT', 'install_path')) +CLIENT_APT_TOOLS_VAR_PATH = \ + Option('Apt tools var path', + default='/var', + cf=('APT', 'var_path')) +CLIENT_SYSTEM_ETC_PATH = \ + Option('System etc path', + default='/etc', + cf=('APT', 'etc_path')) +CLIENT_PORTAGE_BINPKGONLY = \ + Option('Portage binary packages only', + default=False, + cf=('Portage', 'binpkgonly'), + cook=get_bool) +CLIENT_RPMNG_INSTALLONLY = \ + Option('RPMng install-only packages', + default=['kernel', 'kernel-bigmem', 'kernel-enterprise', + 'kernel-smp', 'kernel-modules', 'kernel-debug', + 'kernel-unsupported', 'kernel-devel', 'kernel-source', + 'kernel-default', 'kernel-largesmp-devel', + 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], + 
cf=('RPMng', 'installonlypackages'), + cook=list_split) +CLIENT_RPMNG_PKG_CHECKS = \ + Option("Perform RPMng package checks", + default=True, + cf=('RPMng', 'pkg_checks'), + cook=get_bool) +CLIENT_RPMNG_PKG_VERIFY = \ + Option("Perform RPMng package verify", + default=True, + cf=('RPMng', 'pkg_verify'), + cook=get_bool) +CLIENT_RPMNG_INSTALLED_ACTION = \ + Option("RPMng installed action", + default="install", + cf=('RPMng', 'installed_action')) +CLIENT_RPMNG_ERASE_FLAGS = \ + Option("RPMng erase flags", + default=["allmatches"], + cf=('RPMng', 'erase_flags'), + cook=list_split) +CLIENT_RPMNG_VERSION_FAIL_ACTION = \ + Option("RPMng version fail action", + default="upgrade", + cf=('RPMng', 'version_fail_action')) +CLIENT_RPMNG_VERIFY_FAIL_ACTION = \ + Option("RPMng verify fail action", + default="reinstall", + cf=('RPMng', 'verify_fail_action')) +CLIENT_RPMNG_VERIFY_FLAGS = \ + Option("RPMng verify flags", + default=[], + cf=('RPMng', 'verify_flags'), + cook=list_split) +CLIENT_YUM24_INSTALLONLY = \ + Option('RPMng install-only packages', + default=['kernel', 'kernel-bigmem', 'kernel-enterprise', + 'kernel-smp', 'kernel-modules', 'kernel-debug', + 'kernel-unsupported', 'kernel-devel', 'kernel-source', + 'kernel-default', 'kernel-largesmp-devel', + 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], + cf=('RPMng', 'installonlypackages'), + cook=list_split) +CLIENT_YUM24_PKG_CHECKS = \ + Option("Perform YUM24 package checks", + default=True, + cf=('YUM24', 'pkg_checks'), + cook=get_bool) +CLIENT_YUM24_PKG_VERIFY = \ + Option("Perform YUM24 package verify", + default=True, + cf=('YUM24', 'pkg_verify'), + cook=get_bool) +CLIENT_YUM24_INSTALLED_ACTION = \ + Option("YUM24 installed action", + default="install", + cf=('YUM24', 'installed_action')) +CLIENT_YUM24_ERASE_FLAGS = \ + Option("YUM24 erase flags", + default=["allmatches"], + cf=('YUM24', 'erase_flags'), + cook=list_split) +CLIENT_YUM24_VERSION_FAIL_ACTION = \ + Option("YUM24 version fail action", + cf=('YUM24', 
'version_fail_action'), + default="upgrade") +CLIENT_YUM24_VERIFY_FAIL_ACTION = \ + Option("YUM24 verify fail action", + default="reinstall", + cf=('YUM24', 'verify_fail_action')) +CLIENT_YUM24_VERIFY_FLAGS = \ + Option("YUM24 verify flags", + default=[], + cf=('YUM24', 'verify_flags'), + cook=list_split) +CLIENT_YUM24_AUTODEP = \ + Option("YUM24 autodependency processing", + default=True, + cf=('YUM24', 'autodep'), + cook=get_bool) +CLIENT_YUMNG_PKG_CHECKS = \ + Option("Perform YUMng package checks", + default=True, + cf=('YUMng', 'pkg_checks'), + cook=get_bool) +CLIENT_YUMNG_PKG_VERIFY = \ + Option("Perform YUMng package verify", + default=True, + cf=('YUMng', 'pkg_verify'), + cook=get_bool) +CLIENT_YUMNG_INSTALLED_ACTION = \ + Option("YUMng installed action", + default="install", + cf=('YUMng', 'installed_action')) +CLIENT_YUMNG_VERSION_FAIL_ACTION = \ + Option("YUMng version fail action", + default="upgrade", + cf=('YUMng', 'version_fail_action')) +CLIENT_YUMNG_VERIFY_FAIL_ACTION = \ + Option("YUMng verify fail action", + default="reinstall", + cf=('YUMng', 'verify_fail_action')) +CLIENT_YUMNG_VERIFY_FLAGS = \ + Option("YUMng verify flags", + default=[], + cf=('YUMng', 'verify_flags'), + cook=list_split) # Logging options -LOGGING_FILE_PATH = Option('Set path of file log', default=None, - cmd='-o', odesc='<path>', cf=('logging', 'path')) +LOGGING_FILE_PATH = \ + Option('Set path of file log', + default=None, + cmd='-o', + odesc='<path>', + cf=('logging', 'path')) +LOGGING_SYSLOG = \ + Option('Log to syslog', + default=True, + cook=get_bool, + cf=('logging', 'syslog')) +DEBUG = \ + Option("Enable debugging output", + default=False, + cmd='-d', + cook=get_bool, + cf=('logging', 'debug')) +VERBOSE = \ + Option("Enable verbose output", + default=False, + cmd='-v', + cook=get_bool, + cf=('logging', 'verbose')) # Plugin-specific options -CFG_VALIDATION = Option('Run validation on Cfg files', default=True, - cf=('cfg', 'validation'), cmd='--cfg-validation', - 
long_arg=True, cook=get_bool) +CFG_VALIDATION = \ + Option('Run validation on Cfg files', + default=True, + cmd='--cfg-validation', + cf=('cfg', 'validation'), + long_arg=True, + cook=get_bool) + +# bcfg2-crypt options +ENCRYPT = \ + Option('Encrypt the specified file', + default=False, + cmd='--encrypt', + long_arg=True) +DECRYPT = \ + Option('Decrypt the specified file', + default=False, + cmd='--decrypt', + long_arg=True) +CRYPT_PASSPHRASE = \ + Option('Encryption passphrase (name or passphrase)', + default=None, + cmd='-p', + odesc='<passphrase>') +CRYPT_XPATH = \ + Option('XPath expression to select elements to encrypt', + default=None, + cmd='--xpath', + odesc='<xpath>', + long_arg=True) +CRYPT_PROPERTIES = \ + Option('Encrypt the specified file as a Properties file', + default=False, + cmd="--properties", + long_arg=True) +CRYPT_CFG = \ + Option('Encrypt the specified file as a Cfg file', + default=False, + cmd="--cfg", + long_arg=True) +CRYPT_REMOVE = \ + Option('Remove the plaintext file after encrypting', + default=False, + cmd="--remove", + long_arg=True) + +# Option groups +CLI_COMMON_OPTIONS = dict(configfile=CFILE, + debug=DEBUG, + help=HELP, + version=VERSION, + verbose=VERBOSE, + encoding=ENCODING, + logging=LOGGING_FILE_PATH, + syslog=LOGGING_SYSLOG) + +DAEMON_COMMON_OPTIONS = dict(daemon=DAEMON, + listen_all=SERVER_LISTEN_ALL) + +SERVER_COMMON_OPTIONS = dict(repo=SERVER_REPOSITORY, + plugins=SERVER_PLUGINS, + password=SERVER_PASSWORD, + filemonitor=SERVER_FILEMONITOR, + ignore=SERVER_FAM_IGNORE, + location=SERVER_LOCATION, + static=SERVER_STATIC, + key=SERVER_KEY, + cert=SERVER_CERT, + ca=SERVER_CA, + protocol=SERVER_PROTOCOL, + web_configfile=WEB_CFILE, + backend=SERVER_BACKEND) + +CRYPT_OPTIONS = dict(encrypt=ENCRYPT, + decrypt=DECRYPT, + passphrase=CRYPT_PASSPHRASE, + xpath=CRYPT_XPATH, + properties=CRYPT_PROPERTIES, + cfg=CRYPT_CFG, + remove=CRYPT_REMOVE) + +DRIVER_OPTIONS = \ + dict(apt_install_path=CLIENT_APT_TOOLS_INSTALL_PATH, + 
apt_var_path=CLIENT_APT_TOOLS_VAR_PATH, + apt_etc_path=CLIENT_SYSTEM_ETC_PATH, + portage_binpkgonly=CLIENT_PORTAGE_BINPKGONLY, + rpmng_installonly=CLIENT_RPMNG_INSTALLONLY, + rpmng_pkg_checks=CLIENT_RPMNG_PKG_CHECKS, + rpmng_pkg_verify=CLIENT_RPMNG_PKG_VERIFY, + rpmng_installed_action=CLIENT_RPMNG_INSTALLED_ACTION, + rpmng_erase_flags=CLIENT_RPMNG_ERASE_FLAGS, + rpmng_version_fail_action=CLIENT_RPMNG_VERSION_FAIL_ACTION, + rpmng_verify_fail_action=CLIENT_RPMNG_VERIFY_FAIL_ACTION, + rpmng_verify_flags=CLIENT_RPMNG_VERIFY_FLAGS, + yum24_installonly=CLIENT_YUM24_INSTALLONLY, + yum24_pkg_checks=CLIENT_YUM24_PKG_CHECKS, + yum24_pkg_verify=CLIENT_YUM24_PKG_VERIFY, + yum24_installed_action=CLIENT_YUM24_INSTALLED_ACTION, + yum24_erase_flags=CLIENT_YUM24_ERASE_FLAGS, + yum24_version_fail_action=CLIENT_YUM24_VERSION_FAIL_ACTION, + yum24_verify_fail_action=CLIENT_YUM24_VERIFY_FAIL_ACTION, + yum24_verify_flags=CLIENT_YUM24_VERIFY_FLAGS, + yum24_autodep=CLIENT_YUM24_AUTODEP, + yumng_pkg_checks=CLIENT_YUMNG_PKG_CHECKS, + yumng_pkg_verify=CLIENT_YUMNG_PKG_VERIFY, + yumng_installed_action=CLIENT_YUMNG_INSTALLED_ACTION, + yumng_version_fail_action=CLIENT_YUMNG_VERSION_FAIL_ACTION, + yumng_verify_fail_action=CLIENT_YUMNG_VERIFY_FAIL_ACTION, + yumng_verify_flags=CLIENT_YUMNG_VERIFY_FLAGS) + +CLIENT_COMMON_OPTIONS = \ + dict(extra=CLIENT_EXTRA_DISPLAY, + quick=CLIENT_QUICK, + lockfile=LOCKFILE, + drivers=CLIENT_DRIVERS, + dryrun=CLIENT_DRYRUN, + paranoid=CLIENT_PARANOID, + ppath=PARANOID_PATH, + max_copies=PARANOID_MAX_COPIES, + bundle=CLIENT_BUNDLE, + skipbundle=CLIENT_SKIPBUNDLE, + bundle_quick=CLIENT_BUNDLEQUICK, + indep=CLIENT_INDEP, + skipindep=CLIENT_SKIPINDEP, + file=CLIENT_FILE, + interactive=INTERACTIVE, + cache=CLIENT_CACHE, + profile=CLIENT_PROFILE, + remove=CLIENT_REMOVE, + server=SERVER_LOCATION, + user=CLIENT_USER, + password=SERVER_PASSWORD, + retries=CLIENT_RETRIES, + retry_delay=CLIENT_RETRY_DELAY, + kevlar=CLIENT_KEVLAR, + omit_lock_check=OMIT_LOCK_CHECK, + 
decision=CLIENT_DLIST, + servicemode=CLIENT_SERVICE_MODE, + key=CLIENT_KEY, + certificate=CLIENT_CERT, + ca=CLIENT_CA, + serverCN=CLIENT_SCNS, + timeout=CLIENT_TIMEOUT, + decision_list=CLIENT_DECISION_LIST) +CLIENT_COMMON_OPTIONS.update(DRIVER_OPTIONS) +CLIENT_COMMON_OPTIONS.update(CLI_COMMON_OPTIONS) + +DATABASE_COMMON_OPTIONS = dict(web_configfile=WEB_CFILE, + db_engine=DB_ENGINE, + db_name=DB_NAME, + db_user=DB_USER, + db_password=DB_PASSWORD, + db_host=DB_HOST, + db_port=DB_PORT, + time_zone=DJANGO_TIME_ZONE, + django_debug=DJANGO_DEBUG, + web_prefix=DJANGO_WEB_PREFIX) + class OptionParser(OptionSet): """ OptionParser bootstraps option parsing, getting the value of the config file """ - def __init__(self, args): + def __init__(self, args, argv=None, quiet=False): + if argv is None: + argv = sys.argv[1:] + # the bootstrap is always quiet, since it's running with a + # default config file and so might produce warnings otherwise self.Bootstrap = OptionSet([('configfile', CFILE)], quiet=True) - self.Bootstrap.parse(sys.argv[1:], do_getopt=False) - OptionSet.__init__(self, args, configfile=self.Bootstrap['configfile']) - self.optinfo = args + self.Bootstrap.parse(argv, do_getopt=False) + OptionSet.__init__(self, args, configfile=self.Bootstrap['configfile'], + quiet=quiet) + self.optinfo = copy.copy(args) def HandleEvent(self, event): - if not self['configfile'].endswith(event.filename): + if 'configfile' not in self or not isinstance(self['configfile'], str): + # we haven't parsed options yet, or CFILE wasn't included + # in the options + return + if event.filename != self['configfile']: print("Got event for unknown file: %s" % event.filename) return if event.code2str() == 'deleted': @@ -447,3 +1055,10 @@ class OptionParser(OptionSet): self.do_getopt = do_getopt OptionSet.parse(self, self.argv, do_getopt=self.do_getopt) + def add_option(self, name, opt): + self[name] = opt + self.optinfo[name] = opt + + def update(self, optdict): + dict.update(self, optdict) + 
self.optinfo.update(optdict) diff --git a/src/lib/Bcfg2/Proxy.py b/src/lib/Bcfg2/Proxy.py index 422d642db..220b89b5f 100644 --- a/src/lib/Bcfg2/Proxy.py +++ b/src/lib/Bcfg2/Proxy.py @@ -1,13 +1,3 @@ -"""RPC client access to cobalt components. - -Classes: -ComponentProxy -- an RPC client proxy to Cobalt components - -Functions: -load_config -- read configuration files - -""" - import logging import re import socket @@ -34,7 +24,6 @@ import time from Bcfg2.Bcfg2Py3k import httplib, xmlrpclib, urlparse version = sys.version_info[:2] -has_py23 = version >= (2, 3) has_py26 = version >= (2, 6) __all__ = ["ComponentProxy", @@ -70,50 +59,58 @@ class CertificateError(Exception): return ("Got unallowed commonName %s from server" % self.commonName) +_orig_Method = xmlrpclib._Method class RetryMethod(xmlrpclib._Method): """Method with error handling and retries built in.""" log = logging.getLogger('xmlrpc') - max_retries = 4 + max_retries = 3 + retry_delay = 1 def __call__(self, *args): for retry in range(self.max_retries): + if retry >= self.max_retries - 1: + final = True + else: + final = False + msg = None try: - return xmlrpclib._Method.__call__(self, *args) + return _orig_Method.__call__(self, *args) except xmlrpclib.ProtocolError: err = sys.exc_info()[1] - self.log.error("Server failure: Protocol Error: %s %s" % \ - (err.errcode, err.errmsg)) - raise xmlrpclib.Fault(20, "Server Failure") + msg = "Server failure: Protocol Error: %s %s" % \ + (err.errcode, err.errmsg) except xmlrpclib.Fault: - raise + msg = sys.exc_info()[1] except socket.error: err = sys.exc_info()[1] if hasattr(err, 'errno') and err.errno == 336265218: - self.log.error("SSL Key error") - break - if hasattr(err, 'errno') and err.errno == 185090050: - self.log.error("SSL CA error") - break - if retry == 3: - self.log.error("Server failure: %s" % err) - raise xmlrpclib.Fault(20, err) + msg = "SSL Key error: %s" % err + elif hasattr(err, 'errno') and err.errno == 185090050: + msg = "SSL CA error: %s" % err 
+ elif final: + msg = "Server failure: %s" % err except CertificateError: - ce = sys.exc_info()[1] - self.log.error("Got unallowed commonName %s from server" \ - % ce.commonName) - break + err = sys.exc_info()[1] + msg = "Got unallowed commonName %s from server" % err.commonName except KeyError: - self.log.error("Server disallowed connection") - break + err = sys.exc_info()[1] + msg = "Server disallowed connection: %s" % err + except ProxyError: + err = sys.exc_info()[1] + msg = err except: - self.log.error("Unknown failure", exc_info=1) - break - time.sleep(0.5) - raise xmlrpclib.Fault(20, "Server Failure") + err = sys.exc_info()[1] + msg = "Unknown failure: %s" % err + if msg: + if final: + self.log.error(msg) + raise ProxyError(msg) + else: + self.log.info(msg) + time.sleep(self.retry_delay) -# sorry jon -_Method = RetryMethod +xmlrpclib._Method = RetryMethod class SSLHTTPConnection(httplib.HTTPConnection): @@ -192,7 +189,15 @@ class SSLHTTPConnection(httplib.HTTPConnection): def _connect_py26ssl(self): """Initiates a connection using the ssl module.""" - rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # check for IPv6 + hostip = socket.getaddrinfo(self.host, + self.port, + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0][4][0] + if ':' in hostip: + rawsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + else: + rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self.protocol == 'xmlrpc/ssl': ssl_protocol_ver = ssl.PROTOCOL_SSLv23 elif self.protocol == 'xmlrpc/tlsv1': @@ -212,8 +217,7 @@ class SSLHTTPConnection(httplib.HTTPConnection): self.logger.warning("SSL key specfied, but no cert. 
Cannot authenticate this client with SSL.") self.key = None - if has_py23: - rawsock.settimeout(self.timeout) + rawsock.settimeout(self.timeout) self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required, ca_certs=self.ca, suppress_ragged_eofs=True, keyfile=self.key, certfile=self.cert, @@ -286,26 +290,21 @@ class XMLRPCTransport(xmlrpclib.Transport): def make_connection(self, host): host, self._extra_headers = self.get_host_info(host)[0:2] - http = SSLHTTPConnection(host, + return SSLHTTPConnection(host, key=self.key, cert=self.cert, ca=self.ca, scns=self.scns, timeout=self.timeout) - https = httplib.HTTP() - https._setup(http) - return https def request(self, host, handler, request_body, verbose=0): """Send request to server and return response.""" - h = self.make_connection(host) - try: - self.send_request(h, handler, request_body) - self.send_host(h, host) - self.send_user_agent(h) - self.send_content(h, request_body) - errcode, errmsg, headers = h.getreply() + conn = self.send_request(host, handler, request_body, False) + response = conn.getresponse() + errcode = response.status + errmsg = response.reason + headers = response.msg except (socket.error, SSL_ERROR): err = sys.exc_info()[1] raise ProxyError(xmlrpclib.ProtocolError(host + handler, @@ -320,8 +319,17 @@ class XMLRPCTransport(xmlrpclib.Transport): headers)) self.verbose = verbose - msglen = int(headers.dict['content-length']) - return self._get_response(h.getfile(), msglen) + return self.parse_response(response) + + if sys.hexversion < 0x03000000: + def send_request(self, host, handler, request_body, debug): + """ send_request() changed significantly in py3k.""" + conn = self.make_connection(host) + xmlrpclib.Transport.send_request(self, conn, handler, request_body) + self.send_host(conn, host) + self.send_user_agent(conn) + self.send_content(conn, request_body) + return conn def _get_response(self, fd, length): # read response from input file/socket, and parse it @@ -345,9 +353,8 @@ class 
XMLRPCTransport(xmlrpclib.Transport): return u.close() -def ComponentProxy(url, user=None, password=None, - key=None, cert=None, ca=None, - allowedServerCNs=None, timeout=90): +def ComponentProxy(url, user=None, password=None, key=None, cert=None, ca=None, + allowedServerCNs=None, timeout=90, retries=3, delay=1): """Constructs proxies to components. @@ -357,6 +364,8 @@ def ComponentProxy(url, user=None, password=None, Additional arguments are passed to the ServerProxy constructor. """ + xmlrpclib._Method.max_retries = retries + xmlrpclib._Method.retry_delay = delay if user and password: method, path = urlparse(url)[:2] diff --git a/src/lib/Bcfg2/SSLServer.py b/src/lib/Bcfg2/SSLServer.py index 6aa46ea58..aef44e419 100644 --- a/src/lib/Bcfg2/SSLServer.py +++ b/src/lib/Bcfg2/SSLServer.py @@ -45,7 +45,7 @@ class XMLRPCDispatcher (SimpleXMLRPCServer.SimpleXMLRPCDispatcher): params = (address, ) + params response = self.instance._dispatch(method, params, self.funcs) # py3k compatibility - if type(response) not in [bool, str, list, dict] or response is None: + if type(response) not in [bool, str, list, dict]: response = (response.decode('utf-8'), ) else: response = (response, ) @@ -98,14 +98,23 @@ class SSLServer (SocketServer.TCPServer, object): timeout -- timeout for non-blocking request handling """ - + # check whether or not we should listen on all interfaces if listen_all: listen_address = ('', server_address[1]) else: listen_address = (server_address[0], server_address[1]) + + # check for IPv6 address + if ':' in server_address[0]: + self.address_family = socket.AF_INET6 + try: SocketServer.TCPServer.__init__(self, listen_address, RequestHandlerClass) + except socket.gaierror: + e = sys.exc_info()[1] + self.logger.error("Failed to bind to socket: %s" % e) + raise except socket.error: self.logger.error("Failed to bind to socket") raise diff --git a/src/lib/Bcfg2/Server/Admin/Bundle.py b/src/lib/Bcfg2/Server/Admin/Bundle.py index 89c099602..e5e4eadf3 100644 --- 
a/src/lib/Bcfg2/Server/Admin/Bundle.py +++ b/src/lib/Bcfg2/Server/Admin/Bundle.py @@ -8,12 +8,11 @@ from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError class Bundle(Bcfg2.Server.Admin.MetadataCore): - __shorthelp__ = "Create or delete bundle entries" - # TODO: add/del functions + __shorthelp__ = "List and view bundle entries" __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin bundle list-xml" "\nbcfg2-admin bundle list-genshi" "\nbcfg2-admin bundle show\n") - __usage__ = ("bcfg2-admin bundle [options] [add|del] [group]") + __usage__ = ("bcfg2-admin bundle [options] [list-xml|list-genshi|show]") def __call__(self, args): Bcfg2.Server.Admin.MetadataCore.__call__(self, args) @@ -28,18 +27,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore): if len(args) == 0: self.errExit("No argument specified.\n" "Please see bcfg2-admin bundle help for usage.") -# if args[0] == 'add': -# try: -# self.metadata.add_bundle(args[1]) -# except MetadataConsistencyError: -# print("Error in adding bundle.") -# raise SystemExit(1) -# elif args[0] in ['delete', 'remove', 'del', 'rm']: -# try: -# self.metadata.remove_bundle(args[1]) -# except MetadataConsistencyError: -# print("Error in deleting bundle.") -# raise SystemExit(1) # Lists all available xml bundles elif args[0] in ['list-xml', 'ls-xml']: bundle_name = [] @@ -63,7 +50,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore): bundle_name = [] bundle_list = xml_list + genshi_list for bundle_path in bundle_list: - print "matching %s" % bundle_path bundle_name.append(rg.search(bundle_path).group(1)) text = "Available bundles (Number of bundles: %s)" % \ (len(bundle_list)) @@ -85,8 +71,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore): print('Details for the "%s" bundle:' % \ (bundle_name[int(lineno)].split('.')[0])) tree = lxml.etree.parse(bundle_list[int(lineno)]) - #Prints bundle content - #print(lxml.etree.tostring(tree)) names = ['Action', 'Package', 'Path', 'Service'] for name in names: for node in tree.findall("//" + 
name): diff --git a/src/lib/Bcfg2/Server/Admin/Client.py b/src/lib/Bcfg2/Server/Admin/Client.py index 4d580c54c..34dfd7550 100644 --- a/src/lib/Bcfg2/Server/Admin/Client.py +++ b/src/lib/Bcfg2/Server/Admin/Client.py @@ -4,50 +4,23 @@ from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError class Client(Bcfg2.Server.Admin.MetadataCore): - __shorthelp__ = "Create, delete, or modify client entries" + __shorthelp__ = "Create, delete, or list client entries" __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin client add <client> " - "attr1=val1 attr2=val2" - "\nbcfg2-admin client update <client> " - "attr1=val1 attr2=val2" "\nbcfg2-admin client list" "\nbcfg2-admin client del <client>\n") - __usage__ = ("bcfg2-admin client [options] [add|del|update|list] [attr=val]") + __usage__ = ("bcfg2-admin client [options] [add|del|list] [attr=val]") def __call__(self, args): Bcfg2.Server.Admin.MetadataCore.__call__(self, args) if len(args) == 0: self.errExit("No argument specified.\n" - "Please see bcfg2-admin client help for usage.") + "Usage: %s" % self.usage) if args[0] == 'add': - attr_d = {} - for i in args[2:]: - attr, val = i.split('=', 1) - if attr not in ['profile', 'uuid', 'password', - 'location', 'secure', 'address', - 'auth']: - print("Attribute %s unknown" % attr) - raise SystemExit(1) - attr_d[attr] = val try: - self.metadata.add_client(args[1], attr_d) + self.metadata.add_client(args[1]) except MetadataConsistencyError: print("Error in adding client") raise SystemExit(1) - elif args[0] in ['update', 'up']: - attr_d = {} - for i in args[2:]: - attr, val = i.split('=', 1) - if attr not in ['profile', 'uuid', 'password', - 'location', 'secure', 'address', - 'auth']: - print("Attribute %s unknown" % attr) - raise SystemExit(1) - attr_d[attr] = val - try: - self.metadata.update_client(args[1], attr_d) - except MetadataConsistencyError: - print("Error in updating client") - raise SystemExit(1) elif args[0] in ['delete', 'remove', 'del', 'rm']: try: 
self.metadata.remove_client(args[1]) @@ -55,10 +28,9 @@ class Client(Bcfg2.Server.Admin.MetadataCore): print("Error in deleting client") raise SystemExit(1) elif args[0] in ['list', 'ls']: - tree = lxml.etree.parse(self.metadata.data + "/clients.xml") - tree.xinclude() - for node in tree.findall("//Client"): - print(node.attrib["name"]) + for client in self.metadata.list_clients(): + print(client.hostname) else: print("No command specified") raise SystemExit(1) + diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py index 050dd69f8..78b30120a 100644 --- a/src/lib/Bcfg2/Server/Admin/Compare.py +++ b/src/lib/Bcfg2/Server/Admin/Compare.py @@ -18,7 +18,8 @@ class Compare(Bcfg2.Server.Admin.Mode): 'important', 'paranoid', 'sensitive', 'dev_type', 'major', 'minor', 'prune', 'encoding', 'empty', 'to', 'recursive', - 'vcstype', 'sourceurl', 'revision'], + 'vcstype', 'sourceurl', 'revision', + 'secontext'], 'Package': ['name', 'type', 'version', 'simplefile', 'verify'], 'Service': ['name', 'type', 'status', 'mode', diff --git a/src/lib/Bcfg2/Server/Admin/Group.py b/src/lib/Bcfg2/Server/Admin/Group.py deleted file mode 100644 index 16a773d6f..000000000 --- a/src/lib/Bcfg2/Server/Admin/Group.py +++ /dev/null @@ -1,63 +0,0 @@ -import lxml.etree -import Bcfg2.Server.Admin -from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError - - -class Group(Bcfg2.Server.Admin.MetadataCore): - __shorthelp__ = "Create, delete, or modify group entries" - __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin group add <group> " - "attr1=val1 attr2=val2" - "\nbcfg2-admin group update <group> " - "attr1=val1 attr2=val2" - "\nbcfg2-admin group list" - "\nbcfg2-admin group del <group>\n") - __usage__ = ("bcfg2-admin group [options] [add|del|update|list] [attr=val]") - - def __call__(self, args): - Bcfg2.Server.Admin.MetadataCore.__call__(self, args) - if len(args) == 0: - self.errExit("No argument specified.\n" - "Please see bcfg2-admin group help for 
usage.") - if args[0] == 'add': - attr_d = {} - for i in args[2:]: - attr, val = i.split('=', 1) - if attr not in ['profile', 'public', 'default', - 'name', 'auth', 'toolset', 'category', - 'comment']: - print("Attribute %s unknown" % attr) - raise SystemExit(1) - attr_d[attr] = val - try: - self.metadata.add_group(args[1], attr_d) - except MetadataConsistencyError: - print("Error in adding group") - raise SystemExit(1) - elif args[0] in ['update', 'up']: - attr_d = {} - for i in args[2:]: - attr, val = i.split('=', 1) - if attr not in ['profile', 'public', 'default', - 'name', 'auth', 'toolset', 'category', - 'comment']: - print("Attribute %s unknown" % attr) - raise SystemExit(1) - attr_d[attr] = val - try: - self.metadata.update_group(args[1], attr_d) - except MetadataConsistencyError: - print("Error in updating group") - raise SystemExit(1) - elif args[0] in ['delete', 'remove', 'del', 'rm']: - try: - self.metadata.remove_group(args[1]) - except MetadataConsistencyError: - print("Error in deleting group") - raise SystemExit(1) - elif args[0] in ['list', 'ls']: - tree = lxml.etree.parse(self.metadata.data + "/groups.xml") - for node in tree.findall("//Group"): - print(node.attrib["name"]) - else: - print("No command specified") - raise SystemExit(1) diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py index c1f9ed484..fefd17d6a 100644 --- a/src/lib/Bcfg2/Server/Admin/Init.py +++ b/src/lib/Bcfg2/Server/Admin/Init.py @@ -6,9 +6,11 @@ import stat import string import sys import subprocess + import Bcfg2.Server.Admin import Bcfg2.Server.Plugin import Bcfg2.Options +from Bcfg2.Bcfg2Py3k import input # default config file config = ''' @@ -18,18 +20,22 @@ plugins = %s [statistics] sendmailpath = %s -database_engine = sqlite3 +#web_debug = False +#time_zone = + +[database] +#engine = sqlite3 # 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. -database_name = +#name = # Or path to database file if using sqlite3. 
-#<repository>/etc/brpt.sqlite is default path if left empty -database_user = +#<repository>/bcfg2.sqlite is default path if left empty +#user = # Not used with sqlite3. -database_password = +#password = # Not used with sqlite3. -database_host = +#host = # Not used with sqlite3. -database_port = +#port = [communication] protocol = %s @@ -61,7 +67,7 @@ groups = '''<Groups version='3.0'> # Default contents of clients.xml clients = '''<Clients version="3.0"> - <Client profile="basic" pingable="Y" pingtime="0" name="%s"/> + <Client profile="basic" name="%s"/> </Clients> ''' @@ -98,7 +104,6 @@ plugin_list = ['Account', 'SSHbase', 'SSLCA', 'Statistics', - 'Svcmgr', 'TCheetah', 'TGenshi'] @@ -106,14 +111,6 @@ plugin_list = ['Account', default_plugins = Bcfg2.Options.SERVER_PLUGINS.default -def get_input(prompt): - """py3k compatible function to get input""" - try: - return raw_input(prompt) - except NameError: - return input(prompt) - - def gen_password(length): """Generates a random alphanumeric password with length characters.""" chars = string.letters + string.digits @@ -147,8 +144,8 @@ def create_key(hostname, keypath, certpath, country, state, location): def create_conf(confpath, confdata, keypath): # Don't overwrite existing bcfg2.conf file if os.path.exists(confpath): - result = get_input("\nWarning: %s already exists. " - "Overwrite? [y/N]: " % confpath) + result = input("\nWarning: %s already exists. " + "Overwrite? 
[y/N]: " % confpath) if result not in ['Y', 'y']: print("Leaving %s unchanged" % confpath) return @@ -206,8 +203,8 @@ class Init(Bcfg2.Server.Admin.Mode): def _prompt_hostname(self): """Ask for the server hostname.""" - data = get_input("What is the server's hostname [%s]: " % - socket.getfqdn()) + data = input("What is the server's hostname [%s]: " % + socket.getfqdn()) if data != '': self.shostname = data else: @@ -215,21 +212,21 @@ class Init(Bcfg2.Server.Admin.Mode): def _prompt_config(self): """Ask for the configuration file path.""" - newconfig = get_input("Store Bcfg2 configuration in [%s]: " % - self.configfile) + newconfig = input("Store Bcfg2 configuration in [%s]: " % + self.configfile) if newconfig != '': self.configfile = os.path.abspath(newconfig) def _prompt_repopath(self): """Ask for the repository path.""" while True: - newrepo = get_input("Location of Bcfg2 repository [%s]: " % - self.repopath) + newrepo = input("Location of Bcfg2 repository [%s]: " % + self.repopath) if newrepo != '': self.repopath = os.path.abspath(newrepo) if os.path.isdir(self.repopath): - response = get_input("Directory %s exists. Overwrite? [y/N]:" \ - % self.repopath) + response = input("Directory %s exists. Overwrite? [y/N]:" \ + % self.repopath) if response.lower().strip() == 'y': break else: @@ -245,8 +242,8 @@ class Init(Bcfg2.Server.Admin.Mode): def _prompt_server(self): """Ask for the server name.""" - newserver = get_input("Input the server location [%s]: " % - self.server_uri) + newserver = input("Input the server location [%s]: " % + self.server_uri) if newserver != '': self.server_uri = newserver @@ -258,19 +255,19 @@ class Init(Bcfg2.Server.Admin.Mode): prompt += ': ' while True: try: - osidx = int(get_input(prompt)) + osidx = int(input(prompt)) self.os_sel = os_list[osidx - 1][1] break except ValueError: continue def _prompt_plugins(self): - default = get_input("Use default plugins? 
(%s) [Y/n]: " % - ''.join(default_plugins)).lower() + default = input("Use default plugins? (%s) [Y/n]: " % + ''.join(default_plugins)).lower() if default != 'y' or default != '': while True: plugins_are_valid = True - plug_str = get_input("Specify plugins: ") + plug_str = input("Specify plugins: ") plugins = plug_str.split(',') for plugin in plugins: plugin = plugin.strip() @@ -284,26 +281,26 @@ class Init(Bcfg2.Server.Admin.Mode): """Ask for the key details (country, state, and location).""" print("The following questions affect SSL certificate generation.") print("If no data is provided, the default values are used.") - newcountry = get_input("Country name (2 letter code) for certificate: ") + newcountry = input("Country name (2 letter code) for certificate: ") if newcountry != '': if len(newcountry) == 2: self.country = newcountry else: while len(newcountry) != 2: - newcountry = get_input("2 letter country code (eg. US): ") + newcountry = input("2 letter country code (eg. US): ") if len(newcountry) == 2: self.country = newcountry break else: self.country = 'US' - newstate = get_input("State or Province Name (full name) for certificate: ") + newstate = input("State or Province Name (full name) for certificate: ") if newstate != '': self.state = newstate else: self.state = 'Illinois' - newlocation = get_input("Locality Name (eg, city) for certificate: ") + newlocation = input("Locality Name (eg, city) for certificate: ") if newlocation != '': self.location = newlocation else: @@ -313,10 +310,10 @@ class Init(Bcfg2.Server.Admin.Mode): """Initialize each plugin-specific portion of the repository.""" for plugin in self.plugins: if plugin == 'Metadata': - Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(self.repopath, - groups, - self.os_sel, - clients) + Bcfg2.Server.Plugins.Metadata.Metadata.init_repo( + self.repopath, + groups_xml=groups % self.os_sel, + clients_xml=clients % socket.getfqdn()) else: try: module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '', 
diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py index daf353107..64327e018 100644 --- a/src/lib/Bcfg2/Server/Admin/Pull.py +++ b/src/lib/Bcfg2/Server/Admin/Pull.py @@ -2,6 +2,7 @@ import getopt import sys import Bcfg2.Server.Admin +from Bcfg2.Bcfg2Py3k import input class Pull(Bcfg2.Server.Admin.MetadataCore): @@ -26,7 +27,7 @@ class Pull(Bcfg2.Server.Admin.MetadataCore): "interactive", "-s", "stdin")) - allowed = ['Metadata', 'BB', "DBStats", "Statistics", "Cfg", "SSHbase"] + allowed = ['Metadata', "DBStats", "Statistics", "Cfg", "SSHbase"] def __init__(self, setup): Bcfg2.Server.Admin.MetadataCore.__init__(self, setup) @@ -92,7 +93,6 @@ class Pull(Bcfg2.Server.Admin.MetadataCore): for k, v in list(data.items()): if v: new_entry[k] = v - #print new_entry return new_entry def Choose(self, choices): @@ -109,11 +109,8 @@ class Pull(Bcfg2.Server.Admin.MetadataCore): (choice.group, choice.prio)) else: print(" => host entry: %s" % (choice.hostname)) - # py3k compatibility - try: - ans = raw_input("Use this entry? [yN]: ") in ['y', 'Y'] - except NameError: - ans = input("Use this entry? [yN]: ") in ['y', 'Y'] + + ans = input("Use this entry? 
[yN]: ") in ['y', 'Y'] if ans: return choice return False diff --git a/src/lib/Bcfg2/Server/Admin/Query.py b/src/lib/Bcfg2/Server/Admin/Query.py index 3dd326645..f81ec41d2 100644 --- a/src/lib/Bcfg2/Server/Admin/Query.py +++ b/src/lib/Bcfg2/Server/Admin/Query.py @@ -7,8 +7,8 @@ import Bcfg2.Server.Admin class Query(Bcfg2.Server.Admin.MetadataCore): __shorthelp__ = "Query clients" __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin query [-n] [-c] " - "[-f filename] g=group p=profile") - __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile>\n\n" + "[-f filename] g=group p=profile b=bundle") + __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile> <b=bundle>\n\n" " %-25s%s\n" " %-25s%s\n" " %-25s%s\n" % @@ -22,7 +22,8 @@ class Query(Bcfg2.Server.Admin.MetadataCore): def __init__(self, setup): Bcfg2.Server.Admin.MetadataCore.__init__(self, setup) logging.root.setLevel(100) - Bcfg2.Logger.setup_logging(100, to_console=False, to_syslog=False) + Bcfg2.Logger.setup_logging(100, to_console=False, + to_syslog=setup['syslog']) def __call__(self, args): Bcfg2.Server.Admin.MetadataCore.__call__(self, args) @@ -55,8 +56,10 @@ class Query(Bcfg2.Server.Admin.MetadataCore): for g in glist: if g in v.split(','): nc.append(c) + elif k == 'b': + nc = self.metadata.get_client_names_by_bundles(v.split(',')) else: - print("One of g= or p= must be specified") + print("One of g=, p= or b= must be specified") raise SystemExit(1) clients = [c for c in clients if c in nc] if '-n' in args: diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py index 974cdff9d..335d6a1e7 100644 --- a/src/lib/Bcfg2/Server/Admin/Reports.py +++ b/src/lib/Bcfg2/Server/Admin/Reports.py @@ -19,17 +19,15 @@ if sys.version_info >= (2, 5): else: from md5 import md5 -# Prereq issues can be signaled with ImportError, so no try needed -# FIXME - settings file uses a hardcoded path for /etc/bcfg2.conf -import Bcfg2.Server.Reports.settings +import Bcfg2.settings # Load 
django and reports stuff _after_ we know we can load settings import django.core.management from Bcfg2.Server.Reports.importscript import load_stats -from Bcfg2.Server.Reports.updatefix import update_database +from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError from Bcfg2.Server.Reports.utils import * -project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__) +project_directory = os.path.dirname(Bcfg2.settings.__file__) project_name = os.path.basename(project_directory) sys.path.append(os.path.join(project_directory, '..')) project_module = __import__(project_name, '', '', ['']) @@ -41,7 +39,7 @@ from django.db import connection, transaction from Bcfg2.Server.Reports.reports.models import Client, Interaction, Entries, \ Entries_interactions, Performance, \ - Reason, Ping + Reason def printStats(fn): @@ -55,7 +53,6 @@ def printStats(fn): start_i = Interaction.objects.count() start_ei = Entries_interactions.objects.count() start_perf = Performance.objects.count() - start_ping = Ping.objects.count() fn(self, *data) @@ -67,8 +64,6 @@ def printStats(fn): (start_ei - Entries_interactions.objects.count())) self.log.info("Metrics removed: %s" % (start_perf - Performance.objects.count())) - self.log.info("Ping metrics removed: %s" % - (start_ping - Ping.objects.count())) return print_stats @@ -77,16 +72,13 @@ class Reports(Bcfg2.Server.Admin.Mode): '''Admin interface for dynamic reports''' __shorthelp__ = "Manage dynamic reports" __longhelp__ = (__shorthelp__) - django_commands = ['syncdb', 'sqlall', 'validate'] + django_commands = ['dbshell', 'shell', 'syncdb', 'sqlall', 'validate'] __usage__ = ("bcfg2-admin reports [command] [options]\n" - " -v|--verbose Be verbose\n" - " -q|--quiet Print only errors\n" "\n" " Commands:\n" " init Initialize the database\n" " load_stats Load statistics data\n" " -s|--stats Path to statistics.xml file\n" - " -c|--clients-file Path to clients.xml file\n" " -O3 Fast mode. 
Duplicates data!\n" " purge Purge records\n" " --client [n] Client to operate on\n" @@ -95,12 +87,11 @@ class Reports(Bcfg2.Server.Admin.Mode): " scrub Scrub the database for duplicate reasons and orphaned entries\n" " update Apply any updates to the reporting database\n" "\n" - " Django commands:\n " - "\n ".join(django_commands)) + " Django commands:\n " \ + + "\n ".join(django_commands)) def __init__(self, setup): Bcfg2.Server.Admin.Mode.__init__(self, setup) - self.log.setLevel(logging.INFO) def __call__(self, args): Bcfg2.Server.Admin.Mode.__call__(self, args) @@ -108,28 +99,21 @@ class Reports(Bcfg2.Server.Admin.Mode): print(self.__usage__) raise SystemExit(0) - verb = 0 - - if '-v' in args or '--verbose' in args: - self.log.setLevel(logging.DEBUG) - verb = 1 - if '-q' in args or '--quiet' in args: - self.log.setLevel(logging.WARNING) - # FIXME - dry run if args[0] in self.django_commands: self.django_command_proxy(args[0]) elif args[0] == 'scrub': self.scrub() - elif args[0] == 'init': - update_database() - elif args[0] == 'update': - update_database() + elif args[0] in ['init', 'update']: + try: + update_database() + except UpdaterError: + print("Update failed") + raise SystemExit(-1) elif args[0] == 'load_stats': quick = '-O3' in args stats_file = None - clients_file = None i = 1 while i < len(args): if args[i] == '-s' or args[i] == '--stats': @@ -137,11 +121,9 @@ class Reports(Bcfg2.Server.Admin.Mode): if stats_file[0] == '-': self.errExit("Invalid statistics file: %s" % stats_file) elif args[i] == '-c' or args[i] == '--clients-file': - clients_file = args[i + 1] - if clients_file[0] == '-': - self.errExit("Invalid clients file: %s" % clients_file) + print("DeprecationWarning: %s is no longer used" % args[i]) i = i + 1 - self.load_stats(stats_file, clients_file, verb, quick) + self.load_stats(stats_file, self.log.getEffectiveLevel() > logging.WARNING, quick) elif args[0] == 'purge': expired = False client = None @@ -239,7 +221,7 @@ class 
Reports(Bcfg2.Server.Admin.Mode): else: django.core.management.call_command(command) - def load_stats(self, stats_file=None, clientspath=None, verb=0, quick=False): + def load_stats(self, stats_file=None, verb=0, quick=False): '''Load statistics data into the database''' location = '' @@ -258,27 +240,18 @@ class Reports(Bcfg2.Server.Admin.Mode): except: encoding = 'UTF-8' - if not clientspath: - try: - clientspath = "%s/Metadata/clients.xml" % \ - self.cfp.get('server', 'repository') - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - self.errExit("Could not read bcfg2.conf; exiting") - try: - clientsdata = XML(open(clientspath).read()) - except (IOError, XMLSyntaxError): - self.errExit("StatReports: Failed to parse %s" % (clientspath)) - try: - load_stats(clientsdata, - statsdata, + load_stats(statsdata, encoding, verb, self.log, quick=quick, location=platform.node()) + except UpdaterError: + self.errExit("StatReports: Database updater failed") except: - pass + self.errExit("failed to import stats: %s" + % traceback.format_exc().splitlines()[-1]) @printStats def purge(self, client=None, maxdate=None, state=None): @@ -306,12 +279,10 @@ class Reports(Bcfg2.Server.Admin.Mode): self.log.debug("Filtering by maxdate: %s" % maxdate) ipurge = ipurge.filter(timestamp__lt=maxdate) - # Handle ping data as well - ping = Ping.objects.filter(endtime__lt=maxdate) - if client: - ping = ping.filter(client=cobj) - ping.delete() - + if Bcfg2.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3': + grp_limit = 100 + else: + grp_limit = 1000 if state: filtered = True if state not in ('dirty', 'clean', 'modified'): @@ -324,7 +295,7 @@ class Reports(Bcfg2.Server.Admin.Mode): rnum = 0 try: while rnum < count: - grp = list(ipurge[:1000].values("id")) + grp = list(ipurge[:grp_limit].values("id")) # just in case... 
if not grp: break diff --git a/src/lib/Bcfg2/Server/Admin/Syncdb.py b/src/lib/Bcfg2/Server/Admin/Syncdb.py new file mode 100644 index 000000000..bff232b05 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Syncdb.py @@ -0,0 +1,36 @@ +import Bcfg2.settings +import Bcfg2.Options +import Bcfg2.Server.Admin +from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError +from django.core.management import setup_environ + +class Syncdb(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Sync the Django ORM with the configured database") + __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin syncdb" + __usage__ = "bcfg2-admin syncdb" + options = {'web_configfile': Bcfg2.Options.WEB_CFILE, + 'repo': Bcfg2.Options.SERVER_REPOSITORY} + + def __call__(self, args): + import Bcfg2.Server.Admin + Bcfg2.Server.Admin.Mode.__call__(self, args) + + # Parse options + self.opts = Bcfg2.Options.OptionParser(self.options) + self.opts.parse(args) + + # we have to set up the django environment before we import + # the syncdb command, but we have to wait to set up the + # environment until we've read the config, which has to wait + # until we've parsed options. it's a windy, twisting road. 
+ Bcfg2.settings.read_config(cfile=self.opts['web_configfile'], + repo=self.opts['repo']) + setup_environ(Bcfg2.settings) + import Bcfg2.Server.models + Bcfg2.Server.models.load_models(cfile=self.opts['configfile']) + + try: + update_database() + except UpdaterError: + print("Update failed") + raise SystemExit(-1) diff --git a/src/lib/Bcfg2/Server/Admin/Tidy.py b/src/lib/Bcfg2/Server/Admin/Tidy.py index 82319b93e..65aa955b4 100644 --- a/src/lib/Bcfg2/Server/Admin/Tidy.py +++ b/src/lib/Bcfg2/Server/Admin/Tidy.py @@ -3,6 +3,7 @@ import re import socket import Bcfg2.Server.Admin +from Bcfg2.Bcfg2Py3k import input class Tidy(Bcfg2.Server.Admin.Mode): @@ -22,11 +23,7 @@ class Tidy(Bcfg2.Server.Admin.Mode): if '-f' in args or '-I' in args: if '-I' in args: for name in badfiles[:]: - # py3k compatibility - try: - answer = raw_input("Unlink file %s? [yN] " % name) - except NameError: - answer = input("Unlink file %s? [yN] " % name) + answer = input("Unlink file %s? [yN] " % name) if answer not in ['y', 'Y']: badfiles.remove(name) for name in badfiles: diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py index 2faa423c1..b190dd62a 100644 --- a/src/lib/Bcfg2/Server/Admin/Viz.py +++ b/src/lib/Bcfg2/Server/Admin/Viz.py @@ -35,7 +35,7 @@ class Viz(Bcfg2.Server.Admin.MetadataCore): __plugin_blacklist__ = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr', 'Packages', 'Rules', 'Account', 'Decisions', 'Deps', 'Git', 'Svn', 'Fossil', 'Bzr', 'Bundler', 'TGenshi', - 'SGenshi', 'Base'] + 'Base'] def __call__(self, args): Bcfg2.Server.Admin.MetadataCore.__call__(self, args) diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py index 618fa450e..3a7ba45cf 100644 --- a/src/lib/Bcfg2/Server/Admin/__init__.py +++ b/src/lib/Bcfg2/Server/Admin/__init__.py @@ -11,6 +11,7 @@ __all__ = [ 'Query', 'Reports', 'Snapshots', + 'Syncdb', 'Tidy', 'Viz', 'Xcmd' @@ -117,15 +118,7 @@ class MetadataCore(Mode): if p not in self.__plugin_blacklist__] 
try: - self.bcore = \ - Bcfg2.Server.Core.Core(setup['repo'], - setup['plugins'], - setup['password'], - setup['encoding'], - filemonitor=setup['filemonitor'], - setup=setup) - if setup['event debug']: - self.bcore.fam.debug = True + self.bcore = Bcfg2.Server.Core.BaseCore(setup) except Bcfg2.Server.Core.CoreInitError: msg = sys.exc_info()[1] self.errExit("Core load failed: %s" % msg) diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py new file mode 100644 index 000000000..c52c49931 --- /dev/null +++ b/src/lib/Bcfg2/Server/BuiltinCore.py @@ -0,0 +1,103 @@ +""" the core of the builtin bcfg2 server """ + +import os +import sys +import time +import socket +import logging +from Bcfg2.Server.Core import BaseCore +from Bcfg2.Bcfg2Py3k import xmlrpclib, urlparse +from Bcfg2.SSLServer import XMLRPCServer + +logger = logging.getLogger() + +class NoExposedMethod (Exception): + """There is no method exposed with the given name.""" + + +class Core(BaseCore): + name = 'bcfg2-server' + + def _resolve_exposed_method(self, method_name): + """Resolve an exposed method. + + Arguments: + method_name -- name of the method to resolve + + """ + try: + func = getattr(self, method_name) + except AttributeError: + raise NoExposedMethod(method_name) + if not getattr(func, "exposed", False): + raise NoExposedMethod(method_name) + return func + + def _dispatch(self, method, args, dispatch_dict): + """Custom XML-RPC dispatcher for components. 
+ + method -- XML-RPC method name + args -- tuple of paramaters to method + + """ + if method in dispatch_dict: + method_func = dispatch_dict[method] + else: + try: + method_func = self._resolve_exposed_method(method) + except NoExposedMethod: + self.logger.error("Unknown method %s" % (method)) + raise xmlrpclib.Fault(xmlrpclib.METHOD_NOT_FOUND, + "Unknown method %s" % method) + + try: + method_start = time.time() + try: + result = method_func(*args) + finally: + method_done = time.time() + except xmlrpclib.Fault: + raise + except Exception: + e = sys.exc_info()[1] + if getattr(e, "log", True): + self.logger.error(e, exc_info=True) + raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e)) + return result + + def run(self): + if self.setup['daemon']: + self._daemonize() + + hostname, port = urlparse(self.setup['location'])[1].split(':') + server_address = socket.getaddrinfo(hostname, + port, + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0][4] + try: + server = XMLRPCServer(self.setup['listen_all'], + server_address, + keyfile=self.setup['key'], + certfile=self.setup['cert'], + register=False, + timeout=1, + ca=self.setup['ca'], + protocol=self.setup['protocol']) + except: + err = sys.exc_info()[1] + self.logger.error("Server startup failed: %s" % err) + os._exit(1) + server.register_instance(self) + + try: + server.serve_forever() + finally: + server.server_close() + self.shutdown() + + def methodHelp(self, method_name): + try: + func = self._resolve_exposed_method(method_name) + except NoExposedMethod: + return "" + return func.__doc__ diff --git a/src/lib/Bcfg2/Server/CherryPyCore.py b/src/lib/Bcfg2/Server/CherryPyCore.py new file mode 100644 index 000000000..91e7f89bd --- /dev/null +++ b/src/lib/Bcfg2/Server/CherryPyCore.py @@ -0,0 +1,131 @@ +""" the core of the CherryPy-powered server """ + +import sys +import base64 +import atexit +import cherrypy +import Bcfg2.Options +from Bcfg2.Bcfg2Py3k import urlparse, xmlrpclib +from Bcfg2.Server.Core import BaseCore +from 
cherrypy.lib import xmlrpcutil +from cherrypy._cptools import ErrorTool + +if cherrypy.engine.state == 0: + cherrypy.engine.start(blocking=False) + atexit.register(cherrypy.engine.stop) + +# define our own error handler that handles xmlrpclib.Fault objects +# and so allows for the possibility of returning proper error +# codes. this obviates the need to use the builtin CherryPy xmlrpc +# tool +def on_error(*args, **kwargs): + err = sys.exc_info()[1] + if not isinstance(err, xmlrpclib.Fault): + err = xmlrpclib.Fault(xmlrpclib.INTERNAL_ERROR, str(err)) + xmlrpcutil._set_response(xmlrpclib.dumps(err)) +cherrypy.tools.xmlrpc_error = ErrorTool(on_error) + + +class Core(BaseCore): + _cp_config = {'tools.xmlrpc_error.on': True, + 'tools.bcfg2_authn.on': True} + + def __init__(self, *args, **kwargs): + BaseCore.__init__(self, *args, **kwargs) + + cherrypy.tools.bcfg2_authn = cherrypy.Tool('on_start_resource', + self.do_authn) + + self.rmi = self._get_rmi() + + def do_authn(self): + try: + header = cherrypy.request.headers['Authorization'] + except KeyError: + self.critical_error("No authentication data presented") + auth_type, auth_content = header.split() + try: + # py3k compatibility + auth_content = base64.standard_b64decode(auth_content) + except TypeError: + auth_content = \ + base64.standard_b64decode(bytes(auth_content.encode('ascii'))) + try: + # py3k compatibility + try: + username, password = auth_content.split(":") + except TypeError: + username, pw = auth_content.split(bytes(":", encoding='utf-8')) + password = pw.decode('utf-8') + except ValueError: + username = auth_content + password = "" + + # FIXME: Get client cert + cert = None + address = (cherrypy.request.remote.ip, cherrypy.request.remote.name) + return self.authenticate(cert, username, password, address) + + @cherrypy.expose + def default(self, *vpath, **params): + # needed to make enough changes to the stock XMLRPCController + # to support plugin.__rmi__ and prepending client address that + # we just 
rewrote. it clearly wasn't written with inheritance + # in mind :( + rpcparams, rpcmethod = xmlrpcutil.process_body() + if "." not in rpcmethod: + address = (cherrypy.request.remote.ip, cherrypy.request.remote.name) + rpcparams = (address, ) + rpcparams + + handler = getattr(self, rpcmethod) + if not handler or not getattr(handler, "exposed", False): + raise Exception('method "%s" is not supported' % attr) + else: + try: + handler = self.rmi[rpcmethod] + except: + raise Exception('method "%s" is not supported' % rpcmethod) + + body = handler(*rpcparams, **params) + + xmlrpcutil.respond(body, 'utf-8', True) + return cherrypy.serving.response.body + + def run(self): + hostname, port = urlparse(self.setup['location'])[1].split(':') + if self.setup['listen_all']: + hostname = '0.0.0.0' + + config = {'engine.autoreload.on': False, + 'server.socket_port': int(port)} + if self.setup['cert'] and self.setup['key']: + config.update({'server.ssl_module': 'pyopenssl', + 'server.ssl_certificate': self.setup['cert'], + 'server.ssl_private_key': self.setup['key']}) + if self.setup['debug']: + config['log.screen'] = True + cherrypy.config.update(config) + cherrypy.quickstart(self, config={'/': self.setup}) + + +def parse_opts(argv=None): + if argv is None: + argv = sys.argv[1:] + optinfo = dict() + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.DAEMON_COMMON_OPTIONS) + setup = Bcfg2.Options.OptionParser(optinfo, argv=argv) + setup.parse(argv) + return setup + +def application(environ, start_response): + """ running behind Apache as a WSGI app is not currently + supported, but I'm keeping this code here because I hope for it to + be supported some day. we'll need to set up an AMQP task queue + and related magic for that to happen, though. 
""" + cherrypy.config.update({'environment': 'embedded'}) + setup = parse_opts(argv=['-C', environ['config']]) + root = Core(setup, start_fam_thread=True) + cherrypy.tree.mount(root) + return cherrypy.tree(environ, start_response) diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py index 8482925b7..f39453edd 100644 --- a/src/lib/Bcfg2/Server/Core.py +++ b/src/lib/Bcfg2/Server/Core.py @@ -1,35 +1,21 @@ """Bcfg2.Server.Core provides the runtime support for Bcfg2 modules.""" +import os import atexit import logging import select import sys import threading import time +import inspect +import lxml.etree from traceback import format_exc - -try: - import lxml.etree -except ImportError: - print("Failed to import lxml dependency. Shutting down server.") - raise SystemExit(1) - -from Bcfg2.Component import Component, exposed -from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError +import Bcfg2.settings +import Bcfg2.Server +import Bcfg2.Logger import Bcfg2.Server.FileMonitor -import Bcfg2.Server.Plugins.Metadata -# Compatibility imports -from Bcfg2.Bcfg2Py3k import xmlrpclib -if sys.hexversion >= 0x03000000: - from functools import reduce - -logger = logging.getLogger('Bcfg2.Server.Core') - - -def critical_error(operation): - """Log and err, traceback and return an xmlrpc fault to client.""" - logger.error(operation, exc_info=1) - raise xmlrpclib.Fault(7, "Critical unexpected failure: %s" % (operation)) +from Bcfg2.Bcfg2Py3k import xmlrpclib, reduce +from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError try: import psyco @@ -37,6 +23,11 @@ try: except: pass +os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings' + +def exposed(func): + func.exposed = True + return func def sort_xml(node, key=None): for child in node: @@ -54,88 +45,134 @@ class CoreInitError(Exception): pass -class Core(Component): +class BaseCore(object): """The Core object is the container for all Bcfg2 Server logic and modules. 
""" - name = 'bcfg2-server' - implementation = 'bcfg2-server' - - def __init__(self, repo, plugins, password, encoding, - cfile='/etc/bcfg2.conf', ca=None, setup=None, - filemonitor='default', start_fam_thread=False): - Component.__init__(self) - self.datastore = repo - if filemonitor not in Bcfg2.Server.FileMonitor.available: - logger.error("File monitor driver %s not available; " - "forcing to default" % filemonitor) - filemonitor = 'default' + + def __init__(self, setup, start_fam_thread=False): + self.datastore = setup['repo'] + + if setup['debug']: + level = logging.DEBUG + elif setup['verbose']: + level = logging.INFO + else: + level = logging.WARNING + # we set a higher log level for the console by default. we + # assume that if someone is running bcfg2-server in such a way + # that it _can_ log to console, they want more output. if + # level is set to DEBUG, that will get handled by + # setup_logging and the console will get DEBUG output. + Bcfg2.Logger.setup_logging('bcfg2-server', + to_console=logging.INFO, + to_syslog=setup['syslog'], + to_file=setup['logging'], + level=level) + self.logger = logging.getLogger('bcfg2-server') + try: - self.fam = Bcfg2.Server.FileMonitor.available[filemonitor]() + fm = Bcfg2.Server.FileMonitor.available[setup['filemonitor']] + except KeyError: + self.logger.error("File monitor driver %s not available; " + "forcing to default" % filemonitor) + fm = Bcfg2.Server.FileMonitor.available['default'] + famargs = dict(ignore=[], debug=False) + if 'ignore' in setup: + famargs['ignore'] = setup['ignore'] + if 'debug' in setup: + famargs['debug'] = setup['debug'] + try: + self.fam = fm(**famargs) except IOError: - logger.error("Failed to instantiate fam driver %s" % filemonitor, - exc_info=1) - raise CoreInitError("failed to instantiate fam driver (used %s)" % \ - filemonitor) + msg = "Failed to instantiate fam driver %s" % setup['filemonitor'] + self.logger.error(msg, exc_info=1) + raise CoreInitError(msg) self.pubspace = {} - 
self.cfile = cfile + self.cfile = setup['configfile'] self.cron = {} self.plugins = {} self.plugin_blacklist = {} self.revision = '-1' - self.password = password - self.encoding = encoding + self.password = setup['password'] + self.encoding = setup['encoding'] self.setup = setup atexit.register(self.shutdown) # Create an event to signal worker threads to shutdown self.terminate = threading.Event() - if '' in plugins: - plugins.remove('') + # generate Django ORM settings. this must be done _before_ we + # load plugins + Bcfg2.settings.read_config(cfile=self.setup['web_configfile'], + repo=self.datastore) - for plugin in plugins: + self._database_available = False + # verify our database schema + try: + from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError + try: + update_database() + self._database_available = True + except UpdaterError: + err = sys.exc_info()[1] + self.logger.error("Failed to update database schema: %s" % err) + except ImportError: + # assume django is not installed + pass + except Exception: + inst = sys.exc_info()[1] + self.logger.error("Failed to update database schema") + self.logger.error(str(inst)) + self.logger.error(str(type(inst))) + raise CoreInitError + + if '' in setup['plugins']: + setup['plugins'].remove('') + + for plugin in setup['plugins']: if not plugin in self.plugins: self.init_plugins(plugin) # Remove blacklisted plugins for p, bl in list(self.plugin_blacklist.items()): if len(bl) > 0: - logger.error("The following plugins conflict with %s;" - "Unloading %s" % (p, bl)) + self.logger.error("The following plugins conflict with %s;" + "Unloading %s" % (p, bl)) for plug in bl: del self.plugins[plug] # This section logs the experimental plugins expl = [plug for (name, plug) in list(self.plugins.items()) if plug.experimental] if expl: - logger.info("Loading experimental plugin(s): %s" % \ - (" ".join([x.name for x in expl]))) - logger.info("NOTE: Interfaces subject to change") + self.logger.info("Loading experimental 
plugin(s): %s" % + (" ".join([x.name for x in expl]))) + self.logger.info("NOTE: Interfaces subject to change") # This section logs the deprecated plugins depr = [plug for (name, plug) in list(self.plugins.items()) if plug.deprecated] if depr: - logger.info("Loading deprecated plugin(s): %s" % \ - (" ".join([x.name for x in depr]))) + self.logger.info("Loading deprecated plugin(s): %s" % + (" ".join([x.name for x in depr]))) mlist = self.plugins_by_type(Bcfg2.Server.Plugin.Metadata) if len(mlist) == 1: self.metadata = mlist[0] else: - logger.error("No Metadata Plugin loaded; failed to instantiate Core") + self.logger.error("No Metadata Plugin loaded; " + "failed to instantiate Core") raise CoreInitError("No Metadata Plugin") self.statistics = self.plugins_by_type(Bcfg2.Server.Plugin.Statistics) self.pull_sources = self.plugins_by_type(Bcfg2.Server.Plugin.PullSource) self.generators = self.plugins_by_type(Bcfg2.Server.Plugin.Generator) self.structures = self.plugins_by_type(Bcfg2.Server.Plugin.Structure) self.connectors = self.plugins_by_type(Bcfg2.Server.Plugin.Connector) - self.ca = ca - self.fam_thread = threading.Thread(target=self._file_monitor_thread) + self.ca = setup['ca'] + self.fam_thread = \ + threading.Thread(name="%sFAMThread" % setup['filemonitor'], + target=self._file_monitor_thread) + self.lock = threading.Lock() + if start_fam_thread: self.fam_thread.start() - self.monitor_cfile() - - def monitor_cfile(self): - if self.setup: self.fam.AddMonitor(self.cfile, self.setup) def plugins_by_type(self, base_cls): @@ -171,16 +208,21 @@ class Core(Component): def init_plugins(self, plugin): """Handling for the plugins.""" + self.logger.debug("Loading plugin %s" % plugin) try: mod = getattr(__import__("Bcfg2.Server.Plugins.%s" % (plugin)).Server.Plugins, plugin) except ImportError: try: - mod = __import__(plugin) + mod = __import__(plugin, globals(), locals(), [plugin.split('.')[-1]]) except: - logger.error("Failed to load plugin %s" % (plugin)) + 
self.logger.error("Failed to load plugin %s" % plugin) return - plug = getattr(mod, plugin) + try: + plug = getattr(mod, plugin.split('.')[-1]) + except AttributeError: + self.logger.error("Failed to load plugin %s (AttributeError)" % plugin) + return # Blacklist conflicting plugins cplugs = [conflict for conflict in plug.conflicts if conflict in self.plugins] @@ -188,18 +230,35 @@ class Core(Component): try: self.plugins[plugin] = plug(self, self.datastore) except PluginInitError: - logger.error("Failed to instantiate plugin %s" % (plugin)) + self.logger.error("Failed to instantiate plugin %s" % plugin, + exc_info=1) except: - logger.error("Unexpected instantiation failure for plugin %s" % - (plugin), exc_info=1) + self.logger.error("Unexpected instantiation failure for plugin %s" % + plugin, exc_info=1) def shutdown(self): """Shutting down the plugins.""" if not self.terminate.isSet(): self.terminate.set() + self.fam.shutdown() for plugin in list(self.plugins.values()): plugin.shutdown() + def client_run_hook(self, hook, metadata): + """Checks the data structure.""" + for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.ClientRunHooks): + try: + getattr(plugin, hook)(metadata) + except AttributeError: + err = sys.exc_info()[1] + self.logger.error("Unknown attribute: %s" % err) + raise + except: + err = sys.exc_info()[1] + self.logger.error("%s: Error invoking hook %s: %s" % (plugin, + hook, + err)) + def validate_structures(self, metadata, data): """Checks the data structure.""" for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.StructureValidator): @@ -207,12 +266,12 @@ class Core(Component): plugin.validate_structures(metadata, data) except Bcfg2.Server.Plugin.ValidationError: err = sys.exc_info()[1] - logger.error("Plugin %s structure validation failed: %s" \ - % (plugin.name, err.message)) + self.logger.error("Plugin %s structure validation failed: %s" % + (plugin.name, err)) raise except: - logger.error("Plugin %s: unexpected structure validation 
failure" \ - % (plugin.name), exc_info=1) + self.logger.error("Plugin %s: unexpected structure validation " + "failure" % plugin.name, exc_info=1) def validate_goals(self, metadata, data): """Checks that the config matches the goals enforced by the plugins.""" @@ -221,23 +280,23 @@ class Core(Component): plugin.validate_goals(metadata, data) except Bcfg2.Server.Plugin.ValidationError: err = sys.exc_info()[1] - logger.error("Plugin %s goal validation failed: %s" \ - % (plugin.name, err.message)) + self.logger.error("Plugin %s goal validation failed: %s" % + (plugin.name, err.message)) raise except: - logger.error("Plugin %s: unexpected goal validation failure" \ - % (plugin.name), exc_info=1) + self.logger.error("Plugin %s: unexpected goal validation " + "failure" % plugin.name, exc_info=1) def GetStructures(self, metadata): """Get all structures for client specified by metadata.""" structures = reduce(lambda x, y: x + y, - [struct.BuildStructures(metadata) for struct \ - in self.structures], []) + [struct.BuildStructures(metadata) + for struct in self.structures], []) sbundles = [b.get('name') for b in structures if b.tag == 'Bundle'] missing = [b for b in metadata.bundles if b not in sbundles] if missing: - logger.error("Client %s configuration missing bundles: %s" \ - % (metadata.hostname, ':'.join(missing))) + self.logger.error("Client %s configuration missing bundles: %s" % + (metadata.hostname, ':'.join(missing))) return structures def BindStructure(self, structure, metadata): @@ -252,14 +311,14 @@ class Core(Component): exc = sys.exc_info()[1] if 'failure' not in entry.attrib: entry.set('failure', 'bind error: %s' % format_exc()) - logger.error("Failed to bind entry %s:%s: %s" % - (entry.tag, entry.get('name'), exc)) + self.logger.error("Failed to bind entry %s:%s: %s" % + (entry.tag, entry.get('name'), exc)) except Exception: exc = sys.exc_info()[1] if 'failure' not in entry.attrib: entry.set('failure', 'bind error: %s' % format_exc()) - 
logger.error("Unexpected failure in BindStructure: %s %s" \ - % (entry.tag, entry.get('name')), exc_info=1) + self.logger.error("Unexpected failure in BindStructure: %s %s" % + (entry.tag, entry.get('name')), exc_info=1) def Bind(self, entry, metadata): """Bind an entry using the appropriate generator.""" @@ -275,11 +334,11 @@ class Core(Component): return ret except: entry.set('name', oldname) - logger.error("Failed binding entry %s:%s with altsrc %s" \ - % (entry.tag, entry.get('name'), - entry.get('altsrc'))) - logger.error("Falling back to %s:%s" % (entry.tag, - entry.get('name'))) + self.logger.error("Failed binding entry %s:%s with altsrc %s" % + (entry.tag, entry.get('name'), + entry.get('altsrc'))) + self.logger.error("Falling back to %s:%s" % (entry.tag, + entry.get('name'))) glist = [gen for gen in self.generators if entry.get('name') in gen.Entries.get(entry.tag, {})] @@ -288,8 +347,8 @@ class Core(Component): metadata) elif len(glist) > 1: generators = ", ".join([gen.name for gen in glist]) - logger.error("%s %s served by multiple generators: %s" % \ - (entry.tag, entry.get('name'), generators)) + self.logger.error("%s %s served by multiple generators: %s" % + (entry.tag, entry.get('name'), generators)) g2list = [gen for gen in self.generators if gen.HandlesEntry(entry, metadata)] if len(g2list) == 1: @@ -301,18 +360,21 @@ class Core(Component): def BuildConfiguration(self, client): """Build configuration for clients.""" start = time.time() - config = lxml.etree.Element("Configuration", version='2.0', \ + config = lxml.etree.Element("Configuration", version='2.0', revision=self.revision) try: meta = self.build_metadata(client) - except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: - logger.error("Metadata consistency error for client %s" % client) + except Bcfg2.Server.Plugin.MetadataConsistencyError: + self.logger.error("Metadata consistency error for client %s" % + client) return lxml.etree.Element("error", type='metadata error') + 
self.client_run_hook("start_client_run", meta) + try: structures = self.GetStructures(meta) except: - logger.error("error in GetStructures", exc_info=1) + self.logger.error("error in GetStructures", exc_info=1) return lxml.etree.Element("error", type='structure error') self.validate_structures(meta, structures) @@ -324,7 +386,8 @@ class Core(Component): key = (entry.tag, entry.get('name')) if key in esrcs: if esrcs[key] != entry.get('altsrc'): - logger.error("Found inconsistent altsrc mapping for entry %s:%s" % key) + self.logger.error("Found inconsistent altsrc mapping " + "for entry %s:%s" % key) else: esrcs[key] = entry.get('altsrc', None) del esrcs @@ -334,15 +397,49 @@ class Core(Component): self.BindStructure(astruct, meta) config.append(astruct) except: - logger.error("error in BindStructure", exc_info=1) + self.logger.error("error in BindStructure", exc_info=1) self.validate_goals(meta, config) + self.client_run_hook("end_client_run", meta) + sort_xml(config, key=lambda e: e.get('name')) - logger.info("Generated config for %s in %.03f seconds" % \ - (client, time.time() - start)) + self.logger.info("Generated config for %s in %.03f seconds" % + (client, time.time() - start)) return config + def run(self, **kwargs): + """ run the server core """ + raise NotImplementedError + + def _daemonize(self): + child_pid = os.fork() + if child_pid != 0: + return + + os.setsid() + + child_pid = os.fork() + if child_pid != 0: + os._exit(0) + + redirect_file = open("/dev/null", "w+") + os.dup2(redirect_file.fileno(), sys.__stdin__.fileno()) + os.dup2(redirect_file.fileno(), sys.__stdout__.fileno()) + os.dup2(redirect_file.fileno(), sys.__stderr__.fileno()) + + os.chdir(os.sep) + + pidfile = open(self.setup['daemon'] or "/dev/null", "w") + pidfile.write("%s\n" % os.getpid()) + pidfile.close() + + return os.getpid() + + def critical_error(self, operation): + """ this should be overridden by child classes """ + self.logger.fatal(operation, exc_info=1) + def 
GetDecisions(self, metadata, mode): """Get data for the decision list.""" result = [] @@ -350,15 +447,15 @@ class Core(Component): try: result += plugin.GetDecisions(metadata, mode) except: - logger.error("Plugin: %s failed to generate decision list" \ - % plugin.name, exc_info=1) + self.logger.error("Plugin: %s failed to generate decision list" + % plugin.name, exc_info=1) return result def build_metadata(self, client_name): """Build the metadata structure.""" if not hasattr(self, 'metadata'): # some threads start before metadata is even loaded - raise Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError + raise Bcfg2.Server.Plugin.MetadataRuntimeError imd = self.metadata.get_initial_metadata(client_name) for conn in self.connectors: grps = conn.get_additional_groups(imd) @@ -378,102 +475,147 @@ class Core(Component): try: plugin.process_statistics(meta, statistics) except: - logger.error("Plugin %s failed to process stats from %s" \ - % (plugin.name, meta.hostname), - exc_info=1) + self.logger.error("Plugin %s failed to process stats from " + "%s" % (plugin.name, meta.hostname), + exc_info=1) + + self.logger.info("Client %s reported state %s" % (client_name, + state.get('state'))) + self.client_run_hook("end_statistics", meta) + + def resolve_client(self, address, cleanup_cache=False, metadata=True): + try: + client = self.metadata.resolve_client(address, + cleanup_cache=cleanup_cache) + if metadata: + meta = self.build_metadata(client) + else: + meta = None + except Bcfg2.Server.Plugin.MetadataConsistencyError: + err = sys.exc_info()[1] + self.critical_error("Client metadata resolution error for %s: %s" % + (address[0], err)) + except Bcfg2.Server.Plugin.MetadataRuntimeError: + err = sys.exc_info()[1] + self.critical_error('Metadata system runtime failure for %s: %s' % + (address[0], err)) + return (client, meta) + + def critical_error(self, operation): + """Log and err, traceback and return an xmlrpc fault to client.""" + self.logger.error(operation, exc_info=1) 
+ raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR, + "Critical failure: %s" % operation) + + def _get_rmi(self): + rmi = dict() + if self.plugins: + for pname, pinst in list(self.plugins.items()): + for mname in pinst.__rmi__: + rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname) + return rmi - logger.info("Client %s reported state %s" % (client_name, - state.get('state'))) # XMLRPC handlers start here + @exposed + def listMethods(self, address): + methods = [name + for name, func in inspect.getmembers(self, callable) + if getattr(func, "exposed", False)] + methods.extend(self._get_rmi().keys()) + return methods + + @exposed + def methodHelp(self, address, method_name): + raise NotImplementedError + + @exposed + def DeclareVersion(self, address, version): + """ declare the client version """ + client, metadata = self.resolve_client(address) + try: + self.metadata.set_version(client, version) + except (Bcfg2.Server.Plugin.MetadataConsistencyError, + Bcfg2.Server.Plugin.MetadataRuntimeError): + err = sys.exc_info()[1] + self.critical_error("Unable to set version for %s: %s" % + (client, err)) + return True @exposed def GetProbes(self, address): """Fetch probes for a particular client.""" resp = lxml.etree.Element('probes') + client, metadata = self.resolve_client(address, cleanup_cache=True) try: - name = self.metadata.resolve_client(address, cleanup_cache=True) - meta = self.build_metadata(name) - for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Probing): - for probe in plugin.GetProbes(meta): + for probe in plugin.GetProbes(metadata): resp.append(probe) - return lxml.etree.tostring(resp, encoding='UTF-8', - xml_declaration=True) - except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: - warning = 'Client metadata resolution error for %s' % address[0] - self.logger.warning(warning) - raise xmlrpclib.Fault(6, warning + "; check server log") - except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError: - err_msg = 'Metadata system runtime failure' - 
self.logger.error(err_msg) - raise xmlrpclib.Fault(6, err_msg) + return lxml.etree.tostring(resp, + xml_declaration=False).decode('UTF-8') except: - critical_error("Error determining client probes") + err = sys.exc_info()[1] + self.critical_error("Error determining probes for %s: %s" % + (client, err)) @exposed def RecvProbeData(self, address, probedata): """Receive probe data from clients.""" + client, metadata = self.resolve_client(address) try: - name = self.metadata.resolve_client(address) - meta = self.build_metadata(name) - except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: - warning = 'Metadata consistency error' - self.logger.warning(warning) - raise xmlrpclib.Fault(6, warning) - # clear dynamic groups - self.metadata.cgroups[meta.hostname] = [] - try: - xpdata = lxml.etree.XML(probedata.encode('utf-8')) + xpdata = lxml.etree.XML(probedata.encode('utf-8'), + parser=Bcfg2.Server.XMLParser) except: - self.logger.error("Failed to parse probe data from client %s" % \ - (address[0])) - return False + err = sys.exc_info()[1] + self.critical_error("Failed to parse probe data from client %s: %s" + % (client, err)) sources = [] [sources.append(data.get('source')) for data in xpdata if data.get('source') not in sources] for source in sources: if source not in self.plugins: - self.logger.warning("Failed to locate plugin %s" % (source)) + self.logger.warning("Failed to locate plugin %s" % source) continue dl = [data for data in xpdata if data.get('source') == source] try: - self.plugins[source].ReceiveData(meta, dl) + self.plugins[source].ReceiveData(metadata, dl) except: - logger.error("Failed to process probe data from client %s" % \ - (address[0]), exc_info=1) + err = sys.exc_info()[1] + self.critical_error("Failed to process probe data from client " + "%s: %s" % + (client, err)) return True @exposed def AssertProfile(self, address, profile): """Set profile for a client.""" + client = self.resolve_client(address, metadata=False)[0] try: - client = 
self.metadata.resolve_client(address) self.metadata.set_profile(client, profile, address) - except (Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError, - Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError): - warning = 'Metadata consistency error' - self.logger.warning(warning) - raise xmlrpclib.Fault(6, warning) + except (Bcfg2.Server.Plugin.MetadataConsistencyError, + Bcfg2.Server.Plugin.MetadataRuntimeError): + err = sys.exc_info()[1] + self.critical_error("Unable to assert profile for %s: %s" % + (client, err)) return True @exposed def GetConfig(self, address, checksum=False): """Build config for a client.""" + client = self.resolve_client(address)[0] try: - client = self.metadata.resolve_client(address) config = self.BuildConfiguration(client) - return lxml.etree.tostring(config, encoding='UTF-8', - xml_declaration=True) - except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: - self.logger.warning("Metadata consistency failure for %s" % (address)) - raise xmlrpclib.Fault(6, "Metadata consistency failure") + return lxml.etree.tostring(config, + xml_declaration=False).decode('UTF-8') + except Bcfg2.Server.Plugin.MetadataConsistencyError: + self.critical_error("Metadata consistency failure for %s" % client) @exposed def RecvStats(self, address, stats): """Act on statistics upload.""" - sdata = lxml.etree.XML(stats.encode('utf-8')) - client = self.metadata.resolve_client(address) + client = self.resolve_client(address)[0] + sdata = lxml.etree.XML(stats.encode('utf-8'), + parser=Bcfg2.Server.XMLParser) self.process_statistics(client, sdata) return "<ok/>" @@ -483,11 +625,17 @@ class Core(Component): else: # No ca, so no cert validation can be done acert = None - return self.metadata.AuthenticateConnection(acert, user, password, address) + return self.metadata.AuthenticateConnection(acert, user, password, + address) @exposed def GetDecisionList(self, address, mode): """Get the data of the decision list.""" - client = 
self.metadata.resolve_client(address) - meta = self.build_metadata(client) - return self.GetDecisions(meta, mode) + client, metadata = self.resolve_client(address) + return self.GetDecisions(metadata, mode) + + @property + def database_available(self): + """Is the database configured and available""" + return self._database_available + diff --git a/src/lib/Bcfg2/Server/FileMonitor.py b/src/lib/Bcfg2/Server/FileMonitor.py deleted file mode 100644 index d6b313e6b..000000000 --- a/src/lib/Bcfg2/Server/FileMonitor.py +++ /dev/null @@ -1,315 +0,0 @@ -"""Bcfg2.Server.FileMonitor provides the support for monitorung files.""" - -import logging -import os -import stat -from time import sleep, time - -logger = logging.getLogger('Bcfg2.Server.FileMonitor') - - -def ShouldIgnore(event): - """Test if the event should be suppresed.""" - # FIXME should move event suppression out of the core - if event.filename.split('/')[-1] == '.svn': - return True - if event.filename.endswith('~') or \ - event.filename.startswith('#') or event.filename.startswith('.#'): - #logger.error("Suppressing event for file %s" % (event.filename)) - return True - return False - - -class Event(object): - def __init__(self, request_id, filename, code): - self.requestID = request_id - self.filename = filename - self.action = code - - def code2str(self): - """return static code for event""" - return self.action - -available = {} - - -class FileMonitor(object): - """File Monitor baseclass.""" - def __init__(self, debug=False): - object.__init__(self) - self.debug = debug - self.handles = dict() - - def get_event(self): - return None - - def pending(self): - return False - - def fileno(self): - return 0 - - def handle_one_event(self, event): - if ShouldIgnore(event): - return - if event.requestID not in self.handles: - logger.info("Got event for unexpected id %s, file %s" % - (event.requestID, event.filename)) - return - if self.debug: - logger.info("Dispatching event %s %s to obj %s" \ - % (event.code2str(), 
event.filename, - self.handles[event.requestID])) - try: - self.handles[event.requestID].HandleEvent(event) - except: - logger.error("error in handling of gamin event for %s" % \ - (event.filename), exc_info=1) - - def handle_event_set(self, lock=None): - count = 1 - event = self.get_event() - start = time() - if lock: - lock.acquire() - try: - self.handle_one_event(event) - while self.pending(): - self.handle_one_event(self.get_event()) - count += 1 - except: - pass - if lock: - lock.release() - end = time() - logger.info("Handled %d events in %.03fs" % (count, (end - start))) - - def handle_events_in_interval(self, interval): - end = time() + interval - while time() < end: - if self.pending(): - self.handle_event_set() - end = time() + interval - else: - sleep(0.5) - - -class FamFam(object): - """The fam object is a set of callbacks for - file alteration events (FAM support). - """ - - def __init__(self): - object.__init__(self) - self.fm = _fam.open() - self.users = {} - self.handles = {} - self.debug = False - - def fileno(self): - """Return fam file handle number.""" - return self.fm.fileno() - - def handle_event_set(self, _): - self.Service() - - def handle_events_in_interval(self, interval): - now = time() - while (time() - now) < interval: - if self.Service(): - now = time() - - def AddMonitor(self, path, obj): - """Add a monitor to path, installing a callback to obj.HandleEvent.""" - mode = os.stat(path)[stat.ST_MODE] - if stat.S_ISDIR(mode): - handle = self.fm.monitorDirectory(path, None) - else: - handle = self.fm.monitorFile(path, None) - self.handles[handle.requestID()] = handle - if obj != None: - self.users[handle.requestID()] = obj - return handle.requestID() - - def Service(self, interval=0.50): - """Handle all fam work.""" - count = 0 - collapsed = 0 - rawevents = [] - start = time() - now = time() - while (time() - now) < interval: - if self.fm.pending(): - while self.fm.pending(): - count += 1 - rawevents.append(self.fm.nextEvent()) - now = 
time() - unique = [] - bookkeeping = [] - for event in rawevents: - if ShouldIgnore(event): - continue - if event.code2str() != 'changed': - # process all non-change events - unique.append(event) - else: - if (event.filename, event.requestID) not in bookkeeping: - bookkeeping.append((event.filename, event.requestID)) - unique.append(event) - else: - collapsed += 1 - for event in unique: - if event.requestID in self.users: - try: - self.users[event.requestID].HandleEvent(event) - except: - logger.error("handling event for file %s" % (event.filename), exc_info=1) - end = time() - logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" % - (count, (end - start), collapsed)) - return count - - -class Fam(FileMonitor): - """ - The fam object is a set of callbacks for - file alteration events (FAM support). - """ - - def __init__(self, debug=False): - FileMonitor.__init__(self, debug) - self.fm = _fam.open() - - def fileno(self): - return self.fm.fileno() - - def AddMonitor(self, path, obj): - """Add a monitor to path, installing a callback to obj.HandleEvent.""" - mode = os.stat(path)[stat.ST_MODE] - if stat.S_ISDIR(mode): - handle = self.fm.monitorDirectory(path, None) - else: - handle = self.fm.monitorFile(path, None) - if obj != None: - self.handles[handle.requestID()] = obj - return handle.requestID() - - def pending(self): - return self.fm.pending() - - def get_event(self): - return self.fm.nextEvent() - - -class Pseudo(FileMonitor): - """ - The fam object is a set of callbacks for - file alteration events (static monitor support). 
- """ - - def __init__(self, debug=False): - FileMonitor.__init__(self, debug=False) - self.pending_events = [] - - def pending(self): - return len(self.pending_events) != 0 - - def get_event(self): - return self.pending_events.pop() - - def AddMonitor(self, path, obj): - """add a monitor to path, installing a callback to obj.HandleEvent""" - handleID = len(list(self.handles.keys())) - mode = os.stat(path)[stat.ST_MODE] - handle = Event(handleID, path, 'exists') - if stat.S_ISDIR(mode): - dirList = os.listdir(path) - self.pending_events.append(handle) - for includedFile in dirList: - self.pending_events.append(Event(handleID, - includedFile, - 'exists')) - self.pending_events.append(Event(handleID, path, 'endExist')) - else: - self.pending_events.append(Event(handleID, path, 'exists')) - if obj != None: - self.handles[handleID] = obj - return handleID - - -try: - from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \ - GAMChanged, GAMDeleted, GAMMoved - - class GaminEvent(Event): - """ - This class provides an event analogous to - python-fam events based on gamin sources. 
- """ - def __init__(self, request_id, filename, code): - Event.__init__(self, request_id, filename, code) - action_map = {GAMCreated: 'created', GAMExists: 'exists', - GAMChanged: 'changed', GAMDeleted: 'deleted', - GAMEndExist: 'endExist', GAMMoved: 'moved'} - if code in action_map: - self.action = action_map[code] - - class Gamin(FileMonitor): - """ - The fam object is a set of callbacks for - file alteration events (Gamin support) - """ - def __init__(self, debug=False): - FileMonitor.__init__(self, debug) - self.mon = WatchMonitor() - self.counter = 0 - self.events = [] - - def fileno(self): - return self.mon.get_fd() - - def queue(self, path, action, request_id): - """queue up the event for later handling""" - self.events.append(GaminEvent(request_id, path, action)) - - def AddMonitor(self, path, obj): - """Add a monitor to path, installing a callback to obj.HandleEvent.""" - handle = self.counter - self.counter += 1 - mode = os.stat(path)[stat.ST_MODE] - - # Flush queued gamin events - while self.mon.event_pending(): - self.mon.handle_one_event() - - if stat.S_ISDIR(mode): - self.mon.watch_directory(path, self.queue, handle) - else: - self.mon.watch_file(path, self.queue, handle) - self.handles[handle] = obj - return handle - - def pending(self): - return len(self.events) > 0 or self.mon.event_pending() - - def get_event(self): - if self.mon.event_pending(): - self.mon.handle_one_event() - return self.events.pop(0) - - available['gamin'] = Gamin -except ImportError: - # fall back to _fam - pass - -try: - import _fam - available['fam'] = FamFam -except ImportError: - pass -available['pseudo'] = Pseudo - -for fdrv in ['gamin', 'fam', 'pseudo']: - if fdrv in available: - available['default'] = available[fdrv] - break diff --git a/src/lib/Bcfg2/Server/FileMonitor/Fam.py b/src/lib/Bcfg2/Server/FileMonitor/Fam.py new file mode 100644 index 000000000..1a00fffa0 --- /dev/null +++ b/src/lib/Bcfg2/Server/FileMonitor/Fam.py @@ -0,0 +1,82 @@ +""" Fam provides FAM 
support for file alteration events """ + +import os +import _fam +import stat +import logging +from time import time +from Bcfg2.Server.FileMonitor import FileMonitor + +logger = logging.getLogger(__name__) + +class Fam(FileMonitor): + __priority__ = 90 + + def __init__(self, ignore=None, debug=False): + FileMonitor.__init__(self, ignore=ignore, debug=debug) + self.fm = _fam.open() + self.users = {} + + def fileno(self): + """Return fam file handle number.""" + return self.fm.fileno() + + def handle_event_set(self, _): + self.Service() + + def handle_events_in_interval(self, interval): + now = time() + while (time() - now) < interval: + if self.Service(): + now = time() + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + mode = os.stat(path)[stat.ST_MODE] + if stat.S_ISDIR(mode): + handle = self.fm.monitorDirectory(path, None) + else: + handle = self.fm.monitorFile(path, None) + self.handles[handle.requestID()] = handle + if obj != None: + self.users[handle.requestID()] = obj + return handle.requestID() + + def Service(self, interval=0.50): + """Handle all fam work.""" + count = 0 + collapsed = 0 + rawevents = [] + start = time() + now = time() + while (time() - now) < interval: + if self.fm.pending(): + while self.fm.pending(): + count += 1 + rawevents.append(self.fm.nextEvent()) + now = time() + unique = [] + bookkeeping = [] + for event in rawevents: + if self.should_ignore(event): + continue + if event.code2str() != 'changed': + # process all non-change events + unique.append(event) + else: + if (event.filename, event.requestID) not in bookkeeping: + bookkeeping.append((event.filename, event.requestID)) + unique.append(event) + else: + collapsed += 1 + for event in unique: + if event.requestID in self.users: + try: + self.users[event.requestID].HandleEvent(event) + except: + logger.error("Handling event for file %s" % event.filename, + exc_info=1) + end = time() + logger.info("Processed %s fam events 
in %03.03f seconds. %s coalesced" % + (count, (end - start), collapsed)) + return count diff --git a/src/lib/Bcfg2/Server/FileMonitor/Gamin.py b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py new file mode 100644 index 000000000..60f80c9c3 --- /dev/null +++ b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py @@ -0,0 +1,64 @@ +""" Gamin driver for file alteration events """ + +import os +import stat +import logging +from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \ + GAMChanged, GAMDeleted +from Bcfg2.Server.FileMonitor import Event, FileMonitor + +logger = logging.getLogger(__name__) + +class GaminEvent(Event): + """ + This class provides an event analogous to + python-fam events based on gamin sources. + """ + action_map = {GAMCreated: 'created', GAMExists: 'exists', + GAMChanged: 'changed', GAMDeleted: 'deleted', + GAMEndExist: 'endExist'} + + def __init__(self, request_id, filename, code): + Event.__init__(self, request_id, filename, code) + if code in self.action_map: + self.action = self.action_map[code] + +class Gamin(FileMonitor): + __priority__ = 10 + + def __init__(self, ignore=None, debug=False): + FileMonitor.__init__(self, ignore=ignore, debug=debug) + self.mon = WatchMonitor() + self.counter = 0 + + def fileno(self): + return self.mon.get_fd() + + def queue(self, path, action, request_id): + """queue up the event for later handling""" + self.events.append(GaminEvent(request_id, path, action)) + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.""" + handle = self.counter + self.counter += 1 + mode = os.stat(path)[stat.ST_MODE] + + # Flush queued gamin events + while self.mon.event_pending(): + self.mon.handle_one_event() + + if stat.S_ISDIR(mode): + self.mon.watch_directory(path, self.queue, handle) + else: + self.mon.watch_file(path, self.queue, handle) + self.handles[handle] = obj + return handle + + def pending(self): + return FileMonitor.pending(self) or self.mon.event_pending() + + def 
get_event(self): + if self.mon.event_pending(): + self.mon.handle_one_event() + return FileMonitor.get_event(self) diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py new file mode 100644 index 000000000..880ac7e8d --- /dev/null +++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py @@ -0,0 +1,126 @@ +""" Inotify driver for file alteration events """ + +import logging +import operator +import os +import pyinotify +import sys +from Bcfg2.Bcfg2Py3k import reduce +from Bcfg2.Server.FileMonitor import Event +from Bcfg2.Server.FileMonitor.Pseudo import Pseudo + +logger = logging.getLogger(__name__) + +class Inotify(Pseudo, pyinotify.ProcessEvent): + __priority__ = 1 + action_map = {pyinotify.IN_CREATE: 'created', + pyinotify.IN_DELETE: 'deleted', + pyinotify.IN_MODIFY: 'changed', + pyinotify.IN_MOVED_FROM: 'deleted', + pyinotify.IN_MOVED_TO: 'created'} + mask = reduce(lambda x, y: x | y, action_map.keys()) + + def __init__(self, ignore=None, debug=False): + Pseudo.__init__(self, ignore=ignore, debug=debug) + self.wm = pyinotify.WatchManager() + self.notifier = pyinotify.ThreadedNotifier(self.wm, self) + self.notifier.start() + self.event_filter = dict() + self.watches_by_path = dict() + + def fileno(self): + return self.wm.get_fd() + + def process_default(self, ievent): + action = ievent.maskname + for amask, aname in self.action_map.items(): + if ievent.mask & amask: + action = aname + break + try: + watch = self.wm.watches[ievent.wd] + except KeyError: + err = sys.exc_info()[1] + logger.error("Error handling event for %s: Watch %s not found" % + (ievent.pathname, ievent.wd)) + return + # FAM-style file monitors return the full path to the parent + # directory that is being watched, relative paths to anything + # contained within the directory. 
since we can't use inotify + # to watch files directly, we have to sort of guess at whether + # this watch was actually added on a file (and thus is in + # self.event_filter because we're filtering out other events + # on the directory) or was added directly on a directory. + if (watch.path == ievent.pathname or ievent.wd in self.event_filter): + path = ievent.pathname + else: + # relative path + path = os.path.basename(ievent.pathname) + # figure out the handleID. start with the path of the event; + # that should catch events on files that are watched directly. + # (we have to watch the directory that a file is in, so this + # lets us handle events on different files in the same + # directory -- and thus under the same watch -- with different + # objects.) If the path to the event doesn't have a handler, + # use the path of the watch itself. + handleID = ievent.pathname + if handleID not in self.handles: + handleID = watch.path + evt = Event(handleID, path, action) + + if (ievent.wd not in self.event_filter or + ievent.pathname in self.event_filter[ievent.wd]): + self.events.append(evt) + + def AddMonitor(self, path, obj): + # strip trailing slashes + path = path.rstrip("/") + if not os.path.isdir(path): + # inotify is a little wonky about watching files. for + # instance, if you watch /tmp/foo, and then do 'mv + # /tmp/bar /tmp/foo', it processes that as a deletion of + # /tmp/foo (which it technically _is_, but that's rather + # useless -- we care that /tmp/foo changed, not that it + # was first deleted and then created). In order to + # effectively watch a file, we have to watch the directory + # it's in, and filter out events for other files in the + # same directory that are not similarly watched. + # watch_transient_file requires a Processor _class_, not + # an object, so we can't have this object handle events, + # which is Wrong, so we can't use that function. 
+ watch_path = os.path.dirname(path) + is_dir = False + else: + watch_path = path + is_dir = True + + # see if this path is already being watched + try: + wd = self.watches_by_path[watch_path] + except KeyError: + wd = self.wm.add_watch(watch_path, self.mask, + quiet=False)[watch_path] + self.watches_by_path[watch_path] = wd + + produce_exists = True + if not is_dir: + if wd not in self.event_filter: + self.event_filter[wd] = [path] + elif path not in self.event_filter[wd]: + self.event_filter[wd].append(path) + else: + # we've been asked to watch a file that we're already + # watching, so we don't need to produce 'exists' + # events + produce_exists = False + + # inotify doesn't produce initial 'exists' events, so we + # inherit from Pseudo to produce those + if produce_exists: + return Pseudo.AddMonitor(self, path, obj, handleID=path) + else: + self.handles[path] = obj + return path + + def shutdown(self): + self.notifier.stop() diff --git a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py new file mode 100644 index 000000000..089d4cf0f --- /dev/null +++ b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py @@ -0,0 +1,25 @@ +""" Pseudo provides static monitor support for file alteration events """ + +import os +import logging +from Bcfg2.Server.FileMonitor import FileMonitor, Event + +logger = logging.getLogger(__name__) + +class Pseudo(FileMonitor): + __priority__ = 99 + + def AddMonitor(self, path, obj, handleID=None): + """add a monitor to path, installing a callback to obj.HandleEvent""" + if handleID is None: + handleID = len(list(self.handles.keys())) + self.events.append(Event(handleID, path, 'exists')) + if os.path.isdir(path): + dirList = os.listdir(path) + for includedFile in dirList: + self.events.append(Event(handleID, includedFile, 'exists')) + self.events.append(Event(handleID, path, 'endExist')) + + if obj != None: + self.handles[handleID] = obj + return handleID diff --git 
a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py new file mode 100644 index 000000000..c490acc81 --- /dev/null +++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py @@ -0,0 +1,143 @@ +"""Bcfg2.Server.FileMonitor provides the support for monitoring files.""" + +import os +import sys +import fnmatch +import logging +import pkgutil +from time import sleep, time + +logger = logging.getLogger(__name__) + +class Event(object): + def __init__(self, request_id, filename, code): + self.requestID = request_id + self.filename = filename + self.action = code + + def code2str(self): + """return static code for event""" + return self.action + + def __str__(self): + return "%s: %s %s" % (self.__class__.__name__, + self.filename, self.action) + + def __repr__(self): + return "%s (request ID %s)" % (str(self), self.requestID) + + +class FileMonitor(object): + """File Monitor baseclass.""" + def __init__(self, ignore=None, debug=False): + object.__init__(self) + self.debug = debug + self.handles = dict() + self.events = [] + if ignore is None: + ignore = [] + self.ignore = ignore + + def __str__(self): + return "%s: %s" % (__name__, self.__class__.__name__) + + def __repr__(self): + return "%s (%s events, fd %s)" % (str(self), len(self.events), self.fileno) + + def debug_log(self, msg): + if self.debug: + logger.info(msg) + + def should_ignore(self, event): + for pattern in self.ignore: + if (fnmatch.fnmatch(event.filename, pattern) or + fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)): + self.debug_log("Ignoring %s" % event) + return True + return False + + def pending(self): + return bool(self.events) + + def get_event(self): + return self.events.pop(0) + + def fileno(self): + return 0 + + def handle_one_event(self, event): + if self.should_ignore(event): + return + if event.requestID not in self.handles: + logger.info("Got event for unexpected id %s, file %s" % + (event.requestID, event.filename)) + return + 
self.debug_log("Dispatching event %s %s to obj %s" % + (event.code2str(), event.filename, + self.handles[event.requestID])) + try: + self.handles[event.requestID].HandleEvent(event) + except: + err = sys.exc_info()[1] + logger.error("Error in handling of event %s for %s: %s" % + (event.code2str(), event.filename, err)) + + def handle_event_set(self, lock=None): + count = 1 + event = self.get_event() + start = time() + if lock: + lock.acquire() + try: + self.handle_one_event(event) + while self.pending(): + self.handle_one_event(self.get_event()) + count += 1 + except: + pass + if lock: + lock.release() + end = time() + logger.info("Handled %d events in %.03fs" % (count, (end - start))) + + def handle_events_in_interval(self, interval): + end = time() + interval + while time() < end: + if self.pending(): + self.handle_event_set() + end = time() + interval + else: + sleep(0.5) + + def shutdown(self): + pass + + +available = dict() + +# todo: loading the monitor drivers should be automatic +from Bcfg2.Server.FileMonitor.Pseudo import Pseudo +available['pseudo'] = Pseudo + +try: + from Bcfg2.Server.FileMonitor.Fam import Fam + available['fam'] = Fam +except ImportError: + pass + +try: + from Bcfg2.Server.FileMonitor.Gamin import Gamin + available['gamin'] = Gamin +except ImportError: + pass + +try: + from Bcfg2.Server.FileMonitor.Inotify import Inotify + available['inotify'] = Inotify +except ImportError: + pass + +for fdrv in sorted(available.keys(), key=lambda k: available[k].__priority__): + if fdrv in available: + available['default'] = available[fdrv] + break diff --git a/src/lib/Bcfg2/Server/Hostbase/backends.py b/src/lib/Bcfg2/Server/Hostbase/backends.py index ecaf3c109..cfa9e1e16 100644 --- a/src/lib/Bcfg2/Server/Hostbase/backends.py +++ b/src/lib/Bcfg2/Server/Hostbase/backends.py @@ -18,21 +18,16 @@ from nisauth import * ## uid=l.badge_no ## ) ## #fixme: need to add this user session obj to session -## #print str(ldap_user) ## user,created = 
User.objects.get_or_create(username=username) -## #print user -## #print "created " + str(created) ## return user ## except LDAPAUTHError,e: -## #print str(e) ## return None ## def get_user(self,user_id): ## try: ## return User.objects.get(pk=user_id) ## except User.DoesNotExist, e: -## print str(e) ## return None diff --git a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py index f3db26f67..fc2ca1bf1 100644 --- a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py +++ b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py @@ -144,7 +144,6 @@ class ldapauth(object): def member_of(self): """See if this user is in our group that is allowed to login""" m = [g for g in self.memberOf if g == self.check_member_of] - #print m if len(m) == 1: return True else: diff --git a/src/lib/Bcfg2/Server/Lint/Bundles.py b/src/lib/Bcfg2/Server/Lint/Bundles.py deleted file mode 100644 index e6b6307f2..000000000 --- a/src/lib/Bcfg2/Server/Lint/Bundles.py +++ /dev/null @@ -1,54 +0,0 @@ -import lxml.etree -import Bcfg2.Server.Lint - -class Bundles(Bcfg2.Server.Lint.ServerPlugin): - """ Perform various bundle checks """ - def Run(self): - """ run plugin """ - if 'Bundler' in self.core.plugins: - self.missing_bundles() - for bundle in self.core.plugins['Bundler'].entries.values(): - if self.HandlesFile(bundle.name): - if (not Bcfg2.Server.Plugins.Bundler.have_genshi or - type(bundle) is not - Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile): - self.bundle_names(bundle) - - @classmethod - def Errors(cls): - return {"bundle-not-found":"error", - "inconsistent-bundle-name":"warning"} - - def missing_bundles(self): - """ find bundles listed in Metadata but not implemented in Bundler """ - if self.files is None: - # when given a list of files on stdin, this check is - # useless, so skip it - groupdata = self.metadata.groups_xml.xdata - ref_bundles = set([b.get("name") - for b in groupdata.findall("//Bundle")]) - - allbundles = self.core.plugins['Bundler'].entries.keys() - for 
bundle in ref_bundles: - xmlbundle = "%s.xml" % bundle - genshibundle = "%s.genshi" % bundle - if (xmlbundle not in allbundles and - genshibundle not in allbundles): - self.LintError("bundle-not-found", - "Bundle %s referenced, but does not exist" % - bundle) - - def bundle_names(self, bundle): - """ verify bundle name attribute matches filename """ - try: - xdata = lxml.etree.XML(bundle.data) - except AttributeError: - # genshi template - xdata = lxml.etree.parse(bundle.template.filepath).getroot() - - fname = bundle.name.split('Bundler/')[1].split('.')[0] - bname = xdata.get('name') - if fname != bname: - self.LintError("inconsistent-bundle-name", - "Inconsistent bundle name: filename is %s, bundle name is %s" % - (fname, bname)) diff --git a/src/lib/Bcfg2/Server/Lint/Comments.py b/src/lib/Bcfg2/Server/Lint/Comments.py index f5d0e265f..59d18fc57 100644 --- a/src/lib/Bcfg2/Server/Lint/Comments.py +++ b/src/lib/Bcfg2/Server/Lint/Comments.py @@ -1,6 +1,7 @@ -import os.path +import os import lxml.etree import Bcfg2.Server.Lint +from Bcfg2.Server import XI, XI_NAMESPACE from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator import CfgPlaintextGenerator from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator @@ -186,7 +187,7 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin): path = os.path.join(self.metadata.data, mfile) if path in self.files: xdata = lxml.etree.parse(path) - for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'): + for el in xdata.findall('./%sinclude' % XI_NAMESPACE): if not self.has_all_xincludes(el.get('href')): self.LintError("broken-xinclude-chain", "Broken XInclude chain: could not include %s" % path) diff --git a/src/lib/Bcfg2/Server/Lint/Deltas.py b/src/lib/Bcfg2/Server/Lint/Deltas.py deleted file mode 100644 index 114f2e348..000000000 --- a/src/lib/Bcfg2/Server/Lint/Deltas.py +++ /dev/null @@ -1,25 +0,0 @@ -import Bcfg2.Server.Lint 
-from Bcfg2.Server.Plugins.Cfg import CfgFilter - -class Deltas(Bcfg2.Server.Lint.ServerPlugin): - """ Warn about usage of .cat and .diff files """ - - def Run(self): - """ run plugin """ - if 'Cfg' in self.core.plugins: - cfg = self.core.plugins['Cfg'] - for basename, entry in list(cfg.entries.items()): - self.check_entry(basename, entry) - - @classmethod - def Errors(cls): - return {"cat-file-used":"warning", - "diff-file-used":"warning"} - - def check_entry(self, basename, entry): - for fname, processor in entry.entries.items(): - if self.HandlesFile(fname) and isinstance(processor, CfgFilter): - extension = fname.split(".")[-1] - self.LintError("%s-file-used" % extension, - "%s file used on %s: %s" % - (extension, basename, fname)) diff --git a/src/lib/Bcfg2/Server/Lint/Duplicates.py b/src/lib/Bcfg2/Server/Lint/Duplicates.py index ee6b7a2e6..60a02ffb9 100644 --- a/src/lib/Bcfg2/Server/Lint/Duplicates.py +++ b/src/lib/Bcfg2/Server/Lint/Duplicates.py @@ -1,6 +1,7 @@ -import os.path +import os import lxml.etree import Bcfg2.Server.Lint +from Bcfg2.Server import XI, XI_NAMESPACE class Duplicates(Bcfg2.Server.Lint.ServerPlugin): """ Find duplicate clients, groups, etc. 
""" @@ -80,7 +81,7 @@ class Duplicates(Bcfg2.Server.Lint.ServerPlugin): path = os.path.join(self.metadata.data, mfile) if path in self.files: xdata = lxml.etree.parse(path) - for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'): + for el in xdata.findall('./%sinclude' % XI_NAMESPACE): if not self.has_all_xincludes(el.get('href')): self.LintError("broken-xinclude-chain", "Broken XInclude chain: could not include %s" % path) diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py index b6007161e..74142b446 100755 --- a/src/lib/Bcfg2/Server/Lint/Genshi.py +++ b/src/lib/Bcfg2/Server/Lint/Genshi.py @@ -1,3 +1,4 @@ +import sys import genshi.template import Bcfg2.Server.Lint diff --git a/src/lib/Bcfg2/Server/Lint/GroupNames.py b/src/lib/Bcfg2/Server/Lint/GroupNames.py new file mode 100644 index 000000000..5df98a30e --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/GroupNames.py @@ -0,0 +1,78 @@ +import os +import re +import Bcfg2.Server.Lint +try: + from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile + has_genshi = True +except ImportError: + has_genshi = False + +class GroupNames(Bcfg2.Server.Lint.ServerPlugin): + """ ensure that all named groups are valid group names """ + pattern = r'\S+$' + valid = re.compile(r'^' + pattern) + + def Run(self): + self.check_metadata() + if 'Rules' in self.core.plugins: + self.check_rules() + if 'Bundler' in self.core.plugins: + self.check_bundles() + if 'GroupPatterns' in self.core.plugins: + self.check_grouppatterns() + if 'Cfg' in self.core.plugins: + self.check_cfg() + + @classmethod + def Errors(cls): + return {"invalid-group-name": "error"} + + def check_rules(self): + for rules in self.core.plugins['Rules'].entries.values(): + if not self.HandlesFile(rules.name): + continue + xdata = rules.pnode.data + self.check_entries(xdata.xpath("//Group"), + os.path.join(self.config['repo'], rules.name)) + + def check_bundles(self): + """ check bundles for BoundPath entries with missing 
attrs """ + for bundle in self.core.plugins['Bundler'].entries.values(): + if (self.HandlesFile(bundle.name) and + (not has_genshi or + not isinstance(bundle, BundleTemplateFile))): + self.check_entries(bundle.xdata.xpath("//Group"), + bundle.name) + + def check_metadata(self): + self.check_entries(self.metadata.groups_xml.xdata.xpath("//Group"), + os.path.join(self.config['repo'], + self.metadata.groups_xml.name)) + + def check_grouppatterns(self): + cfg = self.core.plugins['GroupPatterns'].config + if not self.HandlesFile(cfg.name): + return + for grp in cfg.xdata.xpath('//GroupPattern/Group'): + if not self.valid.search(grp.text): + self.LintError("invalid-group-name", + "Invalid group name in %s: %s" % + (cfg.name, self.RenderXML(grp, keep_text=True))) + + def check_cfg(self): + for root, dirs, files in os.walk(self.core.plugins['Cfg'].data): + for fname in files: + basename = os.path.basename(root) + if (re.search(r'^%s\.G\d\d_' % basename, fname) and + not re.search(r'^%s\.G\d\d_' % basename + self.pattern, + fname)): + self.LintError("invalid-group-name", + "Invalid group name referenced in %s" % + os.path.join(root, fname)) + + def check_entries(self, entries, fname): + for grp in entries: + if not self.valid.search(grp.get("name")): + self.LintError("invalid-group-name", + "Invalid group name in %s: %s" % + (fname, self.RenderXML(grp))) diff --git a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py deleted file mode 100644 index 431ba4056..000000000 --- a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py +++ /dev/null @@ -1,35 +0,0 @@ -import sys -import Bcfg2.Server.Lint -from Bcfg2.Server.Plugins.GroupPatterns import PatternMap - -class GroupPatterns(Bcfg2.Server.Lint.ServerPlugin): - """ Check Genshi templates for syntax errors """ - - def Run(self): - """ run plugin """ - if 'GroupPatterns' in self.core.plugins: - cfg = self.core.plugins['GroupPatterns'].config - for entry in cfg.xdata.xpath('//GroupPattern'): - groups = 
[g.text for g in entry.findall('Group')] - self.check(entry, groups, ptype='NamePattern') - self.check(entry, groups, ptype='NameRange') - - @classmethod - def Errors(cls): - return {"pattern-fails-to-initialize":"error"} - - def check(self, entry, groups, ptype="NamePattern"): - if ptype == "NamePattern": - pmap = lambda p: PatternMap(p, None, groups) - else: - pmap = lambda p: PatternMap(None, p, groups) - - for el in entry.findall(ptype): - pat = el.text - try: - pmap(pat) - except: - err = sys.exc_info()[1] - self.LintError("pattern-fails-to-initialize", - "Failed to initialize %s %s for %s: %s" % - (ptype, pat, entry.get('pattern'), err)) diff --git a/src/lib/Bcfg2/Server/Lint/InfoXML.py b/src/lib/Bcfg2/Server/Lint/InfoXML.py index db6aeea73..5e4e21e18 100644 --- a/src/lib/Bcfg2/Server/Lint/InfoXML.py +++ b/src/lib/Bcfg2/Server/Lint/InfoXML.py @@ -1,28 +1,41 @@ -import os.path +import os import Bcfg2.Options import Bcfg2.Server.Lint from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML +from Bcfg2.Server.Plugins.Cfg.CfgLegacyInfo import CfgLegacyInfo class InfoXML(Bcfg2.Server.Lint.ServerPlugin): """ ensure that all config files have an info.xml file""" def Run(self): - if 'Cfg' in self.core.plugins: - for filename, entryset in self.core.plugins['Cfg'].entries.items(): - infoxml_fname = os.path.join(entryset.path, "info.xml") - if self.HandlesFile(infoxml_fname): - found = False - for entry in entryset.entries.values(): - if isinstance(entry, CfgInfoXML): - self.check_infoxml(infoxml_fname, - entry.infoxml.pnode.data) - found = True - if not found: - self.LintError("no-infoxml", - "No info.xml found for %s" % filename) + if 'Cfg' not in self.core.plugins: + return + + for filename, entryset in self.core.plugins['Cfg'].entries.items(): + infoxml_fname = os.path.join(entryset.path, "info.xml") + if self.HandlesFile(infoxml_fname): + found = False + for entry in entryset.entries.values(): + if isinstance(entry, CfgInfoXML): + 
self.check_infoxml(infoxml_fname, + entry.infoxml.pnode.data) + found = True + if not found: + self.LintError("no-infoxml", + "No info.xml found for %s" % filename) + + for entry in entryset.entries.values(): + if isinstance(entry, CfgLegacyInfo): + if not self.HandlesFile(entry.path): + continue + self.LintError("deprecated-info-file", + "Deprecated %s file found at %s" % + (os.path.basename(entry.name), + entry.path)) @classmethod def Errors(cls): return {"no-infoxml":"warning", + "deprecated-info-file":"warning", "paranoid-false":"warning", "broken-xinclude-chain":"warning", "required-infoxml-attrs-missing":"error"} diff --git a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py deleted file mode 100644 index ceb46238a..000000000 --- a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py +++ /dev/null @@ -1,38 +0,0 @@ -import glob -import lxml.etree -import Bcfg2.Server.Lint - -class Pkgmgr(Bcfg2.Server.Lint.ServerlessPlugin): - """ find duplicate Pkgmgr entries with the same priority """ - def Run(self): - pset = set() - for pfile in glob.glob("%s/Pkgmgr/*.xml" % self.config['repo']): - if self.HandlesFile(pfile): - xdata = lxml.etree.parse(pfile).getroot() - # get priority, type, group - priority = xdata.get('priority') - ptype = xdata.get('type') - for pkg in xdata.xpath("//Package"): - if pkg.getparent().tag == 'Group': - grp = pkg.getparent().get('name') - if (type(grp) is not str and - grp.getparent().tag == 'Group'): - pgrp = grp.getparent().get('name') - else: - pgrp = 'none' - else: - grp = 'none' - pgrp = 'none' - ptuple = (pkg.get('name'), priority, ptype, grp, pgrp) - # check if package is already listed with same - # priority, type, grp - if ptuple in pset: - self.LintError("duplicate-package", - "Duplicate Package %s, priority:%s, type:%s" % - (pkg.get('name'), priority, ptype)) - else: - pset.add(ptuple) - - @classmethod - def Errors(cls): - return {"duplicate-packages":"error"} diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py 
b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py index 6f76cf2db..fcb7c6c28 100644 --- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py +++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py @@ -1,32 +1,105 @@ -import os.path +import os +import re import lxml.etree import Bcfg2.Server.Lint +import Bcfg2.Client.Tools.POSIX +import Bcfg2.Client.Tools.VCS from Bcfg2.Server.Plugins.Packages import Apt, Yum +try: + from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile + has_genshi = True +except ImportError: + has_genshi = False + +# format verifying functions +def is_filename(val): + return val.startswith("/") and len(val) > 1 + +def is_selinux_type(val): + return re.match(r'^[a-z_]+_t', val) + +def is_selinux_user(val): + return re.match(r'^[a-z_]+_u', val) + +def is_octal_mode(val): + return re.match(r'[0-7]{3,4}', val) + +def is_username(val): + return re.match(r'^([a-z]\w{0,30}|\d+)$', val) + +def is_device_mode(val): + try: + # checking upper bound seems like a good way to discover some + # obscure OS with >8-bit device numbers + return int(val) > 0 + except: + return False class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): """ verify attributes for configuration entries (as defined in doc/server/configurationentries) """ def __init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs) - self.required_attrs = { - 'Path': { - 'device': ['name', 'owner', 'group', 'dev_type'], - 'directory': ['name', 'owner', 'group', 'perms'], - 'file': ['name', 'owner', 'group', 'perms', '__text__'], - 'hardlink': ['name', 'to'], - 'symlink': ['name', 'to'], - 'ignore': ['name'], - 'nonexistent': ['name'], - 'permissions': ['name', 'owner', 'group', 'perms'], - 'vcs': ['vcstype', 'revision', 'sourceurl']}, - 'Service': { - 'chkconfig': ['name'], - 'deb': ['name'], - 'rc-update': ['name'], - 'smf': ['name', 'FMRI'], - 'upstart': ['name']}, - 'Action': ['name', 'timing', 'when', 'status', 'command'], - 'Package': ['name']} + self.required_attrs = dict( + 
Path=dict( + device=dict(name=is_filename, owner=is_username, + group=is_username, + dev_type=lambda v: \ + v in Bcfg2.Client.Tools.POSIX.device_map), + directory=dict(name=is_filename, owner=is_username, + group=is_username, perms=is_octal_mode), + file=dict(name=is_filename, owner=is_username, + group=is_username, perms=is_octal_mode, + __text__=None), + hardlink=dict(name=is_filename, to=is_filename), + symlink=dict(name=is_filename, to=is_filename), + ignore=dict(name=is_filename), + nonexistent=dict(name=is_filename), + permissions=dict(name=is_filename, owner=is_username, + group=is_username, perms=is_octal_mode), + vcs=dict(vcstype=lambda v: (v != 'Path' and + hasattr(Bcfg2.Client.Tools.VCS, + "Install%s" % v)), + revision=None, sourceurl=None)), + Service={ + "chkconfig": dict(name=None), + "deb": dict(name=None), + "rc-update": dict(name=None), + "smf": dict(name=None, FMRI=None), + "upstart": dict(name=None)}, + Action={None: dict(name=None, + timing=lambda v: v in ['pre', 'post', 'both'], + when=lambda v: v in ['modified', 'always'], + status=lambda v: v in ['ignore', 'check'], + command=None)}, + ACL=dict( + default=dict(scope=lambda v: v in ['user', 'group'], + perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3}', + v)), + access=dict(scope=lambda v: v in ['user', 'group'], + perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3}', + v)), + mask=dict(perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3}', v))), + Package={None: dict(name=None)}, + SELinux=dict( + boolean=dict(name=None, + value=lambda v: v in ['on', 'off']), + module=dict(name=None, __text__=None), + port=dict(name=lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', v), + selinuxtype=is_selinux_type), + fcontext=dict(name=None, selinuxtype=is_selinux_type), + node=dict(name=lambda v: "/" in v, + selinuxtype=is_selinux_type, + proto=lambda v: v in ['ipv6', 'ipv4']), + login=dict(name=is_username, + selinuxuser=is_selinux_user), + user=dict(name=is_selinux_user, + roles=lambda v: all(is_selinux_user(u) + for u in 
" ".split(v)), + prefix=None), + interface=dict(name=None, selinuxtype=is_selinux_type), + permissive=dict(name=is_selinux_type)) + ) def Run(self): self.check_packages() @@ -42,9 +115,9 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): return {"unknown-entry-type":"error", "unknown-entry-tag":"error", "required-attrs-missing":"error", + "required-attr-format":"error", "extra-attrs":"warning"} - def check_packages(self): """ check package sources for Source entries with missing attrs """ if 'Packages' in self.core.plugins: @@ -85,13 +158,17 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): """ check bundles for BoundPath entries with missing attrs """ if 'Bundler' in self.core.plugins: for bundle in self.core.plugins['Bundler'].entries.values(): - try: - xdata = lxml.etree.XML(bundle.data) - except (lxml.etree.XMLSyntaxError, AttributeError): - xdata = lxml.etree.parse(bundle.template.filepath).getroot() + if (self.HandlesFile(bundle.name) and + (not has_genshi or + not isinstance(bundle, BundleTemplateFile))): + try: + xdata = lxml.etree.XML(bundle.data) + except (lxml.etree.XMLSyntaxError, AttributeError): + xdata = \ + lxml.etree.parse(bundle.template.filepath).getroot() - for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"): - self.check_entry(path, bundle.name) + for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"): + self.check_entry(path, bundle.name) def check_entry(self, entry, filename): """ generic entry check """ @@ -103,43 +180,55 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): if tag not in self.required_attrs: self.LintError("unknown-entry-tag", "Unknown entry tag '%s': %s" % - (entry.tag, self.RenderXML(entry))) + (tag, self.RenderXML(entry))) if isinstance(self.required_attrs[tag], dict): etype = entry.get('type') if etype in self.required_attrs[tag]: - required_attrs = set(self.required_attrs[tag][etype] + - ['type']) + required_attrs = self.required_attrs[tag][etype] else: 
self.LintError("unknown-entry-type", "Unknown %s type %s: %s" % (tag, etype, self.RenderXML(entry))) return else: - required_attrs = set(self.required_attrs[tag]) + required_attrs = self.required_attrs[tag] attrs = set(entry.attrib.keys()) if 'dev_type' in required_attrs: dev_type = entry.get('dev_type') if dev_type in ['block', 'char']: # check if major/minor are specified - required_attrs |= set(['major', 'minor']) + required_attrs['major'] = is_device_mode + required_attrs['minor'] = is_device_mode + + if tag == 'ACL' and 'scope' in required_attrs: + required_attrs[entry.get('scope')] = is_username if '__text__' in required_attrs: - required_attrs.remove('__text__') + del required_attrs['__text__'] if (not entry.text and not entry.get('empty', 'false').lower() == 'true'): self.LintError("required-attrs-missing", "Text missing for %s %s in %s: %s" % - (entry.tag, name, filename, + (tag, name, filename, self.RenderXML(entry))) - if not attrs.issuperset(required_attrs): + if not attrs.issuperset(required_attrs.keys()): self.LintError("required-attrs-missing", "The following required attribute(s) are " "missing for %s %s in %s: %s\n%s" % - (entry.tag, name, filename, + (tag, name, filename, ", ".join([attr for attr in - required_attrs.difference(attrs)]), + set(required_attrs.keys()).difference(attrs)]), self.RenderXML(entry))) + + for attr, fmt in required_attrs.items(): + if fmt and attr in attrs and not fmt(entry.attrib[attr]): + self.LintError("required-attr-format", + "The %s attribute of %s %s in %s is " + "malformed\n%s" % + (attr, tag, name, filename, + self.RenderXML(entry))) + diff --git a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py deleted file mode 100644 index be270a59c..000000000 --- a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py +++ /dev/null @@ -1,64 +0,0 @@ -import sys -import imp -import glob -import Bcfg2.Server.Lint -from Bcfg2.Server.Plugins.TemplateHelper import HelperModule - -class 
TemplateHelper(Bcfg2.Server.Lint.ServerlessPlugin): - """ find duplicate Pkgmgr entries with the same priority """ - def __init__(self, *args, **kwargs): - Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) - hm = HelperModule("foo.py", None, None) - self.reserved_keywords = dir(hm) - - def Run(self): - for helper in glob.glob("%s/TemplateHelper/*.py" % self.config['repo']): - if not self.HandlesFile(helper): - continue - - match = HelperModule._module_name_re.search(helper) - if match: - module_name = match.group(1) - else: - module_name = helper - - try: - module = imp.load_source(module_name, helper) - except: - err = sys.exc_info()[1] - self.LintError("templatehelper-import-error", - "Failed to import %s: %s" % - (helper, err)) - continue - - if not hasattr(module, "__export__"): - self.LintError("templatehelper-no-export", - "%s has no __export__ list" % helper) - continue - elif not isinstance(module.__export__, list): - self.LintError("templatehelper-nonlist-export", - "__export__ is not a list in %s" % helper) - continue - - for sym in module.__export__: - if not hasattr(module, sym): - self.LintError("templatehelper-nonexistent-export", - "%s: exported symbol %s does not exist" % - (helper, sym)) - elif sym in self.reserved_keywords: - self.LintError("templatehelper-reserved-export", - "%s: exported symbol %s is reserved" % - (helper, sym)) - elif sym.startswith("_"): - self.LintError("templatehelper-underscore-export", - "%s: exported symbol %s starts with underscore" % - (helper, sym)) - - @classmethod - def Errors(cls): - return {"templatehelper-import-error":"error", - "templatehelper-no-export":"error", - "templatehelper-nonlist-export":"error", - "templatehelper-nonexistent-export":"error", - "templatehelper-reserved-export":"error", - "templatehelper-underscore-export":"warning"} diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py index 05fedc313..b8bdb4755 100644 --- 
a/src/lib/Bcfg2/Server/Lint/Validate.py +++ b/src/lib/Bcfg2/Server/Lint/Validate.py @@ -1,10 +1,10 @@ -import fnmatch +import os +import sys import glob +import fnmatch import lxml.etree -import os from subprocess import Popen, PIPE, STDOUT -import sys - +from Bcfg2.Server import XI, XI_NAMESPACE import Bcfg2.Server.Lint class Validate(Bcfg2.Server.Lint.ServerlessPlugin): @@ -22,7 +22,6 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): "%s/Rules/*.xml":"%s/rules.xsd", "%s/Defaults/*.xml":"%s/defaults.xsd", "%s/etc/report-configuration.xml":"%s/report-configuration.xsd", - "%s/Svcmgr/*.xml":"%s/services.xsd", "%s/Deps/*.xml":"%s/deps.xsd", "%s/Decisions/*.xml":"%s/decisions.xsd", "%s/Packages/sources.xml":"%s/packages.xsd", @@ -46,20 +45,10 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): if filelist: # avoid loading schemas for empty file lists schemafile = schemaname % schemadir - try: - schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile)) - except IOError: - e = sys.exc_info()[1] - self.LintError("input-output-error", str(e)) - continue - except lxml.etree.XMLSchemaParseError: - e = sys.exc_info()[1] - self.LintError("schema-failed-to-parse", - "Failed to process schema %s: %s" % - (schemafile, e)) - continue - for filename in filelist: - self.validate(filename, schemafile, schema=schema) + schema = self._load_schema(schemafile) + if schema: + for filename in filelist: + self.validate(filename, schemafile, schema=schema) self.check_properties() @@ -88,11 +77,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): return True on success, False on failure """ if schema is None: # if no schema object was provided, instantiate one - try: - schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile)) - except: - self.LintError("schema-failed-to-parse", - "Failed to process schema %s" % schemafile) + schema = self._load_schema(schemafile) + if not schema: return False try: @@ -187,24 +173,42 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): def 
follow_xinclude(self, xfile): """ follow xincludes in the given file """ xdata = lxml.etree.parse(xfile) - included = set([ent.get('href') for ent in - xdata.findall('./{http://www.w3.org/2001/XInclude}include')]) + included = set([el + for el in xdata.findall('./%sinclude' % XI_NAMESPACE)]) rv = [] while included: try: - filename = included.pop() + el = included.pop() except KeyError: continue + filename = el.get("href") path = os.path.join(os.path.dirname(xfile), filename) - if self.HandlesFile(path): + if not os.path.exists(path): + if not el.findall('./%sfallback' % XI_NAMESPACE): + self.LintError("broken-xinclude-chain", + "XInclude %s does not exist in %s: %s" % + (filename, xfile, self.RenderXML(el))) + elif self.HandlesFile(path): rv.append(path) groupdata = lxml.etree.parse(path) [included.add(el.get('href')) for el in - groupdata.findall('./{http://www.w3.org/2001/XInclude}include')] + groupdata.findall('./%sinclude' % XI_NAMESPACE)] included.discard(filename) return rv + def _load_schema(self, filename): + try: + return lxml.etree.XMLSchema(lxml.etree.parse(filename)) + except IOError: + e = sys.exc_info()[1] + self.LintError("input-output-error", str(e)) + except lxml.etree.XMLSchemaParseError: + e = sys.exc_info()[1] + self.LintError("schema-failed-to-parse", + "Failed to process schema %s: %s" % + (filename, e)) + return None diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py index 5d7dd707b..e3b4c8ea7 100644 --- a/src/lib/Bcfg2/Server/Lint/__init__.py +++ b/src/lib/Bcfg2/Server/Lint/__init__.py @@ -81,18 +81,20 @@ class Plugin (object): def LintError(self, err, msg): self.errorhandler.dispatch(err, msg) - def RenderXML(self, element): + def RenderXML(self, element, keep_text=False): """render an XML element for error output -- line number prefixed, no children""" xml = None if len(element) or element.text: el = copy(element) - if el.text: + if el.text and not keep_text: el.text = '...' 
[el.remove(c) for c in el.iterchildren()] - xml = lxml.etree.tostring(el).strip() + xml = lxml.etree.tostring(el, + xml_declaration=False).decode("UTF-8").strip() else: - xml = lxml.etree.tostring(element).strip() + xml = lxml.etree.tostring(element, + xml_declaration=False).decode("UTF-8").strip() return " line %s: %s" % (element.sourceline, xml) diff --git a/src/lib/Bcfg2/Server/Plugin.py b/src/lib/Bcfg2/Server/Plugin.py index ca37431a2..910bc0108 100644 --- a/src/lib/Bcfg2/Server/Plugin.py +++ b/src/lib/Bcfg2/Server/Plugin.py @@ -1,54 +1,60 @@ """This module provides the baseclass for Bcfg2 Server Plugins.""" -import copy -import logging -import lxml.etree import os -import pickle -import posixpath import re import sys +import copy +import logging +import operator import threading -from Bcfg2.Bcfg2Py3k import ConfigParser - -from lxml.etree import XML, XMLSyntaxError - +import lxml.etree +import Bcfg2.Server import Bcfg2.Options +from Bcfg2.Bcfg2Py3k import ConfigParser, CmpMixin, reduce, Queue, Empty, \ + Full, cPickle -# py3k compatibility -if sys.hexversion >= 0x03000000: - from functools import reduce - from io import FileIO as BUILTIN_FILE_TYPE -else: - BUILTIN_FILE_TYPE = file -from Bcfg2.Bcfg2Py3k import Queue -from Bcfg2.Bcfg2Py3k import Empty -from Bcfg2.Bcfg2Py3k import Full +try: + import django + has_django = True +except ImportError: + has_django = False # grab default metadata info from bcfg2.conf opts = {'owner': Bcfg2.Options.MDATA_OWNER, 'group': Bcfg2.Options.MDATA_GROUP, - 'important': Bcfg2.Options.MDATA_IMPORTANT, 'perms': Bcfg2.Options.MDATA_PERMS, + 'secontext': Bcfg2.Options.MDATA_SECONTEXT, + 'important': Bcfg2.Options.MDATA_IMPORTANT, 'paranoid': Bcfg2.Options.MDATA_PARANOID, 'sensitive': Bcfg2.Options.MDATA_SENSITIVE} -mdata_setup = Bcfg2.Options.OptionParser(opts) -mdata_setup.parse([]) -del mdata_setup['args'] +default_file_metadata = Bcfg2.Options.OptionParser(opts) +default_file_metadata.parse([]) +del default_file_metadata['args'] 
logger = logging.getLogger('Bcfg2.Server.Plugin') -default_file_metadata = mdata_setup - -info_regex = re.compile( \ - 'encoding:(\s)*(?P<encoding>\w+)|' + - 'group:(\s)*(?P<group>\S+)|' + - 'important:(\s)*(?P<important>\S+)|' + - 'mtime:(\s)*(?P<mtime>\w+)|' + - 'owner:(\s)*(?P<owner>\S+)|' + - 'paranoid:(\s)*(?P<paranoid>\S+)|' + - 'perms:(\s)*(?P<perms>\w+)|' + - 'sensitive:(\s)*(?P<sensitive>\S+)|') +info_regex = re.compile('owner:(\s)*(?P<owner>\S+)|' + + 'group:(\s)*(?P<group>\S+)|' + + 'perms:(\s)*(?P<perms>\w+)|' + + 'secontext:(\s)*(?P<secontext>\S+)|' + + 'paranoid:(\s)*(?P<paranoid>\S+)|' + + 'sensitive:(\s)*(?P<sensitive>\S+)|' + + 'encoding:(\s)*(?P<encoding>\S+)|' + + 'important:(\s)*(?P<important>\S+)|' + + 'mtime:(\s)*(?P<mtime>\w+)|') + +def bind_info(entry, metadata, infoxml=None, default=default_file_metadata): + for attr, val in list(default.items()): + entry.set(attr, val) + if infoxml: + mdata = dict() + infoxml.pnode.Match(metadata, mdata, entry=entry) + if 'Info' not in mdata: + msg = "Failed to set metadata for file %s" % entry.get('name') + logger.error(msg) + raise PluginExecutionError(msg) + for attr, val in list(mdata['Info'][None].items()): + entry.set(attr, val) class PluginInitError(Exception): @@ -61,6 +67,18 @@ class PluginExecutionError(Exception): pass +class MetadataConsistencyError(Exception): + """This error gets raised when metadata is internally inconsistent.""" + pass + + +class MetadataRuntimeError(Exception): + """This error is raised when the metadata engine + is called prior to reading enough data. 
+ """ + pass + + class Debuggable(object): __rmi__ = ['toggle_debug'] @@ -73,6 +91,10 @@ class Debuggable(object): def toggle_debug(self): self.debug_flag = not self.debug_flag + self.debug_log("%s: debug_flag = %s" % (self.__class__.__name__, + self.debug_flag), + flag=True) + return self.debug_flag def debug_log(self, message, flag=None): if (flag is None and self.debug_flag) or flag: @@ -116,8 +138,7 @@ class Plugin(Debuggable): @classmethod def init_repo(cls, repo): - path = "%s/%s" % (repo, cls.name) - os.makedirs(path) + os.makedirs(os.path.join(repo, cls.name)) def shutdown(self): self.running = False @@ -126,6 +147,26 @@ class Plugin(Debuggable): return "%s Plugin" % self.__class__.__name__ +class DatabaseBacked(Plugin): + @property + def _use_db(self): + use_db = self.core.setup.cfp.getboolean(self.name.lower(), + "use_database", + default=False) + if use_db and has_django and self.core.database_available: + return True + elif not use_db: + return False + else: + self.logger.error("use_database is true but django not found") + return False + + +class PluginDatabaseModel(object): + class Meta: + app_label = "Server" + + class Generator(object): """Generator plugins contribute to literal client configurations.""" def HandlesEntry(self, entry, metadata): @@ -134,19 +175,19 @@ class Generator(object): def HandleEntry(self, entry, metadata): """This is the slow-path handler for configuration entry binding.""" - raise PluginExecutionError + return entry class Structure(object): """Structure Plugins contribute to abstract client configurations.""" def BuildStructures(self, metadata): """Return a list of abstract goal structures for client.""" - raise PluginExecutionError + raise NotImplementedError class Metadata(object): """Signal metadata capabilities for this plugin""" - def add_client(self, client_name, attribs): + def add_client(self, client_name): """Add client.""" pass @@ -158,11 +199,17 @@ class Metadata(object): """Create viz str for viz admin mode.""" 
pass + def _handle_default_event(self, event): + pass + def get_initial_metadata(self, client_name): - raise PluginExecutionError + raise NotImplementedError - def merge_additional_data(self, imd, source, groups, data): - raise PluginExecutionError + def merge_additional_data(self, imd, source, data): + raise NotImplementedError + + def merge_additional_groups(self, imd, groups): + raise NotImplementedError class Connector(object): @@ -187,23 +234,23 @@ class Probing(object): pass -class Statistics(object): +class Statistics(Plugin): """Signal statistics handling capability.""" def process_statistics(self, client, xdata): pass -class ThreadedStatistics(Statistics, - threading.Thread): +class ThreadedStatistics(Statistics, threading.Thread): """Threaded statistics handling capability.""" def __init__(self, core, datastore): - Statistics.__init__(self) + Statistics.__init__(self, core, datastore) threading.Thread.__init__(self) # Event from the core signaling an exit self.terminate = core.terminate self.work_queue = Queue(100000) - self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__) - self.daemon = True + self.pending_file = os.path.join(datastore, "etc", + "%s.pending" % self.name) + self.daemon = False self.start() def save(self): @@ -213,32 +260,38 @@ class ThreadedStatistics(Statistics, while not self.work_queue.empty(): (metadata, data) = self.work_queue.get_nowait() try: - pending_data.append((metadata.hostname, lxml.etree.tostring(data))) + pending_data.append((metadata.hostname, + lxml.etree.tostring(data, + xml_declaration=False).decode("UTF-8"))) except: - self.logger.warning("Dropping interaction for %s" % metadata.hostname) + err = sys.exc_info()[1] + self.logger.warning("Dropping interaction for %s: %s" % + (metadata.hostname, err)) except Empty: pass try: savefile = open(self.pending_file, 'w') - pickle.dump(pending_data, savefile) + cPickle.dump(pending_data, savefile) savefile.close() - self.logger.info("Saved pending %s 
data" % self.__class__.__name__) + self.logger.info("Saved pending %s data" % self.name) except: - self.logger.warning("Failed to save pending data") + err = sys.exc_info()[1] + self.logger.warning("Failed to save pending data: %s" % err) def load(self): - """Load any pending data to a file.""" + """Load any pending data from a file.""" if not os.path.exists(self.pending_file): return True pending_data = [] try: savefile = open(self.pending_file, 'r') - pending_data = pickle.load(savefile) + pending_data = cPickle.load(savefile) savefile.close() except Exception: e = sys.exc_info()[1] self.logger.warning("Failed to load pending data: %s" % e) + return False for (pmetadata, pdata) in pending_data: # check that shutdown wasnt called early if self.terminate.isSet(): @@ -249,56 +302,58 @@ class ThreadedStatistics(Statistics, try: metadata = self.core.build_metadata(pmetadata) break - except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError: + except MetadataRuntimeError: pass self.terminate.wait(5) if self.terminate.isSet(): return False - self.work_queue.put_nowait((metadata, lxml.etree.fromstring(pdata))) + self.work_queue.put_nowait((metadata, + lxml.etree.XML(pdata, + parser=Bcfg2.Server.XMLParser))) except Full: self.logger.warning("Queue.Full: Failed to load queue data") break except lxml.etree.LxmlError: lxml_error = sys.exc_info()[1] - self.logger.error("Unable to load save interaction: %s" % lxml_error) - except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: - self.logger.error("Unable to load metadata for save interaction: %s" % pmetadata) + self.logger.error("Unable to load saved interaction: %s" % + lxml_error) + except MetadataConsistencyError: + self.logger.error("Unable to load metadata for save " + "interaction: %s" % pmetadata) try: os.unlink(self.pending_file) except: - self.logger.error("Failed to unlink save file: %s" % self.pending_file) - self.logger.info("Loaded pending %s data" % self.__class__.__name__) + self.logger.error("Failed to 
unlink save file: %s" % + self.pending_file) + self.logger.info("Loaded pending %s data" % self.name) return True def run(self): if not self.load(): return - while not self.terminate.isSet(): + while not self.terminate.isSet() and self.work_queue != None: try: - (xdata, client) = self.work_queue.get(block=True, timeout=2) + (client, xdata) = self.work_queue.get(block=True, timeout=2) except Empty: continue except Exception: e = sys.exc_info()[1] self.logger.error("ThreadedStatistics: %s" % e) continue - self.handle_statistic(xdata, client) - if not self.work_queue.empty(): + self.handle_statistic(client, xdata) + if self.work_queue != None and not self.work_queue.empty(): self.save() def process_statistics(self, metadata, data): - warned = False try: self.work_queue.put_nowait((metadata, copy.copy(data))) - warned = False except Full: - if not warned: - self.logger.warning("%s: Queue is full. Dropping interactions." % self.__class__.__name__) - warned = True + self.logger.warning("%s: Queue is full. Dropping interactions." 
% + self.name) - def handle_statistics(self, metadata, data): + def handle_statistic(self, metadata, data): """Handle stats here.""" pass @@ -308,17 +363,17 @@ class PullSource(object): return [] def GetCurrentEntry(self, client, e_type, e_name): - raise PluginExecutionError + raise NotImplementedError class PullTarget(object): def AcceptChoices(self, entry, metadata): - raise PluginExecutionError + raise NotImplementedError def AcceptPullData(self, specific, new_entry, verbose): """This is the null per-plugin implementation of bcfg2-admin pull.""" - raise PluginExecutionError + raise NotImplementedError class Decision(object): @@ -334,13 +389,13 @@ class ValidationError(Exception): class StructureValidator(object): """Validate/modify goal structures.""" def validate_structures(self, metadata, structures): - raise ValidationError("not implemented") + raise NotImplementedError class GoalValidator(object): """Validate/modify configuration goals.""" def validate_goals(self, metadata, goals): - raise ValidationError("not implemented") + raise NotImplementedError class Version(object): @@ -352,6 +407,17 @@ class Version(object): pass +class ClientRunHooks(object): + """ Provides hooks to interact with client runs """ + def start_client_run(self, metadata): + pass + + def end_client_run(self, metadata): + pass + + def end_statistics(self, metadata): + pass + # the rest of the file contains classes for coherent file caching class FileBacked(object): @@ -361,17 +427,18 @@ class FileBacked(object): This object is meant to be used as a part of DirectoryBacked. 
""" - def __init__(self, name): + def __init__(self, name, fam=None): object.__init__(self) self.data = '' self.name = name + self.fam = fam def HandleEvent(self, event=None): """Read file upon update.""" if event and event.code2str() not in ['exists', 'changed', 'created']: return try: - self.data = BUILTIN_FILE_TYPE(self.name).read() + self.data = open(self.name).read() self.Index() except IOError: err = sys.exc_info()[1] @@ -382,16 +449,14 @@ class FileBacked(object): pass def __repr__(self): - return "%s: %s" % (self.__class__.__name__, str(self)) - - def __str__(self): - return "%s: %s" % (self.name, self.data) + return "%s: %s" % (self.__class__.__name__, self.name) class DirectoryBacked(object): """This object is a coherent cache for a filesystem hierarchy of files.""" __child__ = FileBacked patterns = re.compile('.*') + ignore = None def __init__(self, data, fam): """Initialize the DirectoryBacked object. @@ -438,8 +503,8 @@ class DirectoryBacked(object): """ dirpathname = os.path.join(self.data, relative) if relative not in self.handles.values(): - if not posixpath.isdir(dirpathname): - logger.error("Failed to open directory %s" % (dirpathname)) + if not os.path.isdir(dirpathname): + logger.error("%s is not a directory" % dirpathname) return reqid = self.fam.AddMonitor(dirpathname, self) self.handles[reqid] = relative @@ -453,7 +518,8 @@ class DirectoryBacked(object): added. 
""" self.entries[relative] = self.__child__(os.path.join(self.data, - relative)) + relative), + self.fam) self.entries[relative].HandleEvent(event) def HandleEvent(self, event): @@ -470,27 +536,33 @@ class DirectoryBacked(object): """ action = event.code2str() - # Clean up the absolute path names passed in - event.filename = os.path.normpath(event.filename) - if event.filename.startswith(self.data): - event.filename = event.filename[len(self.data)+1:] - # Exclude events for actions we don't care about if action == 'endExist': return if event.requestID not in self.handles: - logger.warn("Got %s event with unknown handle (%s) for %s" - % (action, event.requestID, abspath)) + logger.warn("Got %s event with unknown handle (%s) for %s" % + (action, event.requestID, event.filename)) + return + + # Clean up path names + event.filename = os.path.normpath(event.filename) + if event.filename.startswith(self.data): + # the first event we get is on the data directory itself + event.filename = event.filename[len(self.data) + 1:] + + if self.ignore and self.ignore.search(event.filename): + logger.debug("Ignoring event %s" % event.filename) return # Calculate the absolute and relative paths this event refers to abspath = os.path.join(self.data, self.handles[event.requestID], event.filename) - relpath = os.path.join(self.handles[event.requestID], event.filename) + relpath = os.path.join(self.handles[event.requestID], + event.filename).lstrip('/') if action == 'deleted': - for key in self.entries.keys(): + for key in list(self.entries.keys()): if key.startswith(relpath): del self.entries[key] # We remove values from self.entries, but not @@ -498,7 +570,7 @@ class DirectoryBacked(object): # watching a directory just because it gets deleted. If it # is recreated, we will start getting notifications for it # again without having to add a new monitor. 
- elif posixpath.isdir(abspath): + elif os.path.isdir(abspath): # Deal with events for directories if action in ['exists', 'created']: self.add_directory_monitor(relpath) @@ -522,21 +594,13 @@ class DirectoryBacked(object): # didn't know about. Go ahead and treat it like a # "created" event, but log a warning, because this # is unexpected. - logger.warn("Got %s event for unexpected dir %s" % (action, - abspath)) + logger.warn("Got %s event for unexpected dir %s" % + (action, abspath)) self.add_directory_monitor(relpath) else: - logger.warn("Got unknown dir event %s %s %s" % (event.requestID, - event.code2str(), - abspath)) - else: - # Deal with events for non-directories - if ((event.filename[-1] == '~') or - (event.filename[:2] == '.#') or - (event.filename[-4:] == '.swp') or - (event.filename in ['SCCS', '.svn', '4913']) or - (not self.patterns.match(event.filename))): - return + logger.warn("Got unknown dir event %s %s %s" % + (event.requestID, event.code2str(), abspath)) + elif self.patterns.search(event.filename): if action in ['exists', 'created']: self.add_entry(relpath, event) elif action == 'changed': @@ -547,13 +611,16 @@ class DirectoryBacked(object): # know about. Go ahead and treat it like a # "created" event, but log a warning, because this # is unexpected. 
- logger.warn("Got %s event for unexpected file %s" % (action, - abspath)) + logger.warn("Got %s event for unexpected file %s" % + (action, + abspath)) self.add_entry(relpath, event) else: - logger.warn("Got unknown file event %s %s %s" % (event.requestID, - event.code2str(), - abspath)) + logger.warn("Got unknown file event %s %s %s" % + (event.requestID, event.code2str(), abspath)) + else: + logger.warn("Could not process filename %s; ignoring" % + event.filename) class XMLFileBacked(FileBacked): @@ -563,68 +630,55 @@ class XMLFileBacked(FileBacked): """ __identifier__ = 'name' - def __init__(self, filename): - self.label = "dummy" - self.entries = [] + def __init__(self, filename, fam=None, should_monitor=False): FileBacked.__init__(self, filename) - - def Index(self): - """Build local data structures.""" - try: - self.xdata = XML(self.data) - except XMLSyntaxError: - logger.error("Failed to parse %s" % (self.name)) - return - self.entries = self.xdata.getchildren() - if self.__identifier__ is not None: - self.label = self.xdata.attrib[self.__identifier__] - - def __iter__(self): - return iter(self.entries) - - def __str__(self): - return "%s: %s" % (self.name, lxml.etree.tostring(self.xdata)) - - -class SingleXMLFileBacked(XMLFileBacked): - """This object is a coherent cache for an independent XML file.""" - def __init__(self, filename, fam): - XMLFileBacked.__init__(self, filename) + self.label = "" + self.entries = [] self.extras = [] self.fam = fam - self.fam.AddMonitor(filename, self) + self.should_monitor = should_monitor + if fam and should_monitor: + self.fam.AddMonitor(filename, self) def _follow_xincludes(self, fname=None, xdata=None): - ''' follow xincludes, adding included files to fam and to - self.extras ''' + ''' follow xincludes, adding included files to self.extras ''' if xdata is None: if fname is None: xdata = self.xdata.getroottree() else: xdata = lxml.etree.parse(fname) - included = [ent.get('href') - for ent in 
xdata.findall('//{http://www.w3.org/2001/XInclude}include')] - for name in included: - if name not in self.extras: - if name.startswith("/"): - fpath = name + included = [el for el in xdata.findall('//%sinclude' % + Bcfg2.Server.XI_NAMESPACE)] + for el in included: + name = el.get("href") + if name.startswith("/"): + fpath = name + else: + if fname: + rel = fname else: - fpath = os.path.join(os.path.dirname(self.name), name) - self.add_monitor(fpath, name) - self._follow_xincludes(fname=fpath) - - def add_monitor(self, fpath, fname): - self.fam.AddMonitor(fpath, self) - self.extras.append(fname) + rel = self.name + fpath = os.path.join(os.path.dirname(rel), name) + if fpath not in self.extras: + if os.path.exists(fpath): + self._follow_xincludes(fname=fpath) + self.add_monitor(fpath) + else: + msg = "%s: %s does not exist, skipping" % (self.name, name) + if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE): + self.logger.debug(msg) + else: + self.logger.warning(msg) def Index(self): """Build local data structures.""" try: - self.xdata = lxml.etree.XML(self.data, base_url=self.name) + self.xdata = lxml.etree.XML(self.data, base_url=self.name, + parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: - err = sys.exc_info()[1] - logger.error("Failed to parse %s: %s" % (self.name, err)) - raise Bcfg2.Server.Plugin.PluginInitError + msg = "Failed to parse %s: %s" % (self.name, sys.exc_info()[1]) + logger.error(msg) + raise PluginInitError(msg) self._follow_xincludes() if self.extras: @@ -638,43 +692,52 @@ class SingleXMLFileBacked(XMLFileBacked): if self.__identifier__ is not None: self.label = self.xdata.attrib[self.__identifier__] + def add_monitor(self, fpath): + self.extras.append(fpath) + if self.fam and self.should_monitor: + self.fam.AddMonitor(fpath, self) + + def __iter__(self): + return iter(self.entries) + + def __str__(self): + return "%s at %s" % (self.__class__.__name__, self.name) + class StructFile(XMLFileBacked): """This file contains a 
set of structure file formatting logic.""" __identifier__ = None - def __init__(self, name): - XMLFileBacked.__init__(self, name) + def _include_element(self, item, metadata): + """ determine if an XML element matches the metadata """ + if isinstance(item, lxml.etree._Comment): + return False + negate = item.get('negate', 'false').lower() == 'true' + if item.tag == 'Group': + return negate == (item.get('name') not in metadata.groups) + elif item.tag == 'Client': + return negate == (item.get('name') != metadata.hostname) + else: + return True def _match(self, item, metadata): """ recursive helper for Match() """ - if isinstance(item, lxml.etree._Comment): - return [] - elif item.tag == 'Group': - rv = [] - if ((item.get('negate', 'false').lower() == 'true' and - item.get('name') not in metadata.groups) or - (item.get('negate', 'false').lower() == 'false' and - item.get('name') in metadata.groups)): - for child in item.iterchildren(): - rv.extend(self._match(child, metadata)) - return rv - elif item.tag == 'Client': - rv = [] - if ((item.get('negate', 'false').lower() == 'true' and - item.get('name') != metadata.hostname) or - (item.get('negate', 'false').lower() == 'false' and - item.get('name') == metadata.hostname)): + if self._include_element(item, metadata): + if item.tag == 'Group' or item.tag == 'Client': + rv = [] + if self._include_element(item, metadata): + for child in item.iterchildren(): + rv.extend(self._match(child, metadata)) + return rv + else: + rv = copy.deepcopy(item) + for child in rv.iterchildren(): + rv.remove(child) for child in item.iterchildren(): rv.extend(self._match(child, metadata)) - return rv + return [rv] else: - rv = copy.copy(item) - for child in rv.iterchildren(): - rv.remove(child) - for child in item.iterchildren(): - rv.extend(self._match(child, metadata)) - return [rv] + return [] def Match(self, metadata): """Return matching fragments of independent.""" @@ -683,27 +746,52 @@ class StructFile(XMLFileBacked): 
rv.extend(self._match(child, metadata)) return rv + def _xml_match(self, item, metadata): + """ recursive helper for XMLMatch """ + if self._include_element(item, metadata): + if item.tag == 'Group' or item.tag == 'Client': + for child in item.iterchildren(): + item.remove(child) + item.getparent().append(child) + self._xml_match(child, metadata) + item.getparent().remove(item) + else: + for child in item.iterchildren(): + self._xml_match(child, metadata) + else: + item.getparent().remove(item) + + def XMLMatch(self, metadata): + """ Return a rebuilt XML document that only contains the + matching portions """ + rv = copy.deepcopy(self.xdata) + for child in rv.iterchildren(): + self._xml_match(child, metadata) + return rv + -class INode: +class INode(object): """ LNodes provide lists of things available at a particular group intersection. """ - raw = {'Client': "lambda m, e:'%(name)s' == m.hostname and predicate(m, e)", - 'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"} - nraw = {'Client': "lambda m, e:'%(name)s' != m.hostname and predicate(m, e)", - 'Group': "lambda m, e:'%(name)s' not in m.groups and predicate(m, e)"} + raw = dict( + Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)", + Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)") + nraw = dict( + Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)", + Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)") containers = ['Group', 'Client'] ignore = [] def __init__(self, data, idict, parent=None): self.data = data self.contents = {} - if parent == None: - self.predicate = lambda m, d: True + if parent is None: + self.predicate = lambda m, e: True else: predicate = parent.predicate - if data.get('negate', 'false') in ['true', 'True']: + if data.get('negate', 'false').lower() == 'true': psrc = self.nraw else: psrc = self.raw @@ -712,21 +800,29 @@ class INode: {'name': data.get('name')}, {'predicate': predicate}) else: - raise Exception - 
mytype = self.__class__ + raise PluginExecutionError("Unknown tag: %s" % data.tag) self.children = [] + self._load_children(data, idict) + + def _load_children(self, data, idict): for item in data.getchildren(): if item.tag in self.ignore: continue elif item.tag in self.containers: - self.children.append(mytype(item, idict, self)) + self.children.append(self.__class__(item, idict, self)) else: try: - self.contents[item.tag][item.get('name')] = item.attrib + self.contents[item.tag][item.get('name')] = \ + dict(item.attrib) except KeyError: - self.contents[item.tag] = {item.get('name'): item.attrib} + self.contents[item.tag] = \ + {item.get('name'): dict(item.attrib)} if item.text: - self.contents[item.tag]['__text__'] = item.text + self.contents[item.tag][item.get('name')]['__text__'] = \ + item.text + if item.getchildren(): + self.contents[item.tag][item.get('name')]['__children__'] =\ + item.getchildren() try: idict[item.tag].append(item.get('name')) except KeyError: @@ -760,43 +856,48 @@ class XMLSrc(XMLFileBacked): """XMLSrc files contain a LNode hierarchy that returns matching entries.""" __node__ = INode __cacheobj__ = dict + __priority_required__ = True - def __init__(self, filename, noprio=False): - XMLFileBacked.__init__(self, filename) + def __init__(self, filename, fam=None, should_monitor=False): + XMLFileBacked.__init__(self, filename, fam, should_monitor) self.items = {} self.cache = None self.pnode = None self.priority = -1 - self.noprio = noprio def HandleEvent(self, _=None): """Read file upon update.""" try: - data = BUILTIN_FILE_TYPE(self.name).read() + data = open(self.name).read() except IOError: - logger.error("Failed to read file %s" % (self.name)) - return + msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1]) + logger.error(msg) + raise PluginExecutionError(msg) self.items = {} try: - xdata = lxml.etree.XML(data) + xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: - 
logger.error("Failed to parse file %s" % (self.name)) - return + msg = "Failed to parse file %s" % (self.name, sys.exc_info()[1]) + logger.error(msg) + raise PluginExecutionError(msg) self.pnode = self.__node__(xdata, self.items) self.cache = None try: self.priority = int(xdata.get('priority')) except (ValueError, TypeError): - if not self.noprio: - logger.error("Got bogus priority %s for file %s" % - (xdata.get('priority'), self.name)) + if self.__priority_required__: + msg = "Got bogus priority %s for file %s" % \ + (xdata.get('priority'), self.name) + logger.error(msg) + raise PluginExecutionError(msg) + del xdata, data def Cache(self, metadata): """Build a package dict for a given host.""" - if self.cache == None or self.cache[0] != metadata: + if self.cache is None or self.cache[0] != metadata: cache = (metadata, self.__cacheobj__()) - if self.pnode == None: + if self.pnode is None: logger.error("Cache method called early for %s; forcing data load" % (self.name)) self.HandleEvent() return @@ -809,11 +910,13 @@ class XMLSrc(XMLFileBacked): class InfoXML(XMLSrc): __node__ = InfoNode + __priority_required__ = False class XMLDirectoryBacked(DirectoryBacked): """Directorybacked for *.xml.""" - patterns = re.compile('.*\.xml') + patterns = re.compile('^.*\.xml$') + __child__ = XMLFileBacked class PrioDir(Plugin, Generator, XMLDirectoryBacked): @@ -824,11 +927,7 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked): def __init__(self, core, datastore): Plugin.__init__(self, core, datastore) Generator.__init__(self) - try: - XMLDirectoryBacked.__init__(self, self.data, self.core.fam) - except OSError: - self.logger.error("Failed to load %s indices" % (self.name)) - raise PluginInitError + XMLDirectoryBacked.__init__(self, self.data, self.core.fam) def HandleEvent(self, event): """Handle events and update dispatch table.""" @@ -867,19 +966,22 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked): else: prio = [int(src.priority) for src in matching] if 
prio.count(max(prio)) > 1: - self.logger.error("Found conflicting sources with " - "same priority for %s, %s %s" % - (metadata.hostname, - entry.tag.lower(), entry.get('name'))) + msg = "Found conflicting sources with same priority for " + \ + "%s:%s for %s" % (entry.tag, entry.get("name"), + metadata.hostname) + self.logger.error(msg) self.logger.error([item.name for item in matching]) self.logger.error("Priority was %s" % max(prio)) - raise PluginExecutionError + raise PluginExecutionError(msg) index = prio.index(max(prio)) for rname in list(matching[index].cache[1][entry.tag].keys()): if self._matches(entry, metadata, [rname]): data = matching[index].cache[1][entry.tag][rname] break + else: + # Fall back on __getitem__. Required if override used + data = matching[index].cache[1][entry.tag][entry.get('name')] if '__text__' in data: entry.text = data['__text__'] if '__children__' in data: @@ -896,18 +998,16 @@ class SpecificityError(Exception): pass -class Specificity: - - def __init__(self, all=False, group=False, hostname=False, prio=0, delta=False): +class Specificity(CmpMixin): + def __init__(self, all=False, group=False, hostname=False, prio=0, + delta=False): + CmpMixin.__init__(self) self.hostname = hostname self.all = all self.group = group self.prio = prio self.delta = delta - def __lt__(self, other): - return self.__cmp__(other) < 0 - def matches(self, metadata): return self.all or \ self.hostname == metadata.hostname or \ @@ -916,26 +1016,36 @@ class Specificity: def __cmp__(self, other): """Sort most to least specific.""" if self.all: - return 1 - if self.group: + if other.all: + return 0 + else: + return 1 + elif other.all: + return -1 + elif self.group: if other.hostname: return 1 if other.group and other.prio > self.prio: return 1 if other.group and other.prio == self.prio: return 0 + elif other.group: + return -1 + elif self.hostname and other.hostname: + return 0 return -1 - def more_specific(self, other): - """Test if self is more specific than 
other.""" + def __str__(self): + rv = [self.__class__.__name__, ': '] if self.all: - True + rv.append("all") elif self.group: - if other.hostname: - return True - elif other.group and other.prio > self.prio: - return True - return False + rv.append("Group %s, priority %s" % (self.group, self.prio)) + elif self.hostname: + rv.append("Host %s" % self.hostname) + if self.delta: + rv.append(", delta=%s" % self.delta) + return "".join(rv) class SpecificData(object): @@ -957,6 +1067,7 @@ class SpecificData(object): class EntrySet(Debuggable): """Entry sets deal with the host- and group-specific entries.""" ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\\.genshi_include)$") + basename_is_regex=False def __init__(self, basename, path, entry_type, encoding): Debuggable.__init__(self, name=basename) @@ -966,14 +1077,15 @@ class EntrySet(Debuggable): self.metadata = default_file_metadata.copy() self.infoxml = None self.encoding = encoding - pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % basename + + if self.basename_is_regex: + base_pat = basename + else: + base_pat = re.escape(basename) + pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % base_pat pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$' self.specific = re.compile(pattern) - def debug_log(self, message, flag=None): - if (flag is None and self.debug_flag) or flag: - logger.error(message) - def sort_by_specific(self, one, other): return cmp(one.specific, other.specific) @@ -987,20 +1099,13 @@ class EntrySet(Debuggable): if matching is None: matching = self.get_matching(metadata) - hspec = [ent for ent in matching if ent.specific.hostname] - if hspec: - return hspec[0] - - gspec = [ent for ent in matching if ent.specific.group] - if gspec: - gspec.sort(self.group_sortfunc) - return gspec[-1] - - aspec = [ent for ent in matching if ent.specific.all] - if aspec: - return aspec[0] - - raise PluginExecutionError + if matching: + matching.sort(key=operator.attrgetter("specific")) + return matching[0] + else: + raise 
PluginExecutionError("No matching entries available for %s " + "for %s" % (self.path, + metadata.hostname)) def handle_event(self, event): """Handle FAM events for the TemplateSet.""" @@ -1074,7 +1179,7 @@ class EntrySet(Debuggable): fpath = os.path.join(self.path, event.filename) if event.filename == 'info.xml': if not self.infoxml: - self.infoxml = InfoXML(fpath, True) + self.infoxml = InfoXML(fpath) self.infoxml.HandleEvent(event) elif event.filename in [':info', 'info']: for line in open(fpath).readlines(): @@ -1089,8 +1194,7 @@ class EntrySet(Debuggable): if value: self.metadata[key] = value if len(self.metadata['perms']) == 3: - self.metadata['perms'] = "0%s" % \ - (self.metadata['perms']) + self.metadata['perms'] = "0%s" % self.metadata['perms'] def reset_metadata(self, event): """Reset metadata to defaults if info or info.xml removed.""" @@ -1099,26 +1203,12 @@ class EntrySet(Debuggable): elif event.filename in [':info', 'info']: self.metadata = default_file_metadata.copy() - def group_sortfunc(self, x, y): - """sort groups by their priority""" - return cmp(x.specific.prio, y.specific.prio) - def bind_info_to_entry(self, entry, metadata): - # first set defaults from global metadata/:info - for key in self.metadata: - entry.set(key, self.metadata[key]) - if self.infoxml: - mdata = {} - self.infoxml.pnode.Match(metadata, mdata, entry=entry) - if 'Info' not in mdata: - logger.error("Failed to set metadata for file %s" % \ - (entry.get('name'))) - raise PluginExecutionError - [entry.attrib.__setitem__(key, value) \ - for (key, value) in list(mdata['Info'][None].items())] + bind_info(entry, metadata, infoxml=self.infoxml, default=self.metadata) def bind_entry(self, entry, metadata): - """Return the appropriate interpreted template from the set of available templates.""" + """Return the appropriate interpreted template from the set of + available templates.""" self.bind_info_to_entry(entry, metadata) return self.best_matching(metadata).bind_entry(entry, metadata) 
@@ -1130,13 +1220,14 @@ class GroupSpool(Plugin, Generator): filename_pattern = "" es_child_cls = object es_cls = EntrySet + entry_type = 'Path' def __init__(self, core, datastore): Plugin.__init__(self, core, datastore) Generator.__init__(self) if self.data[-1] == '/': self.data = self.data[:-1] - self.Entries['Path'] = {} + self.Entries[self.entry_type] = {} self.entries = {} self.handles = {} self.AddDirectoryMonitor('') @@ -1145,29 +1236,38 @@ class GroupSpool(Plugin, Generator): def add_entry(self, event): epath = self.event_path(event) ident = self.event_id(event) - if posixpath.isdir(epath): + if os.path.isdir(epath): self.AddDirectoryMonitor(epath[len(self.data):]) - if ident not in self.entries and posixpath.isfile(epath): - dirpath = "".join([self.data, ident]) + if ident not in self.entries and os.path.isfile(epath): + dirpath = self.data + ident self.entries[ident] = self.es_cls(self.filename_pattern, dirpath, self.es_child_cls, self.encoding) - self.Entries['Path'][ident] = self.entries[ident].bind_entry - if not posixpath.isdir(epath): + self.Entries[self.entry_type][ident] = \ + self.entries[ident].bind_entry + if not os.path.isdir(epath): # do not pass through directory events self.entries[ident].handle_event(event) def event_path(self, event): - return "".join([self.data, self.handles[event.requestID], - event.filename]) + return os.path.join(self.data, + self.handles[event.requestID].lstrip("/"), + event.filename) def event_id(self, event): epath = self.event_path(event) - if posixpath.isdir(epath): - return self.handles[event.requestID] + event.filename + if os.path.isdir(epath): + return os.path.join(self.handles[event.requestID].lstrip("/"), + event.filename) else: - return self.handles[event.requestID][:-1] + return self.handles[event.requestID].rstrip("/") + + def toggle_debug(self): + for entry in self.entries.values(): + if hasattr(entry, "toggle_debug"): + entry.toggle_debug() + return Plugin.toggle_debug(self) def HandleEvent(self, 
event): """Unified FAM event handler for GroupSpool.""" @@ -1178,7 +1278,7 @@ class GroupSpool(Plugin, Generator): if action in ['exists', 'created']: self.add_entry(event) - if action == 'changed': + elif action == 'changed': if ident in self.entries: self.entries[ident].handle_event(event) else: @@ -1193,7 +1293,7 @@ class GroupSpool(Plugin, Generator): if fbase in self.entries: # a directory was deleted del self.entries[fbase] - del self.Entries['Path'][fbase] + del self.Entries[self.entry_type][fbase] elif ident in self.entries: self.entries[ident].handle_event(event) elif ident not in self.entries: @@ -1206,8 +1306,8 @@ class GroupSpool(Plugin, Generator): relative += '/' name = self.data + relative if relative not in list(self.handles.values()): - if not posixpath.isdir(name): - print("Failed to open directory %s" % (name)) + if not os.path.isdir(name): + self.logger.error("Failed to open directory %s" % name) return reqid = self.core.fam.AddMonitor(name, self) self.handles[reqid] = relative diff --git a/src/lib/Bcfg2/Server/Plugins/BB.py b/src/lib/Bcfg2/Server/Plugins/BB.py deleted file mode 100644 index c015ec47c..000000000 --- a/src/lib/Bcfg2/Server/Plugins/BB.py +++ /dev/null @@ -1,83 +0,0 @@ -import lxml.etree -import Bcfg2.Server.Plugin -import glob -import os -import socket - -#manage boot symlinks - #add statistics check to do build->boot mods - -#map profiles: first array is not empty we replace the -p with a determined profile. 
-logger = Bcfg2.Server.Plugin.logger - -class BBfile(Bcfg2.Server.Plugin.XMLFileBacked): - """Class for bb files.""" - def Index(self): - """Build data into an xml object.""" - - try: - self.data = lxml.etree.XML(self.data) - except lxml.etree.XMLSyntaxError: - Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name) - return - self.tftppath = self.data.get('tftp', '/tftpboot') - self.macs = {} - self.users = {} - self.actions = {} - self.bootlinks = [] - - for node in self.data.findall('Node'): - iface = node.find('Interface') - if iface != None: - mac = "01-%s" % (iface.get('mac'.replace(':','-').lower())) - self.actions[node.get('name')] = node.get('action') - self.bootlinks.append((mac, node.get('action'))) - try: - ip = socket.gethostbyname(node.get('name')) - except: - logger.error("failed host resolution for %s" % node.get('name')) - - self.macs[node.get('name')] = (iface.get('mac'), ip) - else: - logger.error("%s" % lxml.etree.tostring(node)) - self.users[node.get('name')] = node.get('user',"").split(':') - - def enforce_bootlinks(self): - for mac, target in self.bootlinks: - path = self.tftppath + '/' + mac - if not os.path.islink(path): - logger.error("Boot file %s not a link" % path) - if target != os.readlink(path): - try: - os.unlink(path) - os.symlink(target, path) - except: - logger.error("Failed to modify link %s" % path) - -class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): - __child__ = BBfile - - -class BB(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Connector): - """The BB plugin maps users to machines and metadata to machines.""" - name = 'BB' - deprecated = True - - def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) - Bcfg2.Server.Plugin.Connector.__init__(self) - self.store = BBDirectoryBacked(self.data, core.fam) - - def get_additional_data(self, metadata): - - users = {} - for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []): - 
pubkeys = [] - for fname in glob.glob('/home/%s/.ssh/*.pub'%user): - pubkeys.append(open(fname).read()) - - users[user] = pubkeys - - return dict([('users', users), - ('macs', self.store.entries['bb.xml'].macs)]) diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py index 389ca7a95..2020f7795 100644 --- a/src/lib/Bcfg2/Server/Plugins/Base.py +++ b/src/lib/Bcfg2/Server/Plugins/Base.py @@ -3,10 +3,7 @@ import copy import lxml.etree import sys -# py3k compatibility -if sys.hexversion >= 0x03000000: - from functools import reduce - +from Bcfg2.Bcfg2Py3k import reduce import Bcfg2.Server.Plugin diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py index ccb99481e..65914c371 100644 --- a/src/lib/Bcfg2/Server/Plugins/Bundler.py +++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py @@ -1,25 +1,25 @@ """This provides bundle clauses with translation functionality.""" import copy +import logging import lxml.etree import os import os.path import re import sys - +import Bcfg2.Server import Bcfg2.Server.Plugin +import Bcfg2.Server.Lint try: - import genshi.template import genshi.template.base - import Bcfg2.Server.Plugins.SGenshi + import Bcfg2.Server.Plugins.TGenshi have_genshi = True except: have_genshi = False class BundleFile(Bcfg2.Server.Plugin.StructFile): - def get_xml_value(self, metadata): bundlename = os.path.splitext(os.path.basename(self.name))[0] bundle = lxml.etree.Element('Bundle', name=bundlename) @@ -27,6 +27,58 @@ class BundleFile(Bcfg2.Server.Plugin.StructFile): return bundle +if have_genshi: + class BundleTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile, + Bcfg2.Server.Plugin.StructFile): + def __init__(self, name, specific, encoding): + Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name, + specific, + encoding) + Bcfg2.Server.Plugin.StructFile.__init__(self, name) + self.logger = logging.getLogger(name) + + def get_xml_value(self, metadata): + if not hasattr(self, 
'template'): + self.logger.error("No parsed template information for %s" % + self.name) + raise Bcfg2.Server.Plugin.PluginExecutionError + try: + stream = self.template.generate(metadata=metadata).filter( + Bcfg2.Server.Plugins.TGenshi.removecomment) + data = lxml.etree.XML(stream.render('xml', + strip_whitespace=False), + parser=Bcfg2.Server.XMLParser) + bundlename = os.path.splitext(os.path.basename(self.name))[0] + bundle = lxml.etree.Element('Bundle', name=bundlename) + for item in self.Match(metadata, data): + bundle.append(copy.deepcopy(item)) + return bundle + except LookupError: + lerror = sys.exc_info()[1] + self.logger.error('Genshi lookup error: %s' % lerror) + except genshi.template.TemplateError: + terror = sys.exc_info()[1] + self.logger.error('Genshi template error: %s' % terror) + raise + except genshi.input.ParseError: + perror = sys.exc_info()[1] + self.logger.error('Genshi parse error: %s' % perror) + raise + + def Match(self, metadata, xdata): + """Return matching fragments of parsed template.""" + rv = [] + for child in xdata.getchildren(): + rv.extend(self._match(child, metadata)) + self.logger.debug("File %s got %d match(es)" % (self.name, len(rv))) + return rv + + + class SGenshiTemplateFile(BundleTemplateFile): + # provided for backwards compat + pass + + class Bundler(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Structure, Bcfg2.Server.Plugin.XMLDirectoryBacked): @@ -50,25 +102,20 @@ class Bundler(Bcfg2.Server.Plugin.Plugin, self.logger.error("Failed to load Bundle repository") raise Bcfg2.Server.Plugin.PluginInitError - def template_dispatch(self, name): - bundle = lxml.etree.parse(name) + def template_dispatch(self, name, _): + bundle = lxml.etree.parse(name, + parser=Bcfg2.Server.XMLParser) nsmap = bundle.getroot().nsmap - if name.endswith('.xml'): - if have_genshi and \ - (nsmap == {'py': 'http://genshi.edgewall.org/'}): - # allow for genshi bundles with .xml extensions - spec = Bcfg2.Server.Plugin.Specificity() - return 
Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name, - spec, - self.encoding) - else: - return BundleFile(name) - elif name.endswith('.genshi'): + if (name.endswith('.genshi') or + ('py' in nsmap and + nsmap['py'] == 'http://genshi.edgewall.org/')): if have_genshi: spec = Bcfg2.Server.Plugin.Specificity() - return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name, - spec, - self.encoding) + return BundleTemplateFile(name, spec, self.encoding) + else: + raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not available: %s" % name) + else: + return BundleFile(name, self.fam) def BuildStructures(self, metadata): """Build all structures for client (metadata).""" @@ -97,3 +144,54 @@ class Bundler(Bcfg2.Server.Plugin.Plugin, self.logger.error("Bundler: Unexpected bundler error for %s" % bundlename, exc_info=1) return bundleset + + +class BundlerLint(Bcfg2.Server.Lint.ServerPlugin): + """ Perform various bundle checks """ + def Run(self): + """ run plugin """ + self.missing_bundles() + for bundle in self.core.plugins['Bundler'].entries.values(): + if (self.HandlesFile(bundle.name) and + (not have_genshi or + not isinstance(bundle, BundleTemplateFile))): + self.bundle_names(bundle) + + @classmethod + def Errors(cls): + return {"bundle-not-found":"error", + "inconsistent-bundle-name":"warning"} + + def missing_bundles(self): + """ find bundles listed in Metadata but not implemented in Bundler """ + if self.files is None: + # when given a list of files on stdin, this check is + # useless, so skip it + groupdata = self.metadata.groups_xml.xdata + ref_bundles = set([b.get("name") + for b in groupdata.findall("//Bundle")]) + + allbundles = self.core.plugins['Bundler'].entries.keys() + for bundle in ref_bundles: + xmlbundle = "%s.xml" % bundle + genshibundle = "%s.genshi" % bundle + if (xmlbundle not in allbundles and + genshibundle not in allbundles): + self.LintError("bundle-not-found", + "Bundle %s referenced, but does not exist" % + bundle) + + def 
bundle_names(self, bundle): + """ verify bundle name attribute matches filename """ + try: + xdata = lxml.etree.XML(bundle.data) + except AttributeError: + # genshi template + xdata = lxml.etree.parse(bundle.template.filepath).getroot() + + fname = bundle.name.split('Bundler/')[1].split('.')[0] + bname = xdata.get('name') + if fname != bname: + self.LintError("inconsistent-bundle-name", + "Inconsistent bundle name: filename is %s, " + "bundle name is %s" % (fname, bname)) diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py index 3edd1d8cb..f02461673 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py @@ -6,8 +6,7 @@ from Bcfg2.Server.Plugins.Cfg import CfgGenerator logger = logging.getLogger(__name__) try: - import Cheetah.Template - import Cheetah.Parser + from Cheetah.Template import Template have_cheetah = True except ImportError: have_cheetah = False @@ -25,9 +24,9 @@ class CfgCheetahGenerator(CfgGenerator): raise Bcfg2.Server.Plugin.PluginExecutionError(msg) def get_data(self, entry, metadata): - template = Cheetah.Template.Template(self.data, - compilerSettings=self.settings) + template = Template(self.data.decode(self.encoding), + compilerSettings=self.settings) template.metadata = metadata template.path = entry.get('realname', entry.get('name')) - template.source_path = self.path + template.source_path = self.name return template.respond() diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py new file mode 100644 index 000000000..a75329d2a --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py @@ -0,0 +1,14 @@ +import logging +from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator +from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import CfgEncryptedGenerator + 
+logger = logging.getLogger(__name__) + +class CfgEncryptedCheetahGenerator(CfgCheetahGenerator, CfgEncryptedGenerator): + __extensions__ = ['cheetah.crypt', 'crypt.cheetah'] + + def handle_event(self, event): + CfgEncryptedGenerator.handle_event(self, event) + + def get_data(self, entry, metadata): + return CfgCheetahGenerator.get_data(self, entry, metadata) diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py new file mode 100644 index 000000000..2c926fae7 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py @@ -0,0 +1,63 @@ +import logging +import Bcfg2.Server.Plugin +from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP +try: + from Bcfg2.Encryption import ssl_decrypt, EVPError + have_crypto = True +except ImportError: + have_crypto = False + +logger = logging.getLogger(__name__) + +def passphrases(): + section = "encryption" + if SETUP.cfp.has_section(section): + return dict([(o, SETUP.cfp.get(section, o)) + for o in SETUP.cfp.options(section)]) + else: + return dict() + +def decrypt(crypted): + if not have_crypto: + msg = "Cfg: M2Crypto is not available: %s" % entry.get("name") + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + for passwd in passphrases().values(): + try: + return ssl_decrypt(crypted, passwd) + except EVPError: + pass + raise EVPError("Failed to decrypt") + +class CfgEncryptedGenerator(CfgGenerator): + __extensions__ = ["crypt"] + + def __init__(self, fname, spec, encoding): + CfgGenerator.__init__(self, fname, spec, encoding) + if not have_crypto: + msg = "Cfg: M2Crypto is not available: %s" % entry.get("name") + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + + def handle_event(self, event): + if event.code2str() == 'deleted': + return + try: + crypted = open(self.name).read() + except UnicodeDecodeError: + crypted = open(self.name, mode='rb').read() + except: + logger.error("Failed to 
read %s" % self.name) + return + # todo: let the user specify a passphrase by name + try: + self.data = decrypt(crypted) + except EVPError: + msg = "Failed to decrypt %s" % self.name + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + + def get_data(self, entry, metadata): + if self.data is None: + raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to decrypt %s" % self.name) + return CfgGenerator.get_data(self, entry, metadata) diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py new file mode 100644 index 000000000..6605cca7c --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py @@ -0,0 +1,26 @@ +import logging +from Bcfg2.Bcfg2Py3k import StringIO +from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator +from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import decrypt, \ + CfgEncryptedGenerator + +logger = logging.getLogger(__name__) + +try: + from genshi.template import TemplateLoader +except ImportError: + # CfgGenshiGenerator will raise errors if genshi doesn't exist + TemplateLoader = object + + +class EncryptedTemplateLoader(TemplateLoader): + def _instantiate(self, cls, fileobj, filepath, filename, encoding=None): + plaintext = StringIO(decrypt(fileobj.read())) + return TemplateLoader._instantiate(self, cls, plaintext, filepath, + filename, encoding=encoding) + + +class CfgEncryptedGenshiGenerator(CfgGenshiGenerator): + __extensions__ = ['genshi.crypt', 'crypt.genshi'] + __loader_cls__ = EncryptedTemplateLoader + diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py index 2c0a076d7..277a26f97 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py @@ -1,5 +1,7 @@ +import re import sys import logging +import traceback import Bcfg2.Server.Plugin from 
Bcfg2.Server.Plugins.Cfg import CfgGenerator @@ -8,8 +10,10 @@ logger = logging.getLogger(__name__) try: import genshi.core from genshi.template import TemplateLoader, NewTextTemplate + from genshi.template.eval import UndefinedError have_genshi = True except ImportError: + TemplateLoader = None have_genshi = False # snipped from TGenshi @@ -23,14 +27,17 @@ def removecomment(stream): class CfgGenshiGenerator(CfgGenerator): __extensions__ = ['genshi'] + __loader_cls__ = TemplateLoader + pyerror_re = re.compile('<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>') def __init__(self, fname, spec, encoding): CfgGenerator.__init__(self, fname, spec, encoding) - self.loader = TemplateLoader() if not have_genshi: - msg = "Cfg: Genshi is not available: %s" % entry.get("name") + msg = "Cfg: Genshi is not available: %s" % fname logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + self.loader = self.__loader_cls__() + self.template = None @classmethod def ignore(cls, event, basename=None): @@ -44,10 +51,63 @@ class CfgGenshiGenerator(CfgGenerator): metadata=metadata, path=self.name).filter(removecomment) try: - return stream.render('text', encoding=self.encoding, - strip_whitespace=False) - except TypeError: - return stream.render('text', encoding=self.encoding) + try: + return stream.render('text', encoding=self.encoding, + strip_whitespace=False) + except TypeError: + return stream.render('text', encoding=self.encoding) + except UndefinedError: + # a failure in a genshi expression _other_ than %{ python ... %} + err = sys.exc_info()[1] + stack = traceback.extract_tb(sys.exc_info()[2]) + for quad in stack: + if quad[0] == self.name: + logger.error("Cfg: Error rendering %s at %s: %s" % + (fname, quad[2], err)) + break + raise + except: + # a failure in a %{ python ... %} block -- the snippet in + # the traceback is just the beginning of the block. 
+ err = sys.exc_info()[1] + stack = traceback.extract_tb(sys.exc_info()[2]) + (filename, lineno, func, text) = stack[-1] + # this is horrible, and I deeply apologize to whoever gets + # to maintain this after I go to the Great Beer Garden in + # the Sky. genshi is incredibly opaque about what's being + # executed, so the only way I can find to determine which + # {% python %} block is being executed -- if there are + # multiples -- is to iterate through them and match the + # snippet of the first line that's in the traceback with + # the first non-empty line of the block. + execs = [contents + for etype, contents, loc in self.template.stream + if etype == self.template.EXEC] + contents = None + if len(execs) == 1: + contents = execs[0] + elif len(execs) > 1: + match = pyerror_re.match(func) + if match: + firstline = match.group(0) + for pyblock in execs: + if pyblock.startswith(firstline): + contents = pyblock + break + # else, no EXEC blocks -- WTF? + if contents: + # we now have the bogus block, but we need to get the + # offending line. 
To get there, we do (line number + # given in the exception) - (firstlineno from the + # internal genshi code object of the snippet) + 1 = + # (line number of the line with an error within the + # block, with all multiple line breaks elided to a + # single line break) + real_lineno = lineno - contents.code.co_firstlineno + src = re.sub(r'\n\n+', '\n', contents.source).splitlines() + logger.error("Cfg: Error rendering %s at %s: %s" % + (fname, src[real_lineno], err)) + raise def handle_event(self, event): if event.code2str() == 'deleted': diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py index 8e962efb4..956ebfe17 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py @@ -9,7 +9,7 @@ class CfgInfoXML(CfgInfo): def __init__(self, path): CfgInfo.__init__(self, path) - self.infoxml = Bcfg2.Server.Plugin.InfoXML(path, noprio=True) + self.infoxml = Bcfg2.Server.Plugin.InfoXML(path) def bind_info_to_entry(self, entry, metadata): mdata = dict() @@ -22,3 +22,9 @@ class CfgInfoXML(CfgInfo): def handle_event(self, event): self.infoxml.HandleEvent() + + def _set_info(self, entry, info): + CfgInfo._set_info(self, entry, info) + if '__children__' in info: + for child in info['__children__']: + entry.append(child) diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py index 54c17c6c5..85c13c1ac 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py @@ -7,6 +7,10 @@ logger = logging.getLogger(__name__) class CfgLegacyInfo(CfgInfo): __basenames__ = ['info', ':info'] + def __init__(self, path): + CfgInfo.__init__(self, path) + self.path = path + def bind_info_to_entry(self, entry, metadata): self._set_info(entry, self.metadata) diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py index 
6c7585993..fe993ab54 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py @@ -6,11 +6,11 @@ import sys import stat import pkgutil import logging -import binascii import lxml.etree import Bcfg2.Options import Bcfg2.Server.Plugin -from Bcfg2.Bcfg2Py3k import u_str +from Bcfg2.Bcfg2Py3k import u_str, unicode, b64encode +import Bcfg2.Server.Lint logger = logging.getLogger(__name__) @@ -113,7 +113,8 @@ class CfgInfo(CfgBaseFileMatcher): def _set_info(self, entry, info): for key, value in list(info.items()): - entry.attrib.__setitem__(key, value) + if not key.startswith("__"): + entry.attrib.__setitem__(key, value) class CfgVerifier(CfgBaseFileMatcher): @@ -152,7 +153,19 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): global PROCESSORS if PROCESSORS is None: PROCESSORS = [] - for submodule in pkgutil.walk_packages(path=__path__): + if hasattr(pkgutil, 'walk_packages'): + submodules = pkgutil.walk_packages(path=__path__) + else: + #python 2.4 + import glob + submodules = [] + for path in __path__: + for submodule in glob.glob(os.path.join(path, "*.py")): + mod = '.'.join(submodule.split("/")[-1].split('.')[:-1]) + if mod != '__init__': + submodules.append((None, mod, True)) + + for submodule in submodules: module = getattr(__import__("%s.%s" % (__name__, submodule[1])).Server.Plugins.Cfg, @@ -185,6 +198,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): return elif action == 'changed': self.entries[event.filename].handle_event(event) + return elif action == 'deleted': del self.entries[event.filename] return @@ -192,6 +206,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): logger.error("Could not process event %s for %s; ignoring" % (action, event.filename)) + def get_matching(self, metadata): + return [item for item in list(self.entries.values()) + if (isinstance(item, CfgGenerator) and + item.specific.matches(metadata))] + def entry_init(self, event, proc): if proc.__specific__: 
Bcfg2.Server.Plugin.EntrySet.entry_init( @@ -270,10 +289,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): raise Bcfg2.Server.Plugin.PluginExecutionError(msg) if entry.get('encoding') == 'base64': - data = binascii.b2a_base64(data) + data = b64encode(data) else: try: - data = u_str(data, self.encoding) + if not isinstance(data, unicode): + data = u_str(data, self.encoding) except UnicodeDecodeError: msg = "Failed to decode %s: %s" % (entry.get('name'), sys.exc_info()[1]) @@ -287,6 +307,10 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): logger.error("You need to specify base64 encoding for %s." % entry.get('name')) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + except TypeError: + # data is already unicode; newer versions of Cheetah + # seem to return unicode + pass if data: entry.text = data @@ -298,7 +322,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): generators = [ent for ent in list(self.entries.values()) if (isinstance(ent, CfgGenerator) and ent.specific.matches(metadata))] - if not matching: + if not generators: msg = "No base file found for %s" % entry.get('name') logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) @@ -347,26 +371,26 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): if attr in new_entry] if badattr: # check for info files and inform user of their removal - if os.path.exists(self.path + "/:info"): - logger.info("Removing :info file and replacing with " - "info.xml") - os.remove(self.path + "/:info") - if os.path.exists(self.path + "/info"): - logger.info("Removing info file and replacing with " - "info.xml") - os.remove(self.path + "/info") + for ifile in ['info', ':info']: + info = os.path.join(self.path, ifile) + if os.path.exists(info): + logger.info("Removing %s and replacing with info.xml" % + info) + os.remove(info) metadata_updates = {} metadata_updates.update(self.metadata) for attr in badattr: metadata_updates[attr] = new_entry.get(attr) infoxml = lxml.etree.Element('FileInfo') infotag = 
lxml.etree.SubElement(infoxml, 'Info') - [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \ - for attr in metadata_updates] + [infotag.attrib.__setitem__(attr, metadata_updates[attr]) + for attr in metadata_updates] ofile = open(self.path + "/info.xml", "w") - ofile.write(lxml.etree.tostring(infoxml, pretty_print=True)) + ofile.write(lxml.etree.tostring(infoxml, xml_declaration=False, + pretty_print=True).decode('UTF-8')) ofile.close() - self.debug_log("Wrote file %s" % (self.path + "/info.xml"), + self.debug_log("Wrote file %s" % os.path.join(self.path, + "info.xml"), flag=log) @@ -385,9 +409,22 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool, SETUP = core.setup if 'validate' not in SETUP: - SETUP['validate'] = Bcfg2.Options.CFG_VALIDATION + SETUP.add_option('validate', Bcfg2.Options.CFG_VALIDATION) SETUP.reparse() + def has_generator(self, entry, metadata): + """ return True if the given entry can be generated for the + given metadata; False otherwise """ + if entry.get('name') not in self.entries: + return False + + for ent in self.entries[entry.get('name')].entries.values(): + if ent.__specific__ and not ent.specific.matches(metadata): + continue + if isinstance(ent, CfgGenerator): + return True + return False + def AcceptChoices(self, entry, metadata): return self.entries[entry.get('name')].list_accept_choices(entry, metadata) @@ -396,3 +433,26 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool, return self.entries[new_entry.get('name')].write_update(specific, new_entry, log) + +class CfgLint(Bcfg2.Server.Lint.ServerPlugin): + """ warn about usage of .cat and .diff files """ + + def Run(self): + for basename, entry in list(self.core.plugins['Cfg'].entries.items()): + self.check_entry(basename, entry) + + + @classmethod + def Errors(cls): + return {"cat-file-used":"warning", + "diff-file-used":"warning"} + + def check_entry(self, basename, entry): + cfg = self.core.plugins['Cfg'] + for basename, entry in list(cfg.entries.items()): + for fname, processor in 
entry.entries.items(): + if self.HandlesFile(fname) and isinstance(processor, CfgFilter): + extension = fname.split(".")[-1] + self.LintError("%s-file-used" % extension, + "%s file used on %s: %s" % + (extension, basename, fname)) diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py index 999e078b9..63c590f0f 100644 --- a/src/lib/Bcfg2/Server/Plugins/DBStats.py +++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py @@ -1,8 +1,8 @@ -import binascii import difflib import logging import lxml.etree import platform +import sys import time try: @@ -11,61 +11,47 @@ except ImportError: pass import Bcfg2.Server.Plugin -import Bcfg2.Server.Reports.importscript +from Bcfg2.Server.Reports.importscript import load_stat from Bcfg2.Server.Reports.reports.models import Client -import Bcfg2.Server.Reports.settings -from Bcfg2.Server.Reports.updatefix import update_database +from Bcfg2.Bcfg2Py3k import b64decode + # for debugging output only logger = logging.getLogger('Bcfg2.Plugins.DBStats') -class DBStats(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.ThreadedStatistics, + +class DBStats(Bcfg2.Server.Plugin.ThreadedStatistics, Bcfg2.Server.Plugin.PullSource): name = 'DBStats' def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore) Bcfg2.Server.Plugin.PullSource.__init__(self) self.cpath = "%s/Metadata/clients.xml" % datastore self.core = core - logger.debug("Searching for new models to add to the statistics database") - try: - update_database() - except Exception: - inst = sys.exc_info()[1] - logger.debug(str(inst)) - logger.debug(str(type(inst))) + if not self.core.database_available: + raise Bcfg2.Server.Plugin.PluginInitError def handle_statistic(self, metadata, data): newstats = data.find("Statistics") newstats.set('time', time.asctime(time.localtime())) - # ick - data = lxml.etree.tostring(newstats) - ndx = lxml.etree.XML(data) - 
e = lxml.etree.Element('Node', name=metadata.hostname) - e.append(ndx) - container = lxml.etree.Element("ConfigStatistics") - container.append(e) - # FIXME need to build a metadata interface to expose a list of clients start = time.time() for i in [1, 2, 3]: try: - Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clients_xml.xdata, - container, - self.core.encoding, - 0, - logger, - True, - platform.node()) + load_stat(metadata, + newstats, + self.core.encoding, + 0, + logger, + True, + platform.node()) logger.info("Imported data for %s in %s seconds" \ % (metadata.hostname, time.time() - start)) return except MultipleObjectsReturned: e = sys.exc_info()[1] - logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \ - (metadata.hostname, e)) + logger.error("DBStats: MultipleObjectsReturned while " + "handling %s: %s" % (metadata.hostname, e)) logger.error("DBStats: Data is inconsistent") break except: @@ -100,10 +86,10 @@ class DBStats(Bcfg2.Server.Plugin.Plugin, if entry.reason.is_sensitive: raise Bcfg2.Server.Plugin.PluginExecutionError elif len(entry.reason.unpruned) != 0: - ret.append('\n'.join(entry.reason.unpruned)) + ret.append('\n'.join(entry.reason.unpruned)) elif entry.reason.current_diff != '': if entry.reason.is_binary: - ret.append(binascii.a2b_base64(entry.reason.current_diff)) + ret.append(b64decode(entry.reason.current_diff)) else: ret.append('\n'.join(difflib.restore(\ entry.reason.current_diff.split('\n'), 1))) diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py index b432474f2..90d9ecbe3 100644 --- a/src/lib/Bcfg2/Server/Plugins/Decisions.py +++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py @@ -14,6 +14,8 @@ class DecisionFile(Bcfg2.Server.Plugin.SpecificData): return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')] class DecisionSet(Bcfg2.Server.Plugin.EntrySet): + basename_is_regex = True + def __init__(self, path, fam, encoding): """Container 
for decision specification files. @@ -23,8 +25,7 @@ class DecisionSet(Bcfg2.Server.Plugin.EntrySet): - `encoding`: XML character encoding """ - pattern = '(white|black)list' - Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \ + Bcfg2.Server.Plugin.EntrySet.__init__(self, '(white|black)list', path, DecisionFile, encoding) try: fam.AddMonitor(path, self) diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py index 9b848baae..d3a1ee871 100644 --- a/src/lib/Bcfg2/Server/Plugins/Deps.py +++ b/src/lib/Bcfg2/Server/Plugins/Deps.py @@ -7,27 +7,10 @@ import Bcfg2.Server.Plugin class DNode(Bcfg2.Server.Plugin.INode): """DNode provides supports for single predicate types for dependencies.""" - raw = {'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"} - containers = ['Group'] - - def __init__(self, data, idict, parent=None): - self.data = data - self.contents = {} - if parent == None: - self.predicate = lambda x, d: True - else: - predicate = parent.predicate - if data.tag in list(self.raw.keys()): - self.predicate = eval(self.raw[data.tag] % - {'name': data.get('name')}, - {'predicate': predicate}) - else: - raise Exception - mytype = self.__class__ - self.children = [] + def _load_children(self, data, idict): for item in data.getchildren(): if item.tag in self.containers: - self.children.append(mytype(item, idict, self)) + self.children.append(self.__class__(item, idict, self)) else: data = [(child.tag, child.get('name')) for child in item.getchildren()] diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py index 5beec7be0..632d586e8 100644 --- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py +++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py @@ -7,23 +7,24 @@ the client """ import os import sys import errno -import binascii import lxml.etree import Bcfg2.Options +import Bcfg2.Server import Bcfg2.Server.Plugin +from Bcfg2.Bcfg2Py3k import b64decode probecode = 
"""#!/usr/bin/env python import os import pwd import grp -import binascii import lxml.etree +from Bcfg2.Bcfg2Py3k import b64encode path = "%s" if not os.path.exists(path): - print "%%s does not exist" %% path + print("%%s does not exist" %% path) raise SystemExit(1) stat = os.stat(path) @@ -32,18 +33,10 @@ data = lxml.etree.Element("ProbedFileData", owner=pwd.getpwuid(stat[4])[0], group=grp.getgrgid(stat[5])[0], perms=oct(stat[0] & 07777)) -data.text = binascii.b2a_base64(open(path).read()) -print lxml.etree.tostring(data) +data.text = b64encode(open(path).read()) +print(lxml.etree.tostring(data, xml_declaration=False).decode('UTF-8')) """ -class FileProbesConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked, - Bcfg2.Server.Plugin.StructFile): - """ Config file handler for FileProbes """ - def __init__(self, filename, fam): - Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam) - Bcfg2.Server.Plugin.StructFile.__init__(self, filename) - - class FileProbes(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Probing): """ This module allows you to probe a client for a file, which is then @@ -53,14 +46,15 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, the client """ name = 'FileProbes' - experimental = True __author__ = 'chris.a.st.pierre@gmail.com' def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.Probing.__init__(self) - self.config = FileProbesConfig(os.path.join(self.data, 'config.xml'), - core.fam) + self.config = Bcfg2.Server.Plugin.StructFile(os.path.join(self.data, + 'config.xml'), + fam=core.fam, + should_monitor=True) self.entries = dict() self.probes = dict() @@ -75,13 +69,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, # do not probe for files that are already in Cfg and # for which update is false; we can't possibly do # anything with the data we get from such a probe - try: - if (entry.get('update', 'false').lower() == "false" and - cfg.entries[path].get_pertinent_entries(entry, 
- metadata)): - continue - except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError): - pass + if (entry.get('update', 'false').lower() == "false" and + not cfg.has_generator(entry, metadata)): + continue self.entries[metadata.hostname][path] = entry probe = lxml.etree.Element('probe', name=path, source=self.name, @@ -102,7 +92,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, (data.get('name'), metadata.hostname)) else: try: - self.write_data(lxml.etree.XML(data.text), metadata) + self.write_data(lxml.etree.XML(data.text, + parser=Bcfg2.Server.XMLParser), + metadata) except lxml.etree.XMLSyntaxError: # if we didn't get XML back from the probe, assume # it's an error message @@ -111,23 +103,24 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, def write_data(self, data, metadata): """Write the probed file data to the bcfg2 specification.""" filename = data.get("name") - contents = binascii.a2b_base64(data.text) + contents = b64decode(data.text) entry = self.entries[metadata.hostname][filename] cfg = self.core.plugins['Cfg'] specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname) # we can't use os.path.join() for this because specific # already has a leading /, which confuses os.path.join() - fileloc = "%s%s" % (cfg.data, os.path.join(filename, specific)) + fileloc = os.path.join(cfg.data, + os.path.join(filename, specific).lstrip("/")) create = False try: cfg.entries[filename].bind_entry(entry, metadata) - except Bcfg2.Server.Plugin.PluginExecutionError: + except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError): create = True # get current entry data if entry.text and entry.get("encoding") == "base64": - entrydata = binascii.a2b_base64(entry.text) + entrydata = b64decode(entry.text) else: entrydata = entry.text @@ -135,7 +128,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, self.logger.info("Writing new probed file %s" % fileloc) self.write_file(fileloc, contents) self.verify_file(filename, contents, metadata) - infoxml = os.path.join("%s%s" % 
(cfg.data, filename), "info.xml") + infoxml = os.path.join(cfg.data, filename.lstrip("/"), "info.xml") self.write_infoxml(infoxml, entry, data) elif entrydata == contents: self.debug_log("Existing %s contents match probed contents" % @@ -194,7 +187,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, # get current entry data if entry.get("encoding") == "base64": - entrydata = binascii.a2b_base64(entry.text) + entrydata = b64decode(entry.text) else: entrydata = entry.text if entrydata == contents: @@ -206,8 +199,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, if os.path.exists(infoxml): return - self.logger.info("Writing info.xml at %s for %s" % - (infoxml, data.get("name"))) + self.logger.info("Writing %s for %s" % (infoxml, data.get("name"))) info = \ lxml.etree.Element("Info", owner=data.get("owner", @@ -222,8 +214,10 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, root = lxml.etree.Element("FileInfo") root.append(info) try: - open(infoxml, "w").write(lxml.etree.tostring(root, - pretty_print=True)) + open(infoxml, + "w").write(lxml.etree.tostring(root, + xml_declaration=False, + pretty_print=True).decode('UTF-8')) except IOError: err = sys.exc_info()[1] self.logger.error("Could not write %s: %s" % (fileloc, err)) diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py index 58b4d4afb..837f47279 100644 --- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py +++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py @@ -1,6 +1,9 @@ +import os import re +import sys import logging import lxml.etree +import Bcfg2.Server.Lint import Bcfg2.Server.Plugin class PackedDigitRange(object): @@ -58,7 +61,7 @@ class PatternMap(object): return self.groups def process_re(self, name): - match = self.re.match(name) + match = self.re.search(name) if not match: return None ret = list() @@ -70,17 +73,22 @@ class PatternMap(object): ret.append(newg) return ret + def __str__(self): + return "%s: %s %s" % (self.__class__.__name__, self.pattern, + 
self.groups) + -class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked): +class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked): __identifier__ = None - def __init__(self, filename, fam): - Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam) + def __init__(self, filename, fam=None): + Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam, + should_monitor=True) self.patterns = [] self.logger = logging.getLogger(self.__class__.__name__) def Index(self): - Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self) + Bcfg2.Server.Plugin.XMLFileBacked.Index(self) self.patterns = [] for entry in self.xdata.xpath('//GroupPattern'): try: @@ -112,13 +120,42 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked): class GroupPatterns(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector): name = "GroupPatterns" - experimental = True def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.Connector.__init__(self) - self.config = PatternFile(self.data + '/config.xml', - core.fam) + self.config = PatternFile(os.path.join(self.data, 'config.xml'), + fam=core.fam) def get_additional_groups(self, metadata): return self.config.process_patterns(metadata.hostname) + + +class GroupPatternsLint(Bcfg2.Server.Lint.ServerPlugin): + def Run(self): + """ run plugin """ + cfg = self.core.plugins['GroupPatterns'].config + for entry in cfg.xdata.xpath('//GroupPattern'): + groups = [g.text for g in entry.findall('Group')] + self.check(entry, groups, ptype='NamePattern') + self.check(entry, groups, ptype='NameRange') + + @classmethod + def Errors(cls): + return {"pattern-fails-to-initialize":"error"} + + def check(self, entry, groups, ptype="NamePattern"): + if ptype == "NamePattern": + pmap = lambda p: PatternMap(p, None, groups) + else: + pmap = lambda p: PatternMap(None, p, groups) + + for el in entry.findall(ptype): + pat = el.text + try: + pmap(pat) + except: + err = sys.exc_info()[1] + 
self.LintError("pattern-fails-to-initialize", + "Failed to initialize %s %s for %s: %s" % + (ptype, pat, entry.get('pattern'), err)) diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py index e9c1c1cff..69b019160 100644 --- a/src/lib/Bcfg2/Server/Plugins/Hostbase.py +++ b/src/lib/Bcfg2/Server/Plugins/Hostbase.py @@ -3,19 +3,24 @@ This file provides the Hostbase plugin. It manages dns/dhcp/nis host information """ +from lxml.etree import Element, SubElement import os +import re +from time import strftime os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings' -from lxml.etree import Element, SubElement import Bcfg2.Server.Plugin from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError -from time import strftime -from sets import Set from django.template import Context, loader from django.db import connection -import re # Compatibility imports from Bcfg2.Bcfg2Py3k import StringIO +try: + set +except NameError: + # deprecated since python 2.6 + from sets import Set as set + class Hostbase(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Structure, @@ -383,7 +388,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin, """) hostbase = cursor.fetchall() domains = [host[0].split(".", 1)[1] for host in hostbase] - domains_set = Set(domains) + domains_set = set(domains) domain_data = [(domain, domains.count(domain)) for domain in domains_set] domain_data.sort() @@ -393,7 +398,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin, ips = cursor.fetchall() three_octets = [ip[0].rstrip('0123456789').rstrip('.') \ for ip in ips] - three_octets_set = Set(three_octets) + three_octets_set = set(three_octets) three_octets_data = [(octet, three_octets.count(octet)) \ for octet in three_octets_set] three_octets_data.sort() @@ -412,7 +417,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin, append_data.append((three_octet, tuple(tosort))) two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets] - two_octets_set = 
Set(two_octets) + two_octets_set = set(two_octets) two_octets_data = [(octet, two_octets.count(octet)) for octet in two_octets_set] two_octets_data.sort() @@ -446,7 +451,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin, else: if appenddata[0] == ip[0]: simple = False - ips.append((appenddata[2], appenddata[0], Set(namelist), + ips.append((appenddata[2], appenddata[0], set(namelist), cnamelist, simple, appenddata[1])) appenddata = ip simple = True @@ -455,7 +460,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin, if ip[4]: cnamelist.append(ip[4].split('.', 1)[0]) simple = False - ips.append((appenddata[2], appenddata[0], Set(namelist), + ips.append((appenddata[2], appenddata[0], set(namelist), cnamelist, simple, appenddata[1])) context = Context({ 'subnet': subnet[0], diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py index 29abf5b13..9883085db 100644 --- a/src/lib/Bcfg2/Server/Plugins/Ldap.py +++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py @@ -139,7 +139,7 @@ class LdapConnection(object): result = self.conn.search_s( query.base, SCOPE_MAP[query.scope], - query.filter, + query.filter.replace("\\", "\\\\"), query.attrs, ) break diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py index 970126b80..a5fa78088 100644 --- a/src/lib/Bcfg2/Server/Plugins/Metadata.py +++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py @@ -2,17 +2,39 @@ This file stores persistent metadata for the Bcfg2 Configuration Repository. 
""" -import copy -import fcntl -import lxml.etree +import re import os -import os.path -import socket import sys import time - -import Bcfg2.Server.FileMonitor +import copy +import fcntl +import socket +import lxml.etree +import Bcfg2.Server +import Bcfg2.Server.Lint import Bcfg2.Server.Plugin +import Bcfg2.Server.FileMonitor +from Bcfg2.Bcfg2Py3k import MutableMapping +from Bcfg2.version import Bcfg2VersionInfo + +try: + from django.db import models + has_django = True +except ImportError: + has_django = False + + +try: + all +except NameError: + # some of the crazy lexical closure stuff below works with all() + # but not with this loop inline. i really don't understand + # lexical closures some^Wmost days + def all(iterable): + for element in iterable: + if not element: + return False + return True def locked(fd): @@ -24,28 +46,68 @@ def locked(fd): return False -class MetadataConsistencyError(Exception): - """This error gets raised when metadata is internally inconsistent.""" - pass +if has_django: + class MetadataClientModel(models.Model, + Bcfg2.Server.Plugin.PluginDatabaseModel): + hostname = models.CharField(max_length=255, primary_key=True) + version = models.CharField(max_length=31, null=True) + class ClientVersions(MutableMapping): + def __getitem__(self, key): + try: + return MetadataClientModel.objects.get(hostname=key).version + except MetadataClientModel.DoesNotExist: + raise KeyError(key) + + def __setitem__(self, key, value): + client = MetadataClientModel.objects.get_or_create(hostname=key)[0] + client.version = value + client.save() + + def __delitem__(self, key): + # UserDict didn't require __delitem__, but MutableMapping + # does. we don't want deleting a client version record to + # delete the client, so we just set the version to None, + # which is kinda like deleting it, but not really. 
+ try: + client = MetadataClientModel.objects.get(hostname=key) + except MetadataClientModel.DoesNotExist: + raise KeyError(key) + client.version = None + client.save() -class MetadataRuntimeError(Exception): - """This error is raised when the metadata engine - is called prior to reading enough data. - """ - pass + def __len__(self): + return MetadataClientModel.objects.count() + def __iter__(self): + for client in MetadataClientModel.objects.all(): + yield client.hostname -class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): + def keys(self): + return [c.hostname for c in MetadataClientModel.objects.all()] + + def __contains__(self, key): + try: + client = MetadataClientModel.objects.get(hostname=key) + return True + except MetadataClientModel.DoesNotExist: + return False + + +class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked): """Handles xml config files and all XInclude statements""" def __init__(self, metadata, watch_clients, basefile): - Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, - os.path.join(metadata.data, - basefile), - metadata.core.fam) + # we tell XMLFileBacked _not_ to add a monitor for this file, + # because the main Metadata plugin has already added one. 
+ # then we immediately set should_monitor to the proper value, + # so that XInclude'd files get properly watched + fpath = os.path.join(metadata.data, basefile) + Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath, + fam=metadata.core.fam, + should_monitor=False) + self.should_monitor = watch_clients self.metadata = metadata self.basefile = basefile - self.should_monitor = watch_clients self.data = None self.basedata = None self.basedir = metadata.data @@ -56,25 +118,22 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): @property def xdata(self): if not self.data: - raise MetadataRuntimeError + raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" % + self.basefile) return self.data @property def base_xdata(self): if not self.basedata: - raise MetadataRuntimeError + raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" % + self.basefile) return self.basedata - def add_monitor(self, fpath, fname): - """Add a fam monitor for an included file""" - if self.should_monitor: - self.metadata.core.fam.AddMonitor(fpath, self.metadata) - self.extras.append(fname) - def load_xml(self): """Load changes from XML""" try: - xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile)) + xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile), + parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: self.logger.error('Failed to parse %s' % self.basefile) return @@ -100,12 +159,14 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): try: datafile = open(tmpfile, 'w') except IOError: - e = sys.exc_info()[1] - self.logger.error("Failed to write %s: %s" % (tmpfile, e)) - raise MetadataRuntimeError + msg = "Failed to write %s: %s" % (tmpfile, sys.exc_info()[1]) + self.logger.error(msg) + raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg) # prep data dataroot = xmltree.getroot() - newcontents = lxml.etree.tostring(dataroot, pretty_print=True) + newcontents = lxml.etree.tostring(dataroot, 
xml_declaration=False, + pretty_print=True).decode('UTF-8') + fd = datafile.fileno() while locked(fd) == True: @@ -114,21 +175,24 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): datafile.write(newcontents) except: fcntl.lockf(fd, fcntl.LOCK_UN) - self.logger.error("Metadata: Failed to write new xml data to %s" % - tmpfile, exc_info=1) + msg = "Metadata: Failed to write new xml data to %s: %s" % \ + (tmpfile, sys.exc_info()[1]) + self.logger.error(msg, exc_info=1) os.unlink(tmpfile) - raise MetadataRuntimeError + raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg) datafile.close() - # check if clients.xml is a symlink if os.path.islink(fname): fname = os.readlink(fname) try: os.rename(tmpfile, fname) + except: - self.logger.error("Metadata: Failed to rename %s" % tmpfile) - raise MetadataRuntimeError + msg = "Metadata: Failed to rename %s: %s" % (tmpfile, + sys.exc_info()[1]) + self.logger.error(msg) + raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg) def find_xml_for_xpath(self, xpath): """Find and load xml file containing the xpath query""" @@ -144,22 +208,26 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): """Try to find the data in included files""" for included in self.extras: try: - xdata = lxml.etree.parse(os.path.join(self.basedir, - included)) + xdata = lxml.etree.parse(included, + parser=Bcfg2.Server.XMLParser) cli = xdata.xpath(xpath) if len(cli) > 0: - return {'filename': os.path.join(self.basedir, - included), + return {'filename': included, 'xmltree': xdata, 'xquery': cli} except lxml.etree.XMLSyntaxError: - self.logger.error('Failed to parse %s' % (included)) + self.logger.error('Failed to parse %s' % included) return {} + def add_monitor(self, fpath): + self.extras.append(fpath) + if self.fam and self.should_monitor: + self.fam.AddMonitor(fpath, self.metadata) + def HandleEvent(self, event): """Handle fam events""" - filename = event.filename.split('/')[-1] - if filename in self.extras: + filename = 
os.path.basename(event.filename) + if event.filename in self.extras: if event.code2str() == 'exists': return False elif filename != self.basefile: @@ -172,8 +240,8 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): class ClientMetadata(object): """This object contains client metadata.""" - def __init__(self, client, profile, groups, bundles, - aliases, addresses, categories, uuid, password, query): + def __init__(self, client, profile, groups, bundles, aliases, addresses, + categories, uuid, password, version, query): self.hostname = client self.profile = profile self.bundles = bundles @@ -184,6 +252,11 @@ class ClientMetadata(object): self.uuid = uuid self.password = password self.connectors = [] + self.version = version + try: + self.version_info = Bcfg2VersionInfo(version) + except: + self.version_info = None self.query = query def inGroup(self, group): @@ -198,7 +271,8 @@ class ClientMetadata(object): class MetadataQuery(object): - def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category): + def __init__(self, by_name, get_clients, by_groups, by_profiles, + all_groups, all_groups_in_category): # resolver is set later self.by_name = by_name self.names_by_groups = by_groups @@ -217,74 +291,125 @@ class MetadataQuery(object): return [self.by_name(name) for name in self.all_clients()] -class Metadata(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Metadata, - Bcfg2.Server.Plugin.Statistics): +class MetadataGroup(tuple): + def __new__(cls, name, bundles=None, category=None, + is_profile=False, is_public=False, is_private=False): + if bundles is None: + bundles = set() + return tuple.__new__(cls, (bundles, category)) + + def __init__(self, name, bundles=None, category=None, + is_profile=False, is_public=False, is_private=False): + if bundles is None: + bundles = set() + tuple.__init__(self) + self.name = name + self.bundles = bundles + self.category = category + self.is_profile = is_profile + 
self.is_public = is_public + self.is_private = is_private + + def __str__(self): + return repr(self) + + def __repr__(self): + return "%s %s (bundles=%s, category=%s)" % \ + (self.__class__.__name__, self.name, self.bundles, + self.category) + + def __hash__(self): + return hash(self.name) + +class Metadata(Bcfg2.Server.Plugin.Metadata, + Bcfg2.Server.Plugin.Statistics, + Bcfg2.Server.Plugin.DatabaseBacked): """This class contains data for bcfg2 server metadata.""" __author__ = 'bcfg-dev@mcs.anl.gov' name = "Metadata" sort_order = 500 def __init__(self, core, datastore, watch_clients=True): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.Metadata.__init__(self) - Bcfg2.Server.Plugin.Statistics.__init__(self) - if watch_clients: - try: - core.fam.AddMonitor(os.path.join(self.data, "groups.xml"), self) - core.fam.AddMonitor(os.path.join(self.data, "clients.xml"), self) - except: - print("Unable to add file monitor for groups.xml or clients.xml") - raise Bcfg2.Server.Plugin.PluginInitError - - self.clients_xml = XMLMetadataConfig(self, watch_clients, 'clients.xml') - self.groups_xml = XMLMetadataConfig(self, watch_clients, 'groups.xml') - self.states = {} - if watch_clients: - self.states = {"groups.xml": False, - "clients.xml": False} - self.addresses = {} + Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore) + Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore) + self.watch_clients = watch_clients + self.states = dict() + self.extra = dict() + self.handlers = [] + self._handle_file("groups.xml") + if (self._use_db and + os.path.exists(os.path.join(self.data, "clients.xml"))): + self.logger.warning("Metadata: database enabled but clients.xml" + "found, parsing in compatibility mode") + self._handle_file("clients.xml") + elif not self._use_db: + self._handle_file("clients.xml") + + # mapping of clientname -> authtype self.auth = dict() - self.clients = {} - self.aliases = {} - self.groups = {} - self.cgroups = 
{} - self.public = [] - self.private = [] - self.profiles = [] - self.categories = {} - self.bad_clients = {} - self.uuid = {} + # list of clients required to have non-global password self.secure = [] + # list of floating clients self.floating = [] + # mapping of clientname -> password self.passwords = {} + self.addresses = {} + self.raddresses = {} + # mapping of clientname -> [groups] + self.clientgroups = {} + # list of clients + self.clients = [] + self.aliases = {} + self.raliases = {} + # mapping of groupname -> MetadataGroup object + self.groups = {} + # mappings of predicate -> MetadataGroup object + self.group_membership = dict() + self.negated_groups = dict() + # mapping of hostname -> version string + if self._use_db: + self.versions = ClientVersions() + else: + self.versions = dict() + self.uuid = {} self.session_cache = {} self.default = None self.pdirty = False - self.extra = {'groups.xml': [], - 'clients.xml': []} self.password = core.password self.query = MetadataQuery(core.build_metadata, - lambda: list(self.clients.keys()), + lambda: list(self.clients), self.get_client_names_by_groups, self.get_client_names_by_profiles, self.get_all_group_names, self.get_all_groups_in_category) @classmethod - def init_repo(cls, repo, groups, os_selection, clients): - path = os.path.join(repo, cls.name) - os.makedirs(path) - open(os.path.join(repo, "Metadata", "groups.xml"), - "w").write(groups % os_selection) - open(os.path.join(repo, "Metadata", "clients.xml"), - "w").write(clients % socket.getfqdn()) - - def get_groups(self): - '''return groups xml tree''' - groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml")) - root = groups_tree.getroot() - return root + def init_repo(cls, repo, **kwargs): + # must use super here; inheritance works funny with class methods + super(Metadata, cls).init_repo(repo) + + for fname in ["clients.xml", "groups.xml"]: + aname = re.sub(r'[^A-z0-9_]', '_', fname) + if aname in kwargs: + open(os.path.join(repo, cls.name, 
fname), + "w").write(kwargs[aname]) + + def _handle_file(self, fname): + if self.watch_clients: + try: + self.core.fam.AddMonitor(os.path.join(self.data, fname), self) + except: + err = sys.exc_info()[1] + msg = "Unable to add file monitor for %s: %s" % (fname, err) + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginInitError(msg) + self.states[fname] = False + aname = re.sub(r'[^A-z0-9_]', '_', fname) + xmlcfg = XMLMetadataConfig(self, self.watch_clients, fname) + setattr(self, aname, xmlcfg) + self.handlers.append(xmlcfg.HandleEvent) + self.extra[fname] = [] def _search_xdata(self, tag, name, tree, alias=False): for node in tree.findall("//%s" % tag): @@ -312,7 +437,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, node = self._search_xdata(tag, name, config.xdata, alias=alias) if node != None: self.logger.error("%s \"%s\" already exists" % (tag, name)) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataConsistencyError element = lxml.etree.SubElement(config.base_xdata.getroot(), tag, name=name) if attribs: @@ -322,70 +447,130 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, def add_group(self, group_name, attribs): """Add group to groups.xml.""" - return self._add_xdata(self.groups_xml, "Group", group_name, - attribs=attribs) + if self._use_db: + msg = "Metadata does not support adding groups with use_database enabled" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + return self._add_xdata(self.groups_xml, "Group", group_name, + attribs=attribs) def add_bundle(self, bundle_name): """Add bundle to groups.xml.""" - return self._add_xdata(self.groups_xml, "Bundle", bundle_name) + if self._use_db: + msg = "Metadata does not support adding bundles with use_database enabled" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + return self._add_xdata(self.groups_xml, "Bundle", bundle_name) - def add_client(self, client_name, attribs): + def add_client(self, client_name, 
attribs=None): """Add client to clients.xml.""" - return self._add_xdata(self.clients_xml, "Client", client_name, - attribs=attribs, alias=True) + if attribs is None: + attribs = dict() + if self._use_db: + client = MetadataClientModel(hostname=client_name) + client.save() + self.clients = self.list_clients() + return client + else: + return self._add_xdata(self.clients_xml, "Client", client_name, + attribs=attribs, alias=True) def _update_xdata(self, config, tag, name, attribs, alias=False): node = self._search_xdata(tag, name, config.xdata, alias=alias) if node == None: self.logger.error("%s \"%s\" does not exist" % (tag, name)) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % (tag, node.get('name'))) if not xdict: self.logger.error("Unexpected error finding %s \"%s\"" % (tag, name)) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataConsistencyError for key, val in list(attribs.items()): xdict['xquery'][0].set(key, val) config.write_xml(xdict['filename'], xdict['xmltree']) def update_group(self, group_name, attribs): """Update a groups attributes.""" - return self._update_xdata(self.groups_xml, "Group", group_name, attribs) + if self._use_db: + msg = "Metadata does not support updating groups with use_database enabled" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + return self._update_xdata(self.groups_xml, "Group", group_name, + attribs) def update_client(self, client_name, attribs): """Update a clients attributes.""" - return self._update_xdata(self.clients_xml, "Client", client_name, - attribs, alias=True) + if self._use_db: + msg = "Metadata does not support updating clients with use_database enabled" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + return self._update_xdata(self.clients_xml, "Client", client_name, + attribs, alias=True) + + def list_clients(self): + 
""" List all clients in client database """ + if self._use_db: + return set([c.hostname for c in MetadataClientModel.objects.all()]) + else: + return self.clients def _remove_xdata(self, config, tag, name, alias=False): node = self._search_xdata(tag, name, config.xdata) if node == None: self.logger.error("%s \"%s\" does not exist" % (tag, name)) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % (tag, node.get('name'))) if not xdict: self.logger.error("Unexpected error finding %s \"%s\"" % (tag, name)) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict['xquery'][0].getparent().remove(xdict['xquery'][0]) - self.groups_xml.write_xml(xdict['filename'], xdict['xmltree']) + config.write_xml(xdict['filename'], xdict['xmltree']) def remove_group(self, group_name): """Remove a group.""" - return self._remove_xdata(self.groups_xml, "Group", group_name) + if self._use_db: + msg = "Metadata does not support removing groups with use_database enabled" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + return self._remove_xdata(self.groups_xml, "Group", group_name) def remove_bundle(self, bundle_name): """Remove a bundle.""" - return self._remove_xdata(self.groups_xml, "Bundle", bundle_name) + if self._use_db: + msg = "Metadata does not support removing bundles with use_database enabled" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + return self._remove_xdata(self.groups_xml, "Bundle", bundle_name) + + def remove_client(self, client_name): + """Remove a bundle.""" + if self._use_db: + try: + client = MetadataClientModel.objects.get(hostname=client_name) + except MetadataClientModel.DoesNotExist: + msg = "Client %s does not exist" % client_name + self.logger.warning(msg) + raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) + client.delete() + self.clients = 
self.list_clients() + else: + return self._remove_xdata(self.clients_xml, "Client", client_name) def _handle_clients_xml_event(self, event): xdata = self.clients_xml.xdata - self.clients = {} + self.clients = [] + self.clientgroups = {} self.aliases = {} self.raliases = {} - self.bad_clients = {} self.secure = [] self.floating = [] self.addresses = {} @@ -406,12 +591,15 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, 'cert+password') if 'uuid' in client.attrib: self.uuid[client.get('uuid')] = clname - if client.get('secure', 'false') == 'true': + if client.get('secure', 'false').lower() == 'true': self.secure.append(clname) - if client.get('location', 'fixed') == 'floating': + if (client.get('location', 'fixed') == 'floating' or + client.get('floating', 'false').lower() == 'true'): self.floating.append(clname) if 'password' in client.attrib: self.passwords[clname] = client.get('password') + if 'version' in client.attrib: + self.versions[clname] = client.get('version') self.raliases[clname] = set() for alias in client.findall('Alias'): @@ -426,115 +614,199 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, if clname not in self.raddresses: self.raddresses[clname] = set() self.raddresses[clname].add(alias.get('address')) - self.clients.update({clname: client.get('profile')}) + self.clients.append(clname) + try: + self.clientgroups[clname].append(client.get('profile')) + except KeyError: + self.clientgroups[clname] = [client.get('profile')] self.states['clients.xml'] = True + if self._use_db: + self.clients = self.list_clients() def _handle_groups_xml_event(self, event): - xdata = self.groups_xml.xdata - self.public = [] - self.private = [] - self.profiles = [] self.groups = {} - grouptmp = {} - self.categories = {} - groupseen = list() - for group in xdata.xpath('//Groups/Group'): - if group.get('name') not in groupseen: - groupseen.append(group.get('name')) + + # get_condition and aggregate_conditions must be separate + # functions in order to ensure that the scope is 
right for the + # closures they return + def get_condition(element): + negate = element.get('negate', 'false').lower() == 'true' + pname = element.get("name") + if element.tag == 'Group': + return lambda c, g, _: negate != (pname in g) + elif element.tag == 'Client': + return lambda c, g, _: negate != (pname == c) + + def aggregate_conditions(conditions): + return lambda client, groups, cats: \ + all(cond(client, groups, cats) for cond in conditions) + + # first, we get a list of all of the groups declared in the + # file. we do this in two stages because the old way of + # parsing groups.xml didn't support nested groups; in the old + # way, only Group tags under a Groups tag counted as + # declarative. so we parse those first, and then parse the + # other Group tags if they haven't already been declared. + # this lets you set options on a group (e.g., public="false") + # at the top level and then just use the name elsewhere, which + # is the original behavior + for grp in self.groups_xml.xdata.xpath("//Groups/Group") + \ + self.groups_xml.xdata.xpath("//Groups/Group//Group"): + if grp.get("name") in self.groups: + continue + self.groups[grp.get("name")] = \ + MetadataGroup(grp.get("name"), + bundles=[b.get("name") + for b in grp.findall("Bundle")], + category=grp.get("category"), + is_profile=grp.get("profile", "false") == "true", + is_public=grp.get("public", "false") == "true", + is_private=grp.get("public", "true") == "false") + if grp.get('default', 'false') == 'true': + self.default = grp.get('name') + + self.group_membership = dict() + self.negated_groups = dict() + self.options = dict() + # confusing loop condition; the XPath query asks for all + # elements under a Group tag under a Groups tag; that is + # infinitely recursive, so "all" elements really means _all_ + # elements. We then manually filter out non-Group elements + # since there doesn't seem to be a way to get Group elements + # of arbitrary depth with particular ultimate ancestors in + # XPath. 
We do the same thing for Client tags. + for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \ + self.groups_xml.xdata.xpath("//Groups/Client//*"): + if ((el.tag != 'Group' and el.tag != 'Client') or + el.getchildren()): + continue + + conditions = [] + for parent in el.iterancestors(): + cond = get_condition(parent) + if cond: + conditions.append(cond) + + gname = el.get("name") + if el.get("negate", "false").lower() == "true": + self.negated_groups[aggregate_conditions(conditions)] = \ + self.groups[gname] else: - self.logger.error("Metadata: Group %s defined multiply" % - group.get('name')) - grouptmp[group.get('name')] = \ - ([item.get('name') for item in group.findall('./Bundle')], - [item.get('name') for item in group.findall('./Group')]) - grouptmp[group.get('name')][1].append(group.get('name')) - if group.get('default', 'false') == 'true': - self.default = group.get('name') - if group.get('profile', 'false') == 'true': - self.profiles.append(group.get('name')) - if group.get('public', 'false') == 'true': - self.public.append(group.get('name')) - elif group.get('public', 'true') == 'false': - self.private.append(group.get('name')) - if 'category' in group.attrib: - self.categories[group.get('name')] = group.get('category') - - for group in grouptmp: - # self.groups[group] => (bundles, groups, categories) - self.groups[group] = (set(), set(), {}) - tocheck = [group] - group_cat = self.groups[group][2] - while tocheck: - now = tocheck.pop() - self.groups[group][1].add(now) - if now in grouptmp: - (bundles, groups) = grouptmp[now] - for ggg in groups: - if ggg in self.groups[group][1]: - continue - if (ggg not in self.categories or \ - self.categories[ggg] not in self.groups[group][2]): - self.groups[group][1].add(ggg) - tocheck.append(ggg) - if ggg in self.categories: - group_cat[self.categories[ggg]] = ggg - elif ggg in self.categories: - self.logger.info("Group %s: %s cat-suppressed %s" % \ - (group, - group_cat[self.categories[ggg]], - ggg)) - 
[self.groups[group][0].add(bund) for bund in bundles] + if self.groups[gname].category and gname in self.groups: + category = self.groups[gname].category + + def in_cat(client, groups, categories): + if category in categories: + # this is debug, not warning, because it + # gets called a _lot_ -- every time a + # group in a category is processed for + # every creation of client metadata. this + # message is produced in two other places, + # so the user should get warned by one of + # those. + self.logger.debug("%s: Group %s suppressed by " + "category %s; %s already a " + "member of %s" % + (self.name, gname, category, + client, categories[category])) + return False + return True + conditions.append(in_cat) + + self.group_membership[aggregate_conditions(conditions)] = \ + self.groups[gname] self.states['groups.xml'] = True def HandleEvent(self, event): """Handle update events for data files.""" - if self.clients_xml.HandleEvent(event): - self._handle_clients_xml_event(event) - elif self.groups_xml.HandleEvent(event): - self._handle_groups_xml_event(event) - - if False not in list(self.states.values()): - # check that all client groups are real and complete - real = list(self.groups.keys()) - for client in list(self.clients.keys()): - if self.clients[client] not in self.profiles: - self.logger.error("Client %s set as nonexistent or " - "incomplete group %s" % - (client, self.clients[client])) - self.logger.error("Removing client mapping for %s" % client) - self.bad_clients[client] = self.clients[client] - del self.clients[client] - for bclient in list(self.bad_clients.keys()): - if self.bad_clients[bclient] in self.profiles: - self.logger.info("Restored profile mapping for client %s" % - bclient) - self.clients[bclient] = self.bad_clients[bclient] - del self.bad_clients[bclient] - - def set_profile(self, client, profile, addresspair): + for hdlr in self.handlers: + aname = re.sub(r'[^A-z0-9_]', '_', os.path.basename(event.filename)) + if hdlr(event): + try: + proc = 
getattr(self, "_handle_%s_event" % aname) + except AttributeError: + proc = self._handle_default_event + proc(event) + + if False not in list(self.states.values()) and self.debug_flag: + # check that all groups are real and complete. this is + # just logged at a debug level because many groups might + # be probed, and we don't want to warn about them. + for client, groups in list(self.clientgroups.items()): + for group in groups: + if group not in self.groups: + self.debug_log("Client %s set as nonexistent group %s" % + (client, group)) + for gname, ginfo in list(self.groups.items()): + for group in ginfo.groups: + if group not in self.groups: + self.debug_log("Group %s set as nonexistent group %s" % + (gname, group)) + + + def set_profile(self, client, profile, addresspair, force=False): """Set group parameter for provided client.""" - self.logger.info("Asserting client %s profile to %s" % (client, profile)) + self.logger.info("Asserting client %s profile to %s" % + (client, profile)) if False in list(self.states.values()): - raise MetadataRuntimeError - if profile not in self.public: - self.logger.error("Failed to set client %s to private group %s" % - (client, profile)) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet") + if not force and profile not in self.groups: + msg = "Profile group %s does not exist" % profile + self.logger.error(msg) + raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) + group = self.groups[profile] + if not force and not group.is_public: + msg = "Cannot set client %s to private group %s" % (client, profile) + self.logger.error(msg) + raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) + if client in self.clients: - self.logger.info("Changing %s group from %s to %s" % - (client, self.clients[client], profile)) + if self._use_db: + msg = "DBMetadata does not support asserting client profiles" + self.logger.error(msg) + raise 
Bcfg2.Server.Plugin.PluginExecutionError(msg) + + profiles = [g for g in self.clientgroups[client] + if g in self.groups and self.groups[g].is_profile] + self.logger.info("Changing %s profile from %s to %s" % + (client, profiles, profile)) self.update_client(client, dict(profile=profile)) + if client in self.clientgroups: + for p in profiles: + self.clientgroups[client].remove(p) + self.clientgroups[client].append(profile) + else: + self.clientgroups[client] = [profile] else: self.logger.info("Creating new client: %s, profile %s" % (client, profile)) - if addresspair in self.session_cache: - # we are working with a uuid'd client - self.add_client(self.session_cache[addresspair][1], - dict(uuid=client, profile=profile, - address=addresspair[0])) + if self._use_db: + self.add_client(client) else: - self.add_client(client, dict(profile=profile)) - self.clients[client] = profile + if addresspair in self.session_cache: + # we are working with a uuid'd client + self.add_client(self.session_cache[addresspair][1], + dict(uuid=client, profile=profile, + address=addresspair[0])) + else: + self.add_client(client, dict(profile=profile)) + self.clients.append(client) + self.clientgroups[client] = [profile] + if not self._use_db: + self.clients_xml.write() + + def set_version(self, client, version): + """Set group parameter for provided client.""" + self.logger.info("Setting client %s version to %s" % (client, version)) + if client in self.clients: + self.logger.info("Setting version on client %s to %s" % + (client, version)) + self.update_client(client, dict(version=version)) + else: + msg = "Cannot set version on non-existent client %s" % client + self.logger.error(msg) + raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) + self.versions[client] = version self.clients_xml.write() def resolve_client(self, addresspair, cleanup_cache=False): @@ -549,7 +821,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, # _any_ port numbers - perhaps a priority queue could # be faster? 
curtime = time.time() - for addrpair in self.session_cache.keys(): + for addrpair in list(self.session_cache.keys()): if addresspair[0] == addrpair[0]: (stamp, _) = self.session_cache[addrpair] if curtime - stamp > cache_ttl: @@ -565,9 +837,9 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, address = addresspair[0] if address in self.addresses: if len(self.addresses[address]) != 1: - self.logger.error("Address %s has multiple reverse assignments; " - "a uuid must be used" % (address)) - raise MetadataConsistencyError + err = "Address %s has multiple reverse assignments; a uuid must be used" % address + self.logger.error(err) + raise Bcfg2.Server.Plugin.MetadataConsistencyError(err) return self.addresses[address][0] try: cname = socket.gethostbyaddr(address)[0].lower() @@ -575,34 +847,102 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, return self.aliases[cname] return cname except socket.herror: - warning = "address resolution error for %s" % (address) + warning = "address resolution error for %s" % address self.logger.warning(warning) - raise MetadataConsistencyError + raise Bcfg2.Server.Plugin.MetadataConsistencyError(warning) + + def _merge_groups(self, client, groups, categories=None): + """ set group membership based on the contents of groups.xml + and initial group membership of this client. 
Returns a tuple + of (allgroups, categories)""" + numgroups = -1 # force one initial pass + if categories is None: + categories = dict() + while numgroups != len(groups): + numgroups = len(groups) + for predicate, group in self.group_membership.items(): + if group.name in groups: + continue + if predicate(client, groups, categories): + groups.add(group.name) + if group.category: + categories[group.category] = group.name + for predicate, group in self.negated_groups.items(): + if group.name not in groups: + continue + if predicate(client, groups, categories): + groups.remove(group.name) + if group.category: + del categories[group.category] + return (groups, categories) def get_initial_metadata(self, client): """Return the metadata for a given client.""" if False in list(self.states.values()): - raise MetadataRuntimeError + raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet") client = client.lower() if client in self.aliases: client = self.aliases[client] - if client in self.clients: - profile = self.clients[client] - (bundles, groups, categories) = self.groups[profile] - else: - if self.default == None: - self.logger.error("Cannot set group for client %s; " - "no default group set" % client) - raise MetadataConsistencyError - self.set_profile(client, self.default, (None, None)) - profile = self.default - [bundles, groups, categories] = self.groups[self.default] + + groups = set() + categories = dict() + profile = None + + if client not in self.clients: + pgroup = None + if client in self.clientgroups: + pgroup = self.clientgroups[client][0] + elif self.default: + pgroup = self.default + + if pgroup: + self.set_profile(client, pgroup, (None, None), force=True) + groups.add(pgroup) + category = self.groups[pgroup].category + if category: + categories[category] = pgroup + if (pgroup in self.groups and self.groups[pgroup].is_profile): + profile = pgroup + else: + msg = "Cannot add new client %s; no default group set" % client + 
self.logger.error(msg) + raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) + + if client in self.clientgroups: + for cgroup in self.clientgroups[client]: + if cgroup in groups: + continue + if cgroup not in self.groups: + self.groups[cgroup] = MetadataGroup(cgroup) + category = self.groups[cgroup].category + if category and category in categories: + self.logger.warning("%s: Group %s suppressed by " + "category %s; %s already a member " + "of %s" % + (self.name, cgroup, category, + client, categories[category])) + continue + if category: + categories[category] = cgroup + groups.add(cgroup) + # favor client groups for setting profile + if not profile and self.groups[cgroup].is_profile: + profile = cgroup + + groups, categories = self._merge_groups(client, groups, + categories=categories) + + bundles = set() + for group in groups: + try: + bundles.update(self.groups[group].bundles) + except KeyError: + self.logger.warning("%s: %s is a member of undefined group %s" % + (self.name, client, group)) + aliases = self.raliases.get(client, set()) addresses = self.raddresses.get(client, set()) - newgroups = set(groups) - newbundles = set(bundles) - newcategories = {} - newcategories.update(categories) + version = self.versions.get(client, None) if client in self.passwords: password = self.passwords[client] else: @@ -613,61 +953,70 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, uuid = uuids[0] else: uuid = None - for group in self.cgroups.get(client, []): - if group in self.groups: - nbundles, ngroups, ncategories = self.groups[group] - else: - nbundles, ngroups, ncategories = ([], [group], {}) - [newbundles.add(b) for b in nbundles if b not in newbundles] - [newgroups.add(g) for g in ngroups if g not in newgroups] - newcategories.update(ncategories) - return ClientMetadata(client, profile, newgroups, newbundles, aliases, - addresses, newcategories, uuid, password, + if not profile: + # one last ditch attempt at setting the profile + profiles = [g for g in groups + if g 
in self.groups and self.groups[g].is_profile] + if len(profiles) >= 1: + profile = profiles[0] + + return ClientMetadata(client, profile, groups, bundles, aliases, + addresses, categories, uuid, password, version, self.query) def get_all_group_names(self): all_groups = set() - [all_groups.update(g[1]) for g in list(self.groups.values())] + all_groups.update(self.groups.keys()) + all_groups.update([g.name for g in self.group_membership.values()]) + all_groups.update([g.name for g in self.negated_groups.values()]) + for grp in self.clientgroups.values(): + all_groups.update(grp) return all_groups def get_all_groups_in_category(self, category): - all_groups = set() - [all_groups.add(g) for g in self.categories \ - if self.categories[g] == category] - return all_groups + return set([g.name for g in self.groups.values() + if g.category == category]) def get_client_names_by_profiles(self, profiles): - return [client for client, profile in list(self.clients.items()) \ - if profile in profiles] + rv = [] + for client in list(self.clients): + mdata = self.get_initial_metadata(client) + if mdata.profile in profiles: + rv.append(client) + return rv def get_client_names_by_groups(self, groups): mdata = [self.core.build_metadata(client) - for client in list(self.clients.keys())] + for client in list(self.clients)] return [md.hostname for md in mdata if md.groups.issuperset(groups)] + def get_client_names_by_bundles(self, bundles): + mdata = [self.core.build_metadata(client) + for client in list(self.clients.keys())] + return [md.hostname for md in mdata if md.bundles.issuperset(bundles)] + def merge_additional_groups(self, imd, groups): for group in groups: - if (group in self.categories and - self.categories[group] in imd.categories): + if group in imd.groups: continue - newbundles, newgroups, _ = self.groups.get(group, - (list(), - [group], - dict())) - for newbundle in newbundles: - if newbundle not in imd.bundles: - imd.bundles.add(newbundle) - for newgroup in newgroups: - 
if newgroup not in imd.groups: - if (newgroup in self.categories and - self.categories[newgroup] in imd.categories): - continue - if newgroup in self.private: - self.logger.error("Refusing to add dynamic membership " - "in private group %s for client %s" % - (newgroup, imd.hostname)) - continue - imd.groups.add(newgroup) + if group in self.groups and self.groups[group].category: + category = self.groups[group].category + if self.groups[group].category in imd.categories: + self.logger.warning("%s: Group %s suppressed by category " + "%s; %s already a member of %s" % + (self.name, group, category, + imd.hostname, + imd.categories[category])) + continue + imd.categories[category] = group + imd.groups.add(group) + + self._merge_groups(imd.hostname, imd.groups, + categories=imd.categories) + + for group in imd.groups: + if group in self.groups: + imd.bundles.update(self.groups[group].bundles) def merge_additional_data(self, imd, source, data): if not hasattr(imd, source): @@ -686,8 +1035,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, (client, address)) return True else: - self.logger.error("Got request for non-float client %s from %s" % - (client, address)) + self.logger.error("Got request for non-float client %s from %s" + % (client, address)) return False resolved = self.resolve_client(addresspair) if resolved.lower() == client.lower(): @@ -711,9 +1060,10 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, id_method = 'address' try: client = self.resolve_client(address) - except MetadataConsistencyError: - self.logger.error("Client %s failed to resolve; metadata problem" - % address[0]) + except Bcfg2.Server.Plugin.MetadataConsistencyError: + err = sys.exc_info()[1] + self.logger.error("Client %s failed to resolve: %s" % + (address[0], err)) return False else: id_method = 'uuid' @@ -768,7 +1118,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, "secure mode" % address[0]) return False # populate the session cache - if user.decode('utf-8') != 'root': + if user != 'root': 
self.session_cache[address] = (time.time(), client) return True @@ -792,7 +1142,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, def include_group(group): return not only_client or group in clientmeta.groups - groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml")) + groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"), + parser=Bcfg2.Server.XMLParser) try: groups_tree.xinclude() except lxml.etree.XIncludeError: @@ -810,20 +1161,26 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, del categories[None] if hosts: instances = {} - clients = self.clients - for client, profile in list(clients.items()): + for client in list(self.clients): if include_client(client): continue - if profile in instances: - instances[profile].append(client) + if client in self.clientgroups: + groups = self.clientgroups[client] + elif self.default: + groups = [self.default] else: - instances[profile] = [client] - for profile, clist in list(instances.items()): + continue + for group in groups: + try: + instances[group].append(client) + except KeyError: + instances[group] = [client] + for group, clist in list(instances.items()): clist.sort() viz_str.append('"%s-instances" [ label="%s", shape="record" ];' % - (profile, '|'.join(clist))) + (group, '|'.join(clist))) viz_str.append('"%s-instances" -> "group-%s";' % - (profile, profile)) + (group, group)) if bundles: bundles = [] [bundles.append(bund.get('name')) \ @@ -864,3 +1221,35 @@ class Metadata(Bcfg2.Server.Plugin.Plugin, viz_str.append('"%s" [label="%s", shape="record", style="filled", fillcolor="%s"];' % (category, category, categories[category])) return "\n".join("\t" + s for s in viz_str) + + +class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): + def Run(self): + self.nested_clients() + self.deprecated_options() + + @classmethod + def Errors(cls): + return {"nested-client-tags": "warning", + "deprecated-clients-options": "warning"} + + def deprecated_options(self): + clientdata = self.metadata.clients_xml.xdata + 
for el in groupdata.xpath("//Client"): + loc = el.get("location") + if loc: + if loc == "floating": + floating = True + else: + floating = False + self.LintError("deprecated-clients-options", + "The location='%s' option is deprecated. " + "Please use floating='%s' instead: %s" % + (loc, floating, self.RenderXML(el))) + + def nested_clients(self): + groupdata = self.metadata.groups_xml.xdata + for el in groupdata.xpath("//Client//Client"): + self.LintError("nested-client-tags", + "Client %s nested within Client tag: %s" % + (el.get("name"), self.RenderXML(el))) diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py index 4dbd57d16..f2b8336e0 100644 --- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py +++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py @@ -7,18 +7,23 @@ import glob import socket import logging import lxml.etree - +import Bcfg2.Server import Bcfg2.Server.Plugin LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen') line_fmt = '\t%-32s %s' -class NagiosGenConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked, - Bcfg2.Server.Plugin.StructFile): +class NagiosGenConfig(Bcfg2.Server.Plugin.StructFile): def __init__(self, filename, fam): - Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam) - Bcfg2.Server.Plugin.StructFile.__init__(self, filename) + # create config.xml if missing + if not os.path.exists(filename): + LOGGER.warning("NagiosGen: %s missing. " + "Creating empty one for you." 
% filename) + open(filename, "w").write("<NagiosGen></NagiosGen>") + + Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam, + should_monitor=True) class NagiosGen(Bcfg2.Server.Plugin.Plugin, @@ -51,7 +56,12 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin, def createhostconfig(self, entry, metadata): """Build host specific configuration file.""" - host_address = socket.gethostbyname(metadata.hostname) + try: + host_address = socket.gethostbyname(metadata.hostname) + except socket.gaierror: + LOGGER.error("Failed to find IP address for %s" % + metadata.hostname) + raise Bcfg2.Server.Plugin.PluginExecutionError host_groups = [grp for grp in metadata.groups if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))] host_config = ['define host {', @@ -84,7 +94,8 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin, LOGGER.warn("Parsing deprecated NagiosGen/parents.xml. " "Update to the new-style config with " "nagiosgen-convert.py.") - parents = lxml.etree.parse(pfile) + parents = lxml.etree.parse(pfile, + parser=Bcfg2.Server.XMLParser) for el in parents.xpath("//Depend[@name='%s']" % metadata.hostname): if 'parent' in xtra: xtra['parent'] += "," + el.get("on") diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py index 5fff20d98..20f9ba877 100644 --- a/src/lib/Bcfg2/Server/Plugins/Ohai.py +++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py @@ -41,7 +41,7 @@ class OhaiCache(object): # simply return if the client returned nothing return self.cache[item] = json.loads(value) - file("%s/%s.json" % (self.dirname, item), 'w').write(value) + open("%s/%s.json" % (self.dirname, item), 'w').write(value) def __getitem__(self, item): if item not in self.cache: diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py index 49e9d417b..685cd5c1d 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py @@ -2,13 +2,15 @@ import re import gzip from 
Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import Source -from Bcfg2.Bcfg2Py3k import cPickle, file +from Bcfg2.Bcfg2Py3k import cPickle class AptCollection(Collection): def get_group(self, group): - self.logger.warning("Packages: Package groups are not supported by APT") + self.logger.warning("Packages: Package groups are not " + "supported by APT") return [] + class AptSource(Source): basegroups = ['apt', 'debian', 'ubuntu', 'nexenta'] ptype = 'deb' @@ -22,14 +24,15 @@ class AptSource(Source): 'components': self.components, 'arches': self.arches}] def save_state(self): - cache = file(self.cachefile, 'wb') - cPickle.dump((self.pkgnames, self.deps, self.provides), - cache, 2) + cache = open(self.cachefile, 'wb') + cPickle.dump((self.pkgnames, self.deps, self.provides, + self.essentialpkgs), cache, 2) cache.close() def load_state(self): - data = file(self.cachefile) - self.pkgnames, self.deps, self.provides = cPickle.load(data) + data = open(self.cachefile) + (self.pkgnames, self.deps, self.provides, + self.essentialpkgs) = cPickle.load(data) def filter_unknown(self, unknown): filtered = set([u for u in unknown if u.startswith('choice')]) diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py index 3ea14ce75..b05a69d4a 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py @@ -1,6 +1,7 @@ import sys import copy import logging +import lxml import Bcfg2.Server.Plugin logger = logging.getLogger(__name__) @@ -52,13 +53,40 @@ class Collection(Bcfg2.Server.Plugin.Debuggable): @property def cachekey(self): - return md5(self.get_config()).hexdigest() + return md5(self.sourcelist()).hexdigest() def get_config(self): - self.logger.error("Packages: Cannot generate config for host with " - "multiple source types (%s)" % self.metadata.hostname) + self.logger.error("Packages: Cannot generate 
config for host %s with " + "no sources or multiple source types" % + self.metadata.hostname) return "" + def sourcelist(self): + srcs = [] + for source in self.sources: + # get_urls() loads url_map as a side-effect + source.get_urls() + for url_map in source.url_map: + if url_map['arch'] not in metadata.groups: + continue + reponame = source.get_repo_name(url_map) + srcs.append("Name: %s" % reponame) + srcs.append(" Type: %s" % source.ptype) + if url_map['url']: + srcs.append(" URL: %s" % url_map['url']) + elif url_map['rawurl']: + srcs.append(" RAWURL: %s" % url_map['rawurl']) + if source.gpgkeys: + srcs.append(" GPG Key(s): %s" % ", ".join(source.gpgkeys)) + else: + srcs.append(" GPG Key(s): None") + if len(source.blacklist): + srcs.append(" Blacklist: %s" % ", ".join(source.blacklist)) + if len(source.whitelist): + srcs.append(" Whitelist: %s" % ", ".join(source.whitelist)) + srcs.append("") + return "\n".join(srcs) + def get_relevant_groups(self): groups = [] for source in self.sources: @@ -79,6 +107,14 @@ class Collection(Bcfg2.Server.Plugin.Debuggable): cachefiles.add(source.cachefile) return list(cachefiles) + def get_groups(self, grouplist): + """ provided since some backends may be able to query multiple + groups at once faster than serially """ + rv = dict() + for group, ptype in grouplist: + rv[group] = self.get_group(group, ptype) + return rv + def get_group(self, group, ptype=None): for source in self.sources: pkgs = source.get_group(self.metadata, group, ptype=ptype) @@ -152,6 +188,28 @@ class Collection(Bcfg2.Server.Plugin.Debuggable): """ do any collection-level data setup tasks """ pass + def packages_from_entry(self, entry): + """ given a Package or BoundPackage entry, get a list of the + package(s) described by it in a format appropriate for passing + to complete(). 
by default, that's just the name; only the Yum + backend supports getting versions""" + return [entry.get("name")] + + def packages_to_entry(self, pkglist, entry): + for pkg in pkglist: + lxml.etree.SubElement(entry, 'BoundPackage', name=pkg, + version=self.setup.cfp.get("packages", + "version", + default="auto"), + type=self.ptype, origin='Packages') + + def get_new_packages(self, initial, complete): + """ compute the difference between the complete package list + and the initial package list. this is necessary because the + format may be different between the two lists due to + packages_{to,from}_entry() """ + return list(complete.difference(initial)) + def complete(self, packagelist): '''Build the transitive closure of all package dependencies @@ -350,15 +408,7 @@ def factory(metadata, sources, basepath, debug=False): ",".join([s.__name__ for s in sclasses])) cclass = Collection elif len(sclasses) == 0: - # you'd think this should be a warning, but it happens all the - # freaking time if you have a) machines in your clients.xml - # that do not have the proper groups set up yet (e.g., if you - # have multiple Bcfg2 servers and Packages-relevant groups set - # by probes); and b) templates that query all or multiple - # machines (e.g., with metadata.query.all_clients()) - if debug: - logger.error("Packages: No sources found for %s" % - metadata.hostname) + logger.error("Packages: No sources found for %s" % metadata.hostname) cclass = Collection else: cclass = get_collection_class(sclasses.pop().__name__.replace("Source", @@ -373,4 +423,3 @@ def factory(metadata, sources, basepath, debug=False): clients[metadata.hostname] = ckey collections[ckey] = collection return collection - diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py index 99a090739..34c7b42c1 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py @@ -1,6 +1,6 @@ import gzip import tarfile -from 
Bcfg2.Bcfg2Py3k import cPickle, file +from Bcfg2.Bcfg2Py3k import cPickle from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import Source @@ -9,6 +9,7 @@ class PacCollection(Collection): self.logger.warning("Packages: Package groups are not supported by Pacman") return [] + class PacSource(Source): basegroups = ['arch', 'parabola'] ptype = 'pacman' @@ -22,13 +23,13 @@ class PacSource(Source): 'components': self.components, 'arches': self.arches}] def save_state(self): - cache = file(self.cachefile, 'wb') + cache = open(self.cachefile, 'wb') cPickle.dump((self.pkgnames, self.deps, self.provides), cache, 2) cache.close() def load_state(self): - data = file(self.cachefile) + data = open(self.cachefile) self.pkgnames, self.deps, self.provides = cPickle.load(data) def filter_unknown(self, unknown): diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py index 7796b9e34..0d565be31 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py @@ -4,17 +4,15 @@ import lxml.etree import Bcfg2.Server.Plugin from Bcfg2.Server.Plugins.Packages.Source import SourceInitError -class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, - Bcfg2.Server.Plugin.StructFile, +class PackagesSources(Bcfg2.Server.Plugin.StructFile, Bcfg2.Server.Plugin.Debuggable): __identifier__ = None def __init__(self, filename, cachepath, fam, packages, setup): Bcfg2.Server.Plugin.Debuggable.__init__(self) try: - Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, - filename, - fam) + Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam, + should_monitor=True) except OSError: err = sys.exc_info()[1] msg = "Packages: Failed to read configuration file: %s" % err @@ -22,7 +20,6 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, msg += " Have you created it?" 
self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginInitError(msg) - Bcfg2.Server.Plugin.StructFile.__init__(self, filename) self.cachepath = cachepath self.setup = setup if not os.path.exists(self.cachepath): @@ -42,18 +39,11 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, source.toggle_debug() def HandleEvent(self, event=None): - Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event) + Bcfg2.Server.Plugin.XMLFileBacked.HandleEvent(self, event=event) if event and event.filename != self.name: - for fname in self.extras: - fpath = None - if fname.startswith("/"): - fpath = os.path.abspath(fname) - else: - fpath = \ - os.path.abspath(os.path.join(os.path.dirname(self.name), - fname)) + for fpath in self.extras: if fpath == os.path.abspath(event.filename): - self.parsed.add(fname) + self.parsed.add(fpath) break if self.loaded: @@ -65,7 +55,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, return sorted(list(self.parsed)) == sorted(self.extras) def Index(self): - Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self) + Bcfg2.Server.Plugin.XMLFileBacked.Index(self) self.entries = [] for xsource in self.xdata.findall('.//Source'): source = self.source_from_xml(xsource) @@ -87,7 +77,8 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, stype.title()) cls = getattr(module, "%sSource" % stype.title()) except (ImportError, AttributeError): - self.logger.error("Packages: Unknown source type %s" % stype) + ex = sys.exc_info()[1] + self.logger.error("Packages: Unknown source type %s (%s)" % (stype, ex)) return None try: @@ -106,4 +97,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, return "PackagesSources: %s" % repr(self.entries) def __str__(self): - return "PackagesSources: %s" % str(self.entries) + return "PackagesSources: %s sources" % len(self.entries) + + def __len__(self): + return len(self.entries) diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py 
b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py index edcdcd9f2..df3706fb1 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py @@ -1,11 +1,10 @@ import os import re import sys -import base64 import Bcfg2.Server.Plugin from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \ HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \ - urlopen, file, cPickle + urlopen, cPickle try: from hashlib import md5 @@ -51,7 +50,18 @@ class Source(Bcfg2.Server.Plugin.Debuggable): for key, tag in [('components', 'Component'), ('arches', 'Arch'), ('blacklist', 'Blacklist'), ('whitelist', 'Whitelist')]: - self.__dict__[key] = [item.text for item in xsource.findall(tag)] + setattr(self, key, [item.text for item in xsource.findall(tag)]) + self.server_options = dict() + self.client_options = dict() + opts = xsource.findall("Options") + for el in opts: + repoopts = dict([(k, v) + for k, v in el.attrib.items() + if k != "clientonly" and k != "serveronly"]) + if el.get("clientonly", "false").lower() == "false": + self.server_options.update(repoopts) + if el.get("serveronly", "false").lower() == "false": + self.client_options.update(repoopts) self.gpgkeys = [el.text for el in xsource.findall("GPGKey")] @@ -137,9 +147,8 @@ class Source(Bcfg2.Server.Plugin.Debuggable): def get_repo_name(self, url_map): # try to find a sensible name for a repo - if 'components' in url_map and url_map['components']: - # use the first component as the name - rname = url_map['components'][0] + if 'component' in url_map and url_map['component']: + rname = url_map['component'] else: name = None for repo_re in (self.mrepo_re, @@ -149,12 +158,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable): if match: name = match.group(1) break - if name is None: - # couldn't figure out the name from the URL or URL map - # (which probably means its a screwy URL), so we just - # generate a random one - name = base64.b64encode(os.urandom(16))[:-2] - 
rname = "%s-%s" % (self.groups[0], name) + if name and self.groups: + rname = "%s-%s" % (self.groups[0], name) + elif self.groups: + rname = self.groups[0] + else: + # a global source with no reasonable name. just use + # the full url and let the regex below make it even + # uglier. + rname = url_map['url'] # see yum/__init__.py in the yum source, lines 441-449, for # the source of this regex. yum doesn't like anything but # string.ascii_letters, string.digits, and [-_.:]. There @@ -169,6 +181,9 @@ class Source(Bcfg2.Server.Plugin.Debuggable): else: return self.__class__.__name__ + def __repr__(self): + return str(self) + def get_urls(self): return [] urls = property(get_urls) @@ -182,6 +197,10 @@ class Source(Bcfg2.Server.Plugin.Debuggable): if a in metadata.groups] vdict = dict() for agrp in agroups: + if agrp not in self.provides: + self.logger.warning("%s provides no packages for %s" % + (self, agrp)) + continue for key, value in list(self.provides[agrp].items()): if key not in vdict: vdict[key] = set(value) @@ -213,7 +232,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): fname = self.escape_url(url) try: data = fetch_url(url) - file(fname, 'w').write(data) + open(fname, 'w').write(data) except ValueError: self.logger.error("Packages: Bad url string %s" % url) raise diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py index 53344e200..cba3373c1 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py @@ -1,17 +1,14 @@ import os +import re import sys -import time import copy -import glob import socket -import random import logging -import threading import lxml.etree -from UserDict import DictMixin -from subprocess import Popen, PIPE, STDOUT +from subprocess import Popen, PIPE import Bcfg2.Server.Plugin -from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file +from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, URLError, \ + 
ConfigParser from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \ fetch_url @@ -96,19 +93,29 @@ class YumCollection(Collection): if not os.path.exists(self.cachefile): os.mkdir(self.cachefile) - self.configdir = os.path.join(self.basepath, "yum") - if not os.path.exists(self.configdir): - os.mkdir(self.configdir) - self.cfgfile = os.path.join(self.configdir, - "%s-yum.conf" % self.cachekey) + self.cfgfile = os.path.join(self.cachefile, "yum.conf") self.write_config() if has_pulp and self.has_pulp_sources: _setup_pulp(self.setup) + self._helper = None + @property def helper(self): - return self.setup.cfp.get("packages:yum", "helper", - default="/usr/sbin/bcfg2-yum-helper") + try: + return self.setup.cfp.get("packages:yum", "helper") + except: + pass + + if not self._helper: + # first see if bcfg2-yum-helper is in PATH + try: + Popen(['bcfg2-yum-helper'], + stdin=PIPE, stdout=PIPE, stderr=PIPE).wait() + self._helper = 'bcfg2-yum-helper' + except OSError: + self._helper = "/usr/sbin/bcfg2-yum-helper" + return self._helper @property def use_yum(self): @@ -129,11 +136,21 @@ class YumCollection(Collection): yumconf = self.get_config(raw=True) yumconf.add_section("main") - mainopts = dict(cachedir=self.cachefile, + # we set installroot to the cache directory so + # bcfg2-yum-helper works with an empty rpmdb. otherwise + # the rpmdb is so hopelessly intertwined with yum that we + # have to totally reinvent the dependency resolver. 
+ mainopts = dict(cachedir='/', + installroot=self.cachefile, keepcache="0", - sslverify="0", debuglevel="0", + sslverify="0", reposdir="/dev/null") + if self.setup['debug']: + mainopts['debuglevel'] = "5" + elif self.setup['verbose']: + mainopts['debuglevel'] = "2" + try: for opt in self.setup.cfp.options("packages:yum"): if opt not in self.option_blacklist: @@ -162,7 +179,7 @@ class YumCollection(Collection): config.add_section(reponame) added = True except ConfigParser.DuplicateSectionError: - match = re.match("-(\d)", reponame) + match = re.search("-(\d+)", reponame) if match: rid = int(match.group(1)) + 1 else: @@ -186,6 +203,13 @@ class YumCollection(Collection): config.set(reponame, "includepkgs", " ".join(source.whitelist)) + if raw: + opts = source.server_options + else: + opts = source.client_options + for opt, val in opts.items(): + config.set(reponame, opt, val) + if raw: return config else: @@ -346,6 +370,25 @@ class YumCollection(Collection): # for API completeness return self.call_helper("get_provides", package) + def get_groups(self, grouplist): + if not self.use_yum: + self.logger.warning("Packages: Package groups are not supported by " + "Bcfg2's internal Yum dependency generator") + return [] + + if not grouplist: + return dict() + + gdicts = [] + for group, ptype in grouplist: + if group.startswith("@"): + group = group[1:] + if not ptype: + ptype = "default" + gdicts.append(dict(group=group, type=ptype)) + + return self.call_helper("get_groups", gdicts) + def get_group(self, group, ptype="default"): if not self.use_yum: self.logger.warning("Packages: Package groups are not supported by " @@ -355,32 +398,106 @@ class YumCollection(Collection): if group.startswith("@"): group = group[1:] - pkgs = self.call_helper("get_group", dict(group=group, type=ptype)) - return pkgs + return self.call_helper("get_group", dict(group=group, type=ptype)) + + def packages_from_entry(self, entry): + rv = set() + name = entry.get("name") + + def _tag_to_pkg(tag): + 
rv = (name, tag.get("arch"), tag.get("epoch"), + tag.get("version"), tag.get("release")) + # if a package requires no specific version, we just use + # the name, not the tuple. this limits the amount of JSON + # encoding/decoding that has to be done to pass the + # package list to bcfg2-yum-helper. + if rv[1:] == (None, None, None, None): + return name + else: + return rv + + for inst in entry.getchildren(): + if inst.tag != "Instance": + continue + rv.add(_tag_to_pkg(inst)) + if not rv: + rv.add(_tag_to_pkg(entry)) + return list(rv) + + def packages_to_entry(self, pkglist, entry): + def _get_entry_attrs(pkgtup): + attrs = dict(version=self.setup.cfp.get("packages", + "version", + default="auto")) + if attrs['version'] == 'any': + return attrs + + if pkgtup[1]: + attrs['arch'] = pkgtup[1] + if pkgtup[2]: + attrs['epoch'] = pkgtup[2] + if pkgtup[3]: + attrs['version'] = pkgtup[3] + if pkgtup[4]: + attrs['release'] = pkgtup[4] + return attrs + + packages = dict() + for pkg in pkglist: + try: + packages[pkg[0]].append(pkg) + except KeyError: + packages[pkg[0]] = [pkg] + for name, instances in packages.items(): + pkgattrs = dict(type=self.ptype, + origin='Packages', + name=name) + if len(instances) > 1: + pkg_el = lxml.etree.SubElement(entry, 'BoundPackage', + **pkgattrs) + for inst in instances: + lxml.etree.SubElement(pkg_el, "Instance", + _get_entry_attrs(inst)) + else: + attrs = _get_entry_attrs(instances[0]) + attrs.update(pkgattrs) + lxml.etree.SubElement(entry, 'BoundPackage', **attrs) + + def get_new_packages(self, initial, complete): + initial_names = [] + for pkg in initial: + if isinstance(pkg, tuple): + initial_names.append(pkg[0]) + else: + initial_names.append(pkg) + new = [] + for pkg in complete: + if pkg[0] not in initial_names: + new.append(pkg) + return new def complete(self, packagelist): if not self.use_yum: return Collection.complete(self, packagelist) - packages = set() - unknown = set(packagelist) - - if unknown: + if packagelist: result = \ 
self.call_helper("complete", - dict(packages=list(unknown), + dict(packages=list(packagelist), groups=list(self.get_relevant_groups()))) - if result and "packages" in result and "unknown" in result: - # we stringify every package because it gets returned - # in unicode; set.update() doesn't work if some - # elements are unicode and other are strings. (I.e., - # u'foo' and 'foo' get treated as unique elements.) - packages.update([str(p) for p in result['packages']]) - unknown = set([str(p) for p in result['unknown']]) - + if not result: + # some sort of error, reported by call_helper() + return set(), packagelist + # json doesn't understand sets or tuples, so we get back a + # lists of lists (packages) and a list of unicode strings + # (unknown). turn those into a set of tuples and a set of + # strings, respectively. + unknown = set([str(u) for u in result['unknown']]) + packages = set([tuple(p) for p in result['packages']]) self.filter_unknown(unknown) - - return packages, unknown + return packages, unknown + else: + return set(), set() def call_helper(self, command, input=None): """ Make a call to bcfg2-yum-helper. The yum libs have @@ -388,16 +505,12 @@ class YumCollection(Collection): around that in long-running processes it to have a short-lived helper. No, seriously -- check out the yum-updatesd code. It's pure madness. """ - # it'd be nice if we could change this to be more verbose if - # -v was given to bcfg2-server, but Collection objects don't - # get the 'setup' variable, so we don't know how verbose - # bcfg2-server is. It'd also be nice if we could tell yum to - # log to syslog. So would a unicorn. 
cmd = [self.helper, "-c", self.cfgfile] - if self.debug_flag: + verbose = self.debug_flag or self.setup['verbose'] + if verbose: cmd.append("-v") cmd.append(command) - self.debug_log("Packages: running %s" % " ".join(cmd)) + self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose) try: helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) except OSError: @@ -415,9 +528,9 @@ class YumCollection(Collection): if rv: self.logger.error("Packages: error running bcfg2-yum-helper " "(returned %d): %s" % (rv, stderr)) - elif self.debug_flag: + else: self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" % - stderr) + stderr, flag=verbose) try: return json.loads(stdout) except ValueError: @@ -500,15 +613,14 @@ class YumSource(Source): def save_state(self): if not self.use_yum: - cache = file(self.cachefile, 'wb') + cache = open(self.cachefile, 'wb') cPickle.dump((self.packages, self.deps, self.provides, self.filemap, self.url_map), cache, 2) cache.close() - def load_state(self): if not self.use_yum: - data = file(self.cachefile) + data = open(self.cachefile) (self.packages, self.deps, self.provides, self.filemap, self.url_map) = cPickle.load(data) @@ -520,7 +632,7 @@ class YumSource(Source): usettings = [{'version':self.version, 'component':comp, 'arch':arch} for comp in self.components] - else: # rawurl given + else: # rawurl given usettings = [{'version':self.version, 'component':None, 'arch':arch}] @@ -546,6 +658,11 @@ class YumSource(Source): except ValueError: self.logger.error("Packages: Bad url string %s" % rmdurl) return [] + except URLError: + err = sys.exc_info()[1] + self.logger.error("Packages: Failed to fetch url %s. %s" % + (rmdurl, err)) + return [] except HTTPError: err = sys.exc_info()[1] self.logger.error("Packages: Failed to fetch url %s. 
code=%s" % diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py index d789a6d39..d3095300a 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py @@ -11,14 +11,16 @@ from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen from Bcfg2.Server.Plugins.Packages import Collection from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources +yum_config_default = "/etc/yum.repos.d/bcfg2.repo" +apt_config_default = "/etc/apt/sources.d/bcfg2" + class Packages(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.StructureValidator, Bcfg2.Server.Plugin.Generator, Bcfg2.Server.Plugin.Connector, - Bcfg2.Server.Plugin.GoalValidator): + Bcfg2.Server.Plugin.ClientRunHooks): name = 'Packages' conflicts = ['Pkgmgr'] - experimental = True __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload'] def __init__(self, core, datastore): @@ -26,11 +28,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.StructureValidator.__init__(self) Bcfg2.Server.Plugin.Generator.__init__(self) Bcfg2.Server.Plugin.Connector.__init__(self) - Bcfg2.Server.Plugin.Probing.__init__(self) + Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) self.sentinels = set() - self.cachepath = os.path.join(self.data, 'cache') - self.keypath = os.path.join(self.data, 'keys') + self.cachepath = \ + self.core.setup.cfp.get("packages", "cache", + default=os.path.join(self.data, 'cache')) + self.keypath = \ + self.core.setup.cfp.get("packages", "keycache", + default=os.path.join(self.data, 'keys')) if not os.path.exists(self.keypath): # create key directory if needed os.makedirs(self.keypath) @@ -40,11 +46,16 @@ class Packages(Bcfg2.Server.Plugin.Plugin, self.core.setup) def toggle_debug(self): - Bcfg2.Server.Plugin.Plugin.toggle_debug(self) + rv = Bcfg2.Server.Plugin.Plugin.toggle_debug(self) self.sources.toggle_debug() + return rv @property def disableResolver(self): + if 
self.disableMetaData: + # disabling metadata without disabling the resolver Breaks + # Things + return True try: return not self.core.setup.cfp.getboolean("packages", "resolver") except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): @@ -87,16 +98,18 @@ class Packages(Bcfg2.Server.Plugin.Plugin, if entry.tag == 'Package': collection = self._get_collection(metadata) entry.set('version', self.core.setup.cfp.get("packages", - "version", - default="auto")) + "version", + default="auto")) entry.set('type', collection.ptype) elif entry.tag == 'Path': - if (entry.get("name") == self.core.setup.cfp.get("packages", - "yum_config", - default="") or - entry.get("name") == self.core.setup.cfp.get("packages", - "apt_config", - default="")): + if (entry.get("name") == \ + self.core.setup.cfp.get("packages", + "yum_config", + default=yum_config_default) or + entry.get("name") == \ + self.core.setup.cfp.get("packages", + "apt_config", + default=apt_config_default)): self.create_config(entry, metadata) def HandlesEntry(self, entry, metadata): @@ -110,12 +123,14 @@ class Packages(Bcfg2.Server.Plugin.Plugin, return True elif entry.tag == 'Path': # managed entries for yum/apt configs - if (entry.get("name") == self.core.setup.cfp.get("packages", - "yum_config", - default="") or - entry.get("name") == self.core.setup.cfp.get("packages", - "apt_config", - default="")): + if (entry.get("name") == \ + self.core.setup.cfp.get("packages", + "yum_config", + default=yum_config_default) or + entry.get("name") == \ + self.core.setup.cfp.get("packages", + "apt_config", + default=apt_config_default)): return True return False @@ -151,26 +166,24 @@ class Packages(Bcfg2.Server.Plugin.Plugin, # essential pkgs are those marked as such by the distribution essential = collection.get_essential() to_remove = [] + groups = [] for struct in structures: for pkg in struct.xpath('//Package | //BoundPackage'): if pkg.get("name"): - initial.add(pkg.get("name")) + 
initial.update(collection.packages_from_entry(pkg)) elif pkg.get("group"): - try: - if pkg.get("type"): - gpkgs = collection.get_group(pkg.get("group"), - ptype=pkg.get("type")) - else: - gpkgs = collection.get_group(pkg.get("group")) - base.update(gpkgs) - except TypeError: - raise - self.logger.error("Could not resolve group %s" % - pkg.get("group")) + groups.append((pkg.get("group"), + pkg.get("type"))) to_remove.append(pkg) else: self.logger.error("Packages: Malformed Package: %s" % - lxml.etree.tostring(pkg)) + lxml.etree.tostring(pkg, + xml_declaration=False).decode('UTF-8')) + + gpkgs = collection.get_groups(groups) + for group, pkgs in gpkgs.items(): + base.update(pkgs) + base.update(initial | essential) for el in to_remove: el.getparent().remove(el) @@ -179,16 +192,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin, if unknown: self.logger.info("Packages: Got %d unknown entries" % len(unknown)) self.logger.info("Packages: %s" % list(unknown)) - newpkgs = list(packages.difference(initial)) + newpkgs = collection.get_new_packages(initial, packages) self.debug_log("Packages: %d initial, %d complete, %d new" % (len(initial), len(packages), len(newpkgs))) newpkgs.sort() - for pkg in newpkgs: - lxml.etree.SubElement(independent, 'BoundPackage', name=pkg, - version=self.core.setup.cfp.get("packages", - "version", - default="auto"), - type=collection.ptype, origin='Packages') + collection.packages_to_entry(newpkgs, independent) def Refresh(self): '''Packages.Refresh() => True|False\nReload configuration @@ -271,10 +279,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin, collection = self._get_collection(metadata) return dict(sources=collection.get_additional_data()) - def validate_goals(self, metadata, _): - """ we abuse the GoalValidator plugin since validate_goals() - is the very last thing called during a client config run. 
so - we use this to clear the collection cache for this client, - which must persist only the duration of a client run """ + def end_client_run(self, metadata): + """ clear the collection cache for this client, which must + persist only the duration of a client run""" if metadata.hostname in Collection.clients: del Collection.clients[metadata.hostname] + + def end_statistics(self, metadata): + self.end_client_run(metadata) diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py index e9254cdcc..7dac907e1 100644 --- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py +++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py @@ -1,12 +1,17 @@ '''This module implements a package management scheme for all images''' -import logging +import os import re +import glob +import logging +import lxml.etree import Bcfg2.Server.Plugin -import lxml +import Bcfg2.Server.Lint + try: set except NameError: + # deprecated since python 2.6 from sets import Set as set logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr') @@ -24,12 +29,14 @@ class FuzzyDict(dict): print("got non-string key %s" % str(key)) return dict.__getitem__(self, key) - def has_key(self, key): + def __contains__(self, key): if isinstance(key, str): mdata = self.fuzzy.match(key) - if self.fuzzy.match(key): - return dict.has_key(self, mdata.groupdict()['name']) - return dict.has_key(self, key) + if mdata: + return dict.__contains__(self, mdata.groupdict()['name']) + else: + print("got non-string key %s" % str(key)) + return dict.__contains__(self, key) def get(self, key, default=None): try: @@ -167,3 +174,40 @@ class Pkgmgr(Bcfg2.Server.Plugin.PrioDir): def HandleEntry(self, entry, metadata): self.BindEntry(entry, metadata) + + +class PkgmgrLint(Bcfg2.Server.Lint.ServerlessPlugin): + """ find duplicate Pkgmgr entries with the same priority """ + def Run(self): + pset = set() + for pfile in glob.glob(os.path.join(self.config['repo'], 'Pkgmgr', + '*.xml')): + if self.HandlesFile(pfile): + xdata = 
lxml.etree.parse(pfile).getroot() + # get priority, type, group + priority = xdata.get('priority') + ptype = xdata.get('type') + for pkg in xdata.xpath("//Package"): + if pkg.getparent().tag == 'Group': + grp = pkg.getparent().get('name') + if (type(grp) is not str and + grp.getparent().tag == 'Group'): + pgrp = grp.getparent().get('name') + else: + pgrp = 'none' + else: + grp = 'none' + pgrp = 'none' + ptuple = (pkg.get('name'), priority, ptype, grp, pgrp) + # check if package is already listed with same + # priority, type, grp + if ptuple in pset: + self.LintError("duplicate-package", + "Duplicate Package %s, priority:%s, type:%s" % + (pkg.get('name'), priority, ptype)) + else: + pset.add(ptuple) + + @classmethod + def Errors(cls): + return {"duplicate-packages":"error"} diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py index af908eee8..7f300ebe0 100644 --- a/src/lib/Bcfg2/Server/Plugins/Probes.py +++ b/src/lib/Bcfg2/Server/Plugins/Probes.py @@ -1,7 +1,17 @@ +import re +import os +import sys import time -import lxml.etree import operator -import re +import lxml.etree +import Bcfg2.Server +import Bcfg2.Server.Plugin + +try: + from django.db import models + has_django = True +except ImportError: + has_django = False try: import json @@ -14,23 +24,36 @@ except ImportError: has_json = False try: - import syck - has_syck = True + import syck as yaml + has_yaml = True + yaml_error = syck.error except ImportError: - has_syck = False try: import yaml + yaml_error = yaml.YAMLError has_yaml = True except ImportError: has_yaml = False import Bcfg2.Server.Plugin -specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)") -probe_matcher = re.compile("(.*/)?(?P<basename>\S+)") +if has_django: + class ProbesDataModel(models.Model, + Bcfg2.Server.Plugin.PluginDatabaseModel): + hostname = models.CharField(max_length=255) + probe = models.CharField(max_length=255) + timestamp = 
models.DateTimeField(auto_now=True) + data = models.TextField(null=True) + + class ProbesGroupsModel(models.Model, + Bcfg2.Server.Plugin.PluginDatabaseModel): + hostname = models.CharField(max_length=255) + group = models.CharField(max_length=255) + class ClientProbeDataSet(dict): - """ dict of probe => [probe data] that records a for each host """ + """ dict of probe => [probe data] that records a timestamp for + each host """ def __init__(self, *args, **kwargs): if "timestamp" in kwargs and kwargs['timestamp'] is not None: self.timestamp = kwargs.pop("timestamp") @@ -39,61 +62,31 @@ class ClientProbeDataSet(dict): dict.__init__(self, *args, **kwargs) -class ProbeData(object): - """ a ProbeData object emulates a str object, but also has .xdata - and .json properties to provide convenient ways to use ProbeData - objects as XML or JSON data """ +class ProbeData(str): + """ a ProbeData object emulates a str object, but also has .xdata, + .json, and .yaml properties to provide convenient ways to use + ProbeData objects as XML, JSON, or YAML data """ + def __new__(cls, data): + return str.__new__(cls, data) + def __init__(self, data): - self.data = data + str.__init__(self) self._xdata = None self._json = None self._yaml = None - def __str__(self): - return str(self.data) - - def __repr__(self): - return repr(self.data) - - def __getattr__(self, name): - """ make ProbeData act like a str object """ - return getattr(self.data, name) - - def __complex__(self): - return complex(self.data) - - def __int__(self): - return int(self.data) - - def __long__(self): - return long(self.data) - - def __float__(self): - return float(self.data) - - def __eq__(self, other): - return str(self) == str(other) - - def __ne__(self, other): - return str(self) != str(other) - - def __gt__(self, other): - return str(self) > str(other) - - def __lt__(self, other): - return str(self) < str(other) - - def __ge__(self, other): - return self > other or self == other - - def __le__(self, other): - 
return self < other or self == other - + @property + def data(self): + """ provide backwards compatibility with broken ProbeData + object in bcfg2 1.2.0 thru 1.2.2 """ + return str(self) + @property def xdata(self): if self._xdata is None: try: - self._xdata = lxml.etree.XML(self.data) + self._xdata = lxml.etree.XML(self.data, + parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: pass return self._xdata @@ -109,44 +102,30 @@ class ProbeData(object): @property def yaml(self): - if self._yaml is None: - if has_yaml: - try: - self._yaml = yaml.load(self.data) - except yaml.YAMLError: - pass - elif has_syck: - try: - self._yaml = syck.load(self.data) - except syck.error: - pass + if self._yaml is None and has_yaml: + try: + self._yaml = yaml.load(self.data) + except yaml_error: + pass return self._yaml class ProbeSet(Bcfg2.Server.Plugin.EntrySet): ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$") + probename = re.compile("(.*/)?(?P<basename>\S+?)(\.(?P<mode>(?:G\d\d)|H)_\S+)?$") + bangline = re.compile('^#!\s*(?P<interpreter>.*)$') + basename_is_regex = True def __init__(self, path, fam, encoding, plugin_name): - fpattern = '[0-9A-Za-z_\-]+' self.plugin_name = plugin_name - Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, + Bcfg2.Server.Plugin.EntrySet.__init__(self, '[0-9A-Za-z_\-]+', path, Bcfg2.Server.Plugin.SpecificData, encoding) fam.AddMonitor(path, self) - self.bangline = re.compile('^#!(?P<interpreter>.*)$') def HandleEvent(self, event): - if event.filename != self.path: - if (event.code2str == 'changed' and - event.filename.endswith("probed.xml") and - event.filename not in self.entries): - # for some reason, probed.xml is particularly prone to - # getting changed events before created events, - # because gamin is the worst ever. anyhow, we - # specifically handle it here to avoid a warning on - # every single server startup. 
- self.entry_init(event) - return + if (event.filename != self.path and + not event.filename.endswith("probed.xml")): return self.handle_event(event) def get_probe_data(self, metadata): @@ -155,9 +134,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet): candidates = self.get_matching(metadata) candidates.sort(key=operator.attrgetter('specific')) for entry in candidates: - rem = specific_probe_matcher.match(entry.name) - if not rem: - rem = probe_matcher.match(entry.name) + rem = self.probename.match(entry.name) pname = rem.group('basename') if pname not in build: build[pname] = entry @@ -176,30 +153,37 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet): return ret -class Probes(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Probing, - Bcfg2.Server.Plugin.Connector): +class Probes(Bcfg2.Server.Plugin.Probing, + Bcfg2.Server.Plugin.Connector, + Bcfg2.Server.Plugin.DatabaseBacked): """A plugin to gather information from a client machine.""" name = 'Probes' __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.Connector.__init__(self) Bcfg2.Server.Plugin.Probing.__init__(self) + Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore) try: self.probes = ProbeSet(self.data, core.fam, core.encoding, self.name) except: - raise Bcfg2.Server.Plugin.PluginInitError + err = sys.exc_info()[1] + raise Bcfg2.Server.Plugin.PluginInitError(err) self.probedata = dict() self.cgroups = dict() self.load_data() - def write_data(self): + def write_data(self, client): """Write probe data out for use with bcfg2-info.""" + if self._use_db: + return self._write_data_db(client) + else: + return self._write_data_xml(client) + + def _write_data_xml(self, _): top = lxml.etree.Element("Probed") for client, probed in sorted(self.probedata.items()): cx = lxml.etree.SubElement(top, 'Client', name=client, @@ -209,20 +193,47 @@ class Probes(Bcfg2.Server.Plugin.Plugin, 
value=str(self.probedata[client][probe])) for group in sorted(self.cgroups[client]): lxml.etree.SubElement(cx, "Group", name=group) - data = lxml.etree.tostring(top, encoding='UTF-8', - xml_declaration=True, - pretty_print='true') try: - datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w') + datafile = open(os.path.join(self.data, 'probed.xml'), 'w') + datafile.write(lxml.etree.tostring(top, xml_declaration=False, + pretty_print='true').decode('UTF-8')) except IOError: - self.logger.error("Failed to write probed.xml") - datafile.write(data.decode('utf-8')) + err = sys.exc_info()[1] + self.logger.error("Failed to write probed.xml: %s" % err) + + def _write_data_db(self, client): + for probe, data in self.probedata[client.hostname].items(): + pdata = \ + ProbesDataModel.objects.get_or_create(hostname=client.hostname, + probe=probe)[0] + if pdata.data != data: + pdata.data = data + pdata.save() + ProbesDataModel.objects.filter(hostname=client.hostname).exclude(probe__in=self.probedata[client.hostname]).delete() + + for group in self.cgroups[client.hostname]: + try: + ProbesGroupsModel.objects.get(hostname=client.hostname, + group=group) + except ProbesGroupsModel.DoesNotExist: + grp = ProbesGroupsModel(hostname=client.hostname, + group=group) + grp.save() + ProbesGroupsModel.objects.filter(hostname=client.hostname).exclude(group__in=self.cgroups[client.hostname]).delete() def load_data(self): + if self._use_db: + return self._load_data_db() + else: + return self._load_data_xml() + + def _load_data_xml(self): try: - data = lxml.etree.parse(self.data + '/probed.xml').getroot() + data = lxml.etree.parse(os.path.join(self.data, 'probed.xml'), + parser=Bcfg2.Server.XMLParser).getroot() except: - self.logger.error("Failed to read file probed.xml") + err = sys.exc_info()[1] + self.logger.error("Failed to read file probed.xml: %s" % err) return self.probedata = {} self.cgroups = {} @@ -231,12 +242,25 @@ class Probes(Bcfg2.Server.Plugin.Plugin, 
ClientProbeDataSet(timestamp=client.get("timestamp")) self.cgroups[client.get('name')] = [] for pdata in client: - if (pdata.tag == 'Probe'): + if pdata.tag == 'Probe': self.probedata[client.get('name')][pdata.get('name')] = \ - ProbeData(pdata.get('value')) - elif (pdata.tag == 'Group'): + ProbeData(pdata.get("value")) + elif pdata.tag == 'Group': self.cgroups[client.get('name')].append(pdata.get('name')) + def _load_data_db(self): + self.probedata = {} + self.cgroups = {} + for pdata in ProbesDataModel.objects.all(): + if pdata.hostname not in self.probedata: + self.probedata[pdata.hostname] = \ + ClientProbeDataSet(timestamp=time.mktime(pdata.timestamp.timetuple())) + self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data) + for pgroup in ProbesGroupsModel.objects.all(): + if pgroup.hostname not in self.cgroups: + self.cgroups[pgroup.hostname] = [] + self.cgroups[pgroup.hostname].append(pgroup.group) + def GetProbes(self, meta, force=False): """Return a set of probes for execution on client.""" return self.probes.get_probe_data(meta) @@ -246,25 +270,24 @@ class Probes(Bcfg2.Server.Plugin.Plugin, self.probedata[client.hostname] = ClientProbeDataSet() for data in datalist: self.ReceiveDataItem(client, data) - self.write_data() + self.write_data(client) def ReceiveDataItem(self, client, data): """Receive probe results pertaining to client.""" if client.hostname not in self.cgroups: self.cgroups[client.hostname] = [] + if client.hostname not in self.probedata: + self.probedata[client.hostname] = ClientProbeDataSet() if data.text == None: - self.logger.error("Got null response to probe %s from %s" % \ - (data.get('name'), client.hostname)) - try: - self.probedata[client.hostname].update({data.get('name'): + self.logger.info("Got null response to probe %s from %s" % + (data.get('name'), client.hostname)) + self.probedata[client.hostname].update({data.get('name'): ProbeData('')}) - except KeyError: - self.probedata[client.hostname] = \ - 
ClientProbeDataSet([(data.get('name'), ProbeData(''))]) return dlines = data.text.split('\n') - self.logger.debug("%s:probe:%s:%s" % (client.hostname, - data.get('name'), [line.strip() for line in dlines])) + self.logger.debug("%s:probe:%s:%s" % + (client.hostname, data.get('name'), + [line.strip() for line in dlines])) for line in dlines[:]: if line.split(':')[0] == 'group': newgroup = line.split(':')[1].strip() @@ -272,11 +295,7 @@ class Probes(Bcfg2.Server.Plugin.Plugin, self.cgroups[client.hostname].append(newgroup) dlines.remove(line) dobj = ProbeData("\n".join(dlines)) - try: - self.probedata[client.hostname].update({data.get('name'): dobj}) - except KeyError: - self.probedata[client.hostname] = \ - ClientProbeDataSet([(data.get('name'), dobj)]) + self.probedata[client.hostname].update({data.get('name'): dobj}) def get_additional_groups(self, meta): return self.cgroups.get(meta.hostname, list()) diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py index 680881858..78019933a 100644 --- a/src/lib/Bcfg2/Server/Plugins/Properties.py +++ b/src/lib/Bcfg2/Server/Plugins/Properties.py @@ -5,26 +5,53 @@ import copy import logging import lxml.etree import Bcfg2.Server.Plugin +try: + from Bcfg2.Encryption import ssl_decrypt, EVPError + have_crypto = True +except ImportError: + have_crypto = False + +logger = logging.getLogger(__name__) + +SETUP = None + +def passphrases(): + section = "encryption" + if SETUP.cfp.has_section(section): + return dict([(o, SETUP.cfp.get(section, o)) + for o in SETUP.cfp.options(section)]) + else: + return dict() -logger = logging.getLogger('Bcfg2.Plugins.Properties') class PropertyFile(Bcfg2.Server.Plugin.StructFile): """Class for properties files.""" def write(self): """ Write the data in this data structure back to the property file """ - if self.validate_data(): - try: - open(self.name, - "wb").write(lxml.etree.tostring(self.xdata, - pretty_print=True)) - return True - except IOError: - err 
= sys.exc_info()[1] - logger.error("Failed to write %s: %s" % (self.name, err)) - return False - else: - return False + if not SETUP.cfp.getboolean("properties", "writes_enabled", + default=True): + msg = "Properties files write-back is disabled in the configuration" + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + try: + self.validate_data() + except Bcfg2.Server.Plugin.PluginExecutionError: + msg = "Cannot write %s: %s" % (self.name, sys.exc_info()[1]) + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + + try: + open(self.name, + "wb").write(lxml.etree.tostring(self.xdata, + xml_declaration=False, + pretty_print=True).decode('UTF-8')) + return True + except IOError: + err = sys.exc_info()[1] + msg = "Failed to write %s: %s" % (self.name, err) + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) def validate_data(self): """ ensure that the data in this object validates against the @@ -34,19 +61,51 @@ class PropertyFile(Bcfg2.Server.Plugin.StructFile): try: schema = lxml.etree.XMLSchema(file=schemafile) except: - logger.error("Failed to process schema for %s" % self.name) - return False + err = sys.exc_info()[1] + raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to process schema for %s: %s" % (self.name, err)) else: # no schema exists return True if not schema.validate(self.xdata): - logger.error("Data for %s fails to validate; run bcfg2-lint for " - "more details" % self.name) - return False + raise Bcfg2.Server.Plugin.PluginExecutionError("Data for %s fails to validate; run bcfg2-lint for more details" % self.name) else: return True + def Index(self): + Bcfg2.Server.Plugin.StructFile.Index(self) + if self.xdata.get("encryption", "false").lower() != "false": + if not have_crypto: + msg = "Properties: M2Crypto is not available: %s" % self.name + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + for el in self.xdata.xpath("*[@encrypted]"): + try: + el.text = 
self._decrypt(el) + except EVPError: + msg = "Failed to decrypt %s element in %s" % (el.tag, + self.name) + logger.error(msg) + raise Bcfg2.Server.PluginExecutionError(msg) + + def _decrypt(self, element): + if not element.text.strip(): + return + passes = passphrases() + try: + passphrase = passes[element.get("encrypted")] + try: + return ssl_decrypt(element.text, passphrase) + except EVPError: + # error is raised below + pass + except KeyError: + for passwd in passes.values(): + try: + return ssl_decrypt(element.text, passwd) + except EVPError: + pass + raise EVPError("Failed to decrypt") class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): __child__ = PropertyFile @@ -62,6 +121,7 @@ class Properties(Bcfg2.Server.Plugin.Plugin, name = 'Properties' def __init__(self, core, datastore): + global SETUP Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.Connector.__init__(self) try: @@ -72,5 +132,16 @@ class Properties(Bcfg2.Server.Plugin.Plugin, (e.strerror, e.filename)) raise Bcfg2.Server.Plugin.PluginInitError - def get_additional_data(self, _): - return copy.copy(self.store.entries) + SETUP = core.setup + + def get_additional_data(self, metadata): + autowatch = self.core.setup.cfp.getboolean("properties", "automatch", + default=False) + rv = dict() + for fname, pfile in self.store.entries.items(): + if (autowatch or + pfile.xdata.get("automatch", "false").lower() == "true"): + rv[fname] = pfile.XMLMatch(metadata) + else: + rv[fname] = copy.copy(pfile) + return rv diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py new file mode 100644 index 000000000..46182e9a2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py @@ -0,0 +1,117 @@ +import os +import Bcfg2.Server +import Bcfg2.Server.Plugin +from subprocess import Popen, PIPE + +try: + from syck import load as yaml_load, error as yaml_error +except ImportError: + try: + from yaml import load as yaml_load, YAMLError as 
yaml_error + except ImportError: + raise ImportError("No yaml library could be found") + +class PuppetENCFile(Bcfg2.Server.Plugin.FileBacked): + def HandleEvent(self, event=None): + return + + +class PuppetENC(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector, + Bcfg2.Server.Plugin.ClientRunHooks, + Bcfg2.Server.Plugin.DirectoryBacked): + """ A plugin to run Puppet external node classifiers + (http://docs.puppetlabs.com/guides/external_nodes.html) """ + name = 'PuppetENC' + experimental = True + __child__ = PuppetENCFile + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) + Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, + self.core.fam) + self.cache = dict() + + def _run_encs(self, metadata): + cache = dict(groups=[], params=dict()) + for enc in self.entries.keys(): + epath = os.path.join(self.data, enc) + self.debug_log("PuppetENC: Running ENC %s for %s" % + (enc, metadata.hostname)) + proc = Popen([epath, metadata.hostname], stdin=PIPE, stdout=PIPE, + stderr=PIPE) + (out, err) = proc.communicate() + rv = proc.wait() + if rv != 0: + msg = "PuppetENC: Error running ENC %s for %s (%s): %s" % \ + (enc, metadata.hostname, rv) + self.logger.error("%s: %s" % (msg, err)) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + if err: + self.debug_log("ENC Error: %s" % err) + + try: + yaml = yaml_load(out) + self.debug_log("Loaded data from %s for %s: %s" % + (enc, metadata.hostname, yaml)) + except yaml_error: + err = sys.exc_info()[1] + msg = "Error decoding YAML from %s for %s: %s" % \ + (enc, metadata.hostname, err) + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + + groups = [] + if "classes" in yaml: + # stock Puppet ENC output format + groups = yaml['classes'] + elif "groups" in yaml: + # more Bcfg2-ish output format + groups = yaml['groups'] + if groups: + if 
isinstance(groups, list): + self.debug_log("ENC %s adding groups to %s: %s" % + (enc, metadata.hostname, groups)) + cache['groups'].extend(groups) + else: + self.debug_log("ENC %s adding groups to %s: %s" % + (enc, metadata.hostname, groups.keys())) + for group, params in groups.items(): + cache['groups'].append(group) + if params: + cache['params'].update(params) + if "parameters" in yaml and yaml['parameters']: + cache['params'].update(yaml['parameters']) + if "environment" in yaml: + self.logger.info("Ignoring unsupported environment section of " + "ENC %s for %s" % (enc, metadata.hostname)) + + self.cache[metadata.hostname] = cache + + def get_additional_groups(self, metadata): + if metadata.hostname not in self.cache: + self._run_encs(metadata) + return self.cache[metadata.hostname]['groups'] + + def get_additional_data(self, metadata): + if metadata.hostname not in self.cache: + self._run_encs(metadata) + return self.cache[metadata.hostname]['params'] + + def end_client_run(self, metadata): + """ clear the entire cache at the end of each client run. this + guarantees that each client will run all ENCs at or near the + start of each run; we have to clear the entire cache instead + of just the cache for this client because a client that builds + templates that use metadata for other clients will populate + the cache for those clients, which we don't want. 
This makes + the caching less than stellar, but it does prevent multiple + runs of ENCs for a single host a) for groups and data + separately; and b) when a single client's metadata is + generated multiple times by separate templates """ + self.cache = dict() + + def end_statistics(self, metadata): + self.end_client_run(self, metadata) diff --git a/src/lib/Bcfg2/Server/Plugins/SEModules.py b/src/lib/Bcfg2/Server/Plugins/SEModules.py new file mode 100644 index 000000000..62b3fb10a --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/SEModules.py @@ -0,0 +1,45 @@ +import os +import logging +import Bcfg2.Server.Plugin +from Bcfg2.Bcfg2Py3k import b64encode + +logger = logging.getLogger(__name__) + +class SEModuleData(Bcfg2.Server.Plugin.SpecificData): + def bind_entry(self, entry, _): + entry.set('encoding', 'base64') + entry.text = b64encode(self.data) + + +class SEModules(Bcfg2.Server.Plugin.GroupSpool): + """ Handle SELinux 'module' entries """ + name = 'SEModules' + __author__ = 'chris.a.st.pierre@gmail.com' + es_child_cls = SEModuleData + entry_type = 'SELinux' + experimental = True + + def _get_module_name(self, entry): + """ GroupSpool stores entries as /foo.pp, but we want people + to be able to specify module entries as name='foo' or + name='foo.pp', so we put this abstraction in between """ + if entry.get("name").endswith(".pp"): + name = entry.get("name") + else: + name = entry.get("name") + ".pp" + return "/" + name + + def HandlesEntry(self, entry, metadata): + if entry.tag in self.Entries and entry.get('type') == 'module': + return self._get_module_name(entry) in self.Entries[entry.tag] + return Bcfg2.Server.Plugin.GroupSpool.HandlesEntry(self, entry, + metadata) + + def HandleEntry(self, entry, metadata): + entry.set("name", self._get_module_name(entry)) + return self.Entries[entry.tag][entry.get("name")](entry, metadata) + + def add_entry(self, event): + self.filename_pattern = \ + os.path.basename(os.path.dirname(self.event_path(event))) + 
Bcfg2.Server.Plugin.GroupSpool.add_entry(self, event) diff --git a/src/lib/Bcfg2/Server/Plugins/SGenshi.py b/src/lib/Bcfg2/Server/Plugins/SGenshi.py deleted file mode 100644 index 0ba08125e..000000000 --- a/src/lib/Bcfg2/Server/Plugins/SGenshi.py +++ /dev/null @@ -1,97 +0,0 @@ -'''This module implements a templating generator based on Genshi''' - -import genshi.input -import genshi.template -import lxml.etree -import logging -import copy -import sys -import os.path - -import Bcfg2.Server.Plugin -import Bcfg2.Server.Plugins.TGenshi - -logger = logging.getLogger('Bcfg2.Plugins.SGenshi') - - -class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile, - Bcfg2.Server.Plugin.StructFile): - def __init__(self, name, specific, encoding): - Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name, - specific, encoding) - Bcfg2.Server.Plugin.StructFile.__init__(self, name) - - def get_xml_value(self, metadata): - if not hasattr(self, 'template'): - logger.error("No parsed template information for %s" % (self.name)) - raise Bcfg2.Server.Plugin.PluginExecutionError - try: - stream = self.template.generate(metadata=metadata).filter( \ - Bcfg2.Server.Plugins.TGenshi.removecomment) - data = lxml.etree.XML(stream.render('xml', strip_whitespace=False)) - bundlename = os.path.splitext(os.path.basename(self.name))[0] - bundle = lxml.etree.Element('Bundle', name=bundlename) - for item in self.Match(metadata, data): - bundle.append(copy.deepcopy(item)) - return bundle - except LookupError: - lerror = sys.exc_info()[1] - logger.error('Genshi lookup error: %s' % lerror) - except genshi.template.TemplateError: - terror = sys.exc_info()[1] - logger.error('Genshi template error: %s' % terror) - raise - except genshi.input.ParseError: - perror = sys.exc_info()[1] - logger.error('Genshi parse error: %s' % perror) - raise - - def Match(self, metadata, xdata): - """Return matching fragments of parsed template.""" - rv = [] - for child in xdata.getchildren(): - 
rv.extend(self._match(child, metadata)) - logger.debug("File %s got %d match(es)" % (self.name, len(rv))) - return rv - -class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet): - - def __init__(self, path, fam, encoding): - fpattern = '\S+\.xml' - Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, - SGenshiTemplateFile, encoding) - fam.AddMonitor(path, self) - - def HandleEvent(self, event): - '''passthrough event handler for old calling convention''' - if event.filename != self.path: - return self.handle_event(event) - - def BuildStructures(self, metadata): - """Build SGenshi structures.""" - ret = [] - for entry in self.get_matching(metadata): - try: - ret.append(entry.get_xml_value(metadata)) - except: - logger.error("SGenshi: Failed to template file %s" % entry.name) - return ret - - -class SGenshi(SGenshiEntrySet, - Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Structure): - """The SGenshi plugin provides templated structures.""" - name = 'SGenshi' - __author__ = 'bcfg-dev@mcs.anl.gov' - deprecated = True - - def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) - Bcfg2.Server.Plugin.Structure.__init__(self) - try: - SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding) - except: - logger.error("Failed to load %s repository; disabling %s" \ - % (self.name, self.name)) - raise Bcfg2.Server.Plugin.PluginInitError diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py index a1a29727f..cbe8d0d9b 100644 --- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py +++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py @@ -1,20 +1,16 @@ """This module manages ssh key files for bcfg2""" -import binascii import re import os +import sys import socket import shutil -import sys +import logging import tempfile from subprocess import Popen, PIPE import Bcfg2.Server.Plugin -from Bcfg2.Bcfg2Py3k import u_str +from Bcfg2.Bcfg2Py3k import u_str, reduce, b64encode -if sys.hexversion >= 
0x03000000: - from functools import reduce - -import logging logger = logging.getLogger(__name__) class KeyData(Bcfg2.Server.Plugin.SpecificData): @@ -31,7 +27,7 @@ class KeyData(Bcfg2.Server.Plugin.SpecificData): def bind_entry(self, entry, metadata): entry.set('type', 'file') if entry.get('encoding') == 'base64': - entry.text = binascii.b2a_base64(self.data) + entry.text = b64encode(self.data) else: try: entry.text = u_str(self.data, self.encoding) diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py index 0072dc62d..9d1c51a08 100644 --- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py +++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py @@ -3,12 +3,15 @@ import Bcfg2.Options import lxml.etree import posixpath import tempfile -import pipes import os from subprocess import Popen, PIPE, STDOUT # Compatibility import from Bcfg2.Bcfg2Py3k import ConfigParser +try: + from hashlib import md5 +except ImportError: + from md5 import md5 class SSLCA(Bcfg2.Server.Plugin.GroupSpool): """ @@ -22,6 +25,10 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool): cert_specs = {} CAs = {} + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.GroupSpool.__init__(self, core, datastore) + self.infoxml = dict() + def HandleEvent(self, event=None): """ Updates which files this plugin handles based upon filesystem events. 
@@ -37,19 +44,21 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool): else: ident = self.handles[event.requestID][:-1] - fname = "".join([ident, '/', event.filename]) + fname = os.path.join(ident, event.filename) if event.filename.endswith('.xml'): if action in ['exists', 'created', 'changed']: if event.filename.endswith('key.xml'): - key_spec = dict(list(lxml.etree.parse(epath).find('Key').items())) + key_spec = dict(list(lxml.etree.parse(epath, + parser=Bcfg2.Server.XMLParser).find('Key').items())) self.key_specs[ident] = { 'bits': key_spec.get('bits', 2048), 'type': key_spec.get('type', 'rsa') } self.Entries['Path'][ident] = self.get_key elif event.filename.endswith('cert.xml'): - cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items())) + cert_spec = dict(list(lxml.etree.parse(epath, + parser=Bcfg2.Server.XMLParser).find('Cert').items())) ca = cert_spec.get('ca', 'default') self.cert_specs[ident] = { 'ca': ca, @@ -67,6 +76,9 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool): cp.read(self.core.cfile) self.CAs[ca] = dict(cp.items('sslca_' + ca)) self.Entries['Path'][ident] = self.get_cert + elif event.filename.endswith("info.xml"): + self.infoxml[ident] = Bcfg2.Server.Plugin.InfoXML(epath) + self.infoxml[ident].HandleEvent(event) if action == 'deleted': if ident in self.Entries['Path']: del self.Entries['Path'][ident] @@ -90,28 +102,27 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool): either grabs a prexisting key hostfile, or triggers the generation of a new key if one doesn't exist. 
""" - # set path type and permissions, otherwise bcfg2 won't bind the file - permdata = {'owner': 'root', - 'group': 'root', - 'type': 'file', - 'perms': '644'} - [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] - # check if we already have a hostfile, or need to generate a new key # TODO: verify key fits the specs path = entry.get('name') - filename = "".join([path, '/', path.rsplit('/', 1)[1], - '.H_', metadata.hostname]) + filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path), + metadata.hostname)) if filename not in list(self.entries.keys()): key = self.build_key(filename, entry, metadata) open(self.data + filename, 'w').write(key) entry.text = key - self.entries[filename] = self.__child__("%s%s" % (self.data, - filename)) + self.entries[filename] = self.__child__(self.data + filename) self.entries[filename].HandleEvent() else: entry.text = self.entries[filename].data + entry.set("type", "file") + if path in self.infoxml: + Bcfg2.Server.Plugin.bind_info(entry, metadata, + infoxml=self.infoxml[path]) + else: + Bcfg2.Server.Plugin.bind_info(entry, metadata) + def build_key(self, filename, entry, metadata): """ generates a new key according the the specification @@ -130,56 +141,61 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool): either grabs a prexisting cert hostfile, or triggers the generation of a new cert if one doesn't exist. 
""" - # set path type and permissions, otherwise bcfg2 won't bind the file - permdata = {'owner': 'root', - 'group': 'root', - 'type': 'file', - 'perms': '644'} - [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] - path = entry.get('name') - filename = "".join([path, '/', path.rsplit('/', 1)[1], - '.H_', metadata.hostname]) + filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path), + metadata.hostname)) # first - ensure we have a key to work with key = self.cert_specs[entry.get('name')].get('key') - key_filename = "".join([key, '/', key.rsplit('/', 1)[1], - '.H_', metadata.hostname]) + key_filename = os.path.join(key, "%s.H_%s" % (os.path.basename(key), + metadata.hostname)) if key_filename not in self.entries: e = lxml.etree.Element('Path') - e.attrib['name'] = key + e.set('name', key) self.core.Bind(e, metadata) # check if we have a valid hostfile - if filename in list(self.entries.keys()) and self.verify_cert(filename, - key_filename, - entry): + if (filename in list(self.entries.keys()) and + self.verify_cert(filename, key_filename, entry)): entry.text = self.entries[filename].data else: cert = self.build_cert(key_filename, entry, metadata) open(self.data + filename, 'w').write(cert) - self.entries[filename] = self.__child__("%s%s" % (self.data, - filename)) + self.entries[filename] = self.__child__(self.data + filename) self.entries[filename].HandleEvent() entry.text = cert + entry.set("type", "file") + if path in self.infoxml: + Bcfg2.Server.Plugin.bind_info(entry, metadata, + infoxml=self.infoxml[path]) + else: + Bcfg2.Server.Plugin.bind_info(entry, metadata) + def verify_cert(self, filename, key_filename, entry): - if self.verify_cert_against_ca(filename, entry): - if self.verify_cert_against_key(filename, key_filename): - return True - return False + do_verify = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('verify_certs', True) + if do_verify: + return (self.verify_cert_against_ca(filename, entry) and + 
self.verify_cert_against_key(filename, key_filename)) + return True def verify_cert_against_ca(self, filename, entry): """ check that a certificate validates against the ca cert, and that it has not expired. """ - chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert') + chaincert = \ + self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert') cert = self.data + filename - res = Popen(["openssl", "verify", "-CAfile", chaincert, cert], + res = Popen(["openssl", "verify", "-untrusted", chaincert, "-purpose", + "sslserver", cert], stdout=PIPE, stderr=STDOUT).stdout.read() if res == cert + ": OK\n": + self.debug_log("SSLCA: %s verified successfully against CA" % + entry.get("name")) return True + self.logger.warning("SSLCA: %s failed verification against CA: %s" % + (entry.get("name"), res)) return False def verify_cert_against_key(self, filename, key_filename): @@ -188,14 +204,20 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool): """ cert = self.data + filename key = self.data + key_filename - cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" % - pipes.quote(cert)) - cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read() - cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" % - pipes.quote(key)) - key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read() + cert_md5 = \ + md5(Popen(["openssl", "x509", "-noout", "-modulus", "-in", cert], + stdout=PIPE, + stderr=STDOUT).stdout.read().strip()).hexdigest() + key_md5 = \ + md5(Popen(["openssl", "rsa", "-noout", "-modulus", "-in", key], + stdout=PIPE, + stderr=STDOUT).stdout.read().strip()).hexdigest() if cert_md5 == key_md5: + self.debug_log("SSLCA: %s verified successfully against key %s" % + (filename, key_filename)) return True + self.logger.warning("SSLCA: %s failed verification against key %s" % + (filename, key_filename)) return False def build_cert(self, key_filename, entry, metadata): diff --git 
a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py new file mode 100644 index 000000000..aad92b7c7 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py @@ -0,0 +1,32 @@ +import Bcfg2.Server.Plugin + +class ServiceCompat(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.StructureValidator): + """ Use old-style service modes for older clients """ + name = 'ServiceCompat' + __author__ = 'bcfg-dev@mcs.anl.gov' + mode_map = {('true', 'true'): 'default', + ('interactive', 'true'): 'interactive_only', + ('false', 'false'): 'manual'} + + def validate_structures(self, metadata, structures): + """ Apply defaults """ + if metadata.version_info and metadata.version_info > (1, 3, 0, '', 0): + # do not care about a client that is _any_ 1.3.0 release + # (including prereleases and RCs) + return + + for struct in structures: + for entry in struct.xpath("//BoundService|//Service"): + mode_key = (entry.get("restart", "true").lower(), + entry.get("install", "true").lower()) + try: + mode = self.mode_map[mode_key] + except KeyError: + self.logger.info("Could not map restart and install " + "settings of %s:%s to an old-style " + "Service mode for %s; using 'manual'" % + (entry.tag, entry.get("name"), + metadata.hostname)) + mode = "manual" + entry.set("mode", mode) diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py index aeb3b9f74..e62638b4f 100644 --- a/src/lib/Bcfg2/Server/Plugins/Snapshots.py +++ b/src/lib/Bcfg2/Server/Plugins/Snapshots.py @@ -1,9 +1,5 @@ -#import lxml.etree import logging -import binascii import difflib -#import sqlalchemy -#import sqlalchemy.orm import Bcfg2.Server.Plugin import Bcfg2.Server.Snapshots import Bcfg2.Logger @@ -13,7 +9,7 @@ import time import threading # Compatibility import -from Bcfg2.Bcfg2Py3k import Queue +from Bcfg2.Bcfg2Py3k import Queue, u_str, b64decode logger = logging.getLogger('Snapshots') @@ -28,14 +24,6 @@ datafields = { } -# py3k 
compatibility -def u_str(string): - if sys.hexversion >= 0x03000000: - return string - else: - return unicode(string) - - def build_snap_ent(entry): basefields = [] if entry.tag in ['Package', 'Service']: @@ -52,13 +40,12 @@ def build_snap_ent(entry): if entry.get('encoding', 'ascii') == 'ascii': desired['contents'] = u_str(entry.text) else: - desired['contents'] = u_str(binascii.a2b_base64(entry.text)) + desired['contents'] = u_str(b64decode(entry.text)) if 'current_bfile' in entry.attrib: - state['contents'] = u_str(binascii.a2b_base64( \ - entry.get('current_bfile'))) + state['contents'] = u_str(b64decode(entry.get('current_bfile'))) elif 'current_bdiff' in entry.attrib: - diff = binascii.a2b_base64(entry.get('current_bdiff')) + diff = b64decode(entry.get('current_bdiff')) state['contents'] = u_str( \ '\n'.join(difflib.restore(diff.split('\n'), 1))) @@ -69,14 +56,12 @@ def build_snap_ent(entry): return [desired, state] -class Snapshots(Bcfg2.Server.Plugin.Statistics, - Bcfg2.Server.Plugin.Plugin): +class Snapshots(Bcfg2.Server.Plugin.Statistics): name = 'Snapshots' experimental = True def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) - Bcfg2.Server.Plugin.Statistics.__init__(self) + Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore) self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile) self.work_queue = Queue() self.loader = threading.Thread(target=self.load_snapshot) diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py index 265ef95a8..984efb76c 100644 --- a/src/lib/Bcfg2/Server/Plugins/Statistics.py +++ b/src/lib/Bcfg2/Server/Plugins/Statistics.py @@ -1,15 +1,14 @@ '''This file manages the statistics collected by the BCFG2 Server''' -import binascii import copy import difflib import logging -from lxml.etree import XML, SubElement, Element, XMLSyntaxError import lxml.etree import os +import sys from time import asctime, localtime, time, 
strptime, mktime import threading - +from Bcfg2.Bcfg2Py3k import b64decode import Bcfg2.Server.Plugin @@ -19,7 +18,7 @@ class StatisticsStore(object): def __init__(self, filename): self.filename = filename - self.element = Element('Dummy') + self.element = lxml.etree.Element('Dummy') self.dirty = 0 self.lastwrite = 0 self.logger = logging.getLogger('Bcfg2.Server.Statistics') @@ -35,7 +34,8 @@ class StatisticsStore(object): ioerr = sys.exc_info()[1] self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr)) else: - fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True)) + fout.write(lxml.etree.tostring(self.element, + xml_declaration=False).decode('UTF-8')) fout.close() os.rename(self.filename + '.new', self.filename) self.dirty = 0 @@ -47,11 +47,11 @@ class StatisticsStore(object): fin = open(self.filename, 'r') data = fin.read() fin.close() - self.element = XML(data) + self.element = lxml.etree.XML(data) self.dirty = 0 - except (IOError, XMLSyntaxError): + except (IOError, lxml.etree.XMLSyntaxError): self.logger.error("Creating new statistics file %s"%(self.filename)) - self.element = Element('ConfigStatistics') + self.element = lxml.etree.Element('ConfigStatistics') self.WriteBack() self.dirty = 0 @@ -77,7 +77,7 @@ class StatisticsStore(object): nummatch = len(nodes) if nummatch == 0: # Create an entry for this node - node = SubElement(self.element, 'Node', name=client) + node = lxml.etree.SubElement(self.element, 'Node', name=client) elif nummatch == 1 and not node_dirty: # Delete old instance node = nodes[0] @@ -112,13 +112,11 @@ class StatisticsStore(object): return (now-utime) > secondsPerDay -class Statistics(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.ThreadedStatistics, +class Statistics(Bcfg2.Server.Plugin.ThreadedStatistics, Bcfg2.Server.Plugin.PullSource): name = 'Statistics' def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) 
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore) Bcfg2.Server.Plugin.PullSource.__init__(self) fpath = "%s/etc/statistics.xml" % datastore @@ -151,9 +149,9 @@ class Statistics(Bcfg2.Server.Plugin.Plugin, if cfentry.get('sensitive') in ['true', 'True']: raise Bcfg2.Server.Plugin.PluginExecutionError elif 'current_bfile' in cfentry.attrib: - contents = binascii.a2b_base64(cfentry.get('current_bfile')) + contents = b64decode(cfentry.get('current_bfile')) elif 'current_bdiff' in cfentry.attrib: - diff = binascii.a2b_base64(cfentry.get('current_bdiff')) + diff = b64decode(cfentry.get('current_bdiff')) contents = '\n'.join(difflib.restore(diff.split('\n'), 1)) else: contents = None diff --git a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py deleted file mode 100644 index f4232ad5c..000000000 --- a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py +++ /dev/null @@ -1,10 +0,0 @@ -"""This generator provides service mappings.""" - -import Bcfg2.Server.Plugin - - -class Svcmgr(Bcfg2.Server.Plugin.PrioDir): - """This is a generator that handles service assignments.""" - name = 'Svcmgr' - __author__ = 'bcfg-dev@mcs.anl.gov' - deprecated = True diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py index 8879fdef1..2bf475363 100644 --- a/src/lib/Bcfg2/Server/Plugins/TCheetah.py +++ b/src/lib/Bcfg2/Server/Plugins/TCheetah.py @@ -1,13 +1,11 @@ '''This module implements a templating generator based on Cheetah''' -import binascii import logging import sys import traceback import Bcfg2.Server.Plugin -# py3k compatibility -if sys.hexversion >= 0x03000000: - unicode = str + +from Bcfg2.Bcfg2Py3k import unicode, b64encode logger = logging.getLogger('Bcfg2.Plugins.TCheetah') @@ -60,7 +58,7 @@ class TemplateFile: else: if entry.get('encoding') == 'base64': # take care of case where file needs base64 encoding - entry.text = binascii.b2a_base64(self.template) + entry.text = b64encode(self.template) else: 
entry.text = unicode(str(self.template), self.encoding) except: @@ -78,3 +76,4 @@ class TCheetah(Bcfg2.Server.Plugin.GroupSpool): __author__ = 'bcfg-dev@mcs.anl.gov' filename_pattern = 'template' es_child_cls = TemplateFile + deprecated = True diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py index c4dd40614..c7335a0c0 100644 --- a/src/lib/Bcfg2/Server/Plugins/TGenshi.py +++ b/src/lib/Bcfg2/Server/Plugins/TGenshi.py @@ -1,12 +1,10 @@ """This module implements a templating generator based on Genshi.""" -import binascii import logging import sys import Bcfg2.Server.Plugin -# py3k compatibility -if sys.hexversion >= 0x03000000: - unicode = str + +from Bcfg2.Bcfg2Py3k import unicode, b64encode logger = logging.getLogger('Bcfg2.Plugins.TGenshi') @@ -18,7 +16,7 @@ try: TextTemplate, MarkupTemplate, TemplateError except ImportError: logger.error("TGenshi: Failed to import Genshi. Is it installed?") - raise Bcfg2.Server.Plugin.PluginInitError + raise try: from genshi.template import NewTextTemplate have_ntt = True @@ -33,7 +31,7 @@ def removecomment(stream): yield kind, data, pos -class TemplateFile: +class TemplateFile(object): """Template file creates Genshi template structures for the loaded file.""" def __init__(self, name, specific, encoding): @@ -99,7 +97,7 @@ class TemplateFile: else: if entry.get('encoding') == 'base64': # take care of case where file needs base64 encoding - entry.text = binascii.b2a_base64(textdata) + entry.text = b64encode(textdata) else: entry.text = unicode(textdata, self.encoding) else: @@ -123,6 +121,10 @@ class TemplateFile: raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err) +class TemplateEntrySet(Bcfg2.Server.Plugin.EntrySet): + basename_is_regex = True + + class TGenshi(Bcfg2.Server.Plugin.GroupSpool): """ The TGenshi generator implements a templating @@ -132,4 +134,6 @@ class TGenshi(Bcfg2.Server.Plugin.GroupSpool): name = 'TGenshi' __author__ = 
'jeff@ocjtech.us' filename_pattern = 'template\.(txt|newtxt|xml)' + es_cls = TemplateEntrySet es_child_cls = TemplateFile + deprecated = True diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py index 2c0ee03e0..6d92bb530 100644 --- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py +++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py @@ -1,26 +1,23 @@ import re import imp import sys +import glob import logging +import Bcfg2.Server.Lint import Bcfg2.Server.Plugin logger = logging.getLogger(__name__) -class HelperModule(Bcfg2.Server.Plugin.SpecificData): - _module_name_re = re.compile(r'([^/]+?)\.py') - - def __init__(self, name, specific, encoding): - Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific, - encoding) - match = self._module_name_re.search(self.name) - if match: - self._module_name = match.group(1) - else: - self._module_name = name +module_pattern = r'(?P<filename>(?P<module>[^\/]+)\.py)$' +module_re = re.compile(module_pattern) + +class HelperModule(Bcfg2.Server.Plugin.FileBacked): + def __init__(self, name, fam=None): + Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam) + self._module_name = module_re.search(self.name).group('module') self._attrs = [] - def handle_event(self, event): - Bcfg2.Server.Plugin.SpecificData.handle_event(self, event) + def Index(self): try: module = imp.load_source(self._module_name, self.name) except: @@ -34,32 +31,29 @@ class HelperModule(Bcfg2.Server.Plugin.SpecificData): self.name) return + newattrs = [] for sym in module.__export__: if sym not in self._attrs and hasattr(self, sym): logger.warning("TemplateHelper: %s: %s is a reserved keyword, " "skipping export" % (self.name, sym)) - setattr(self, sym, getattr(module, sym)) + continue + try: + setattr(self, sym, getattr(module, sym)) + newattrs.append(sym) + except AttributeError: + logger.warning("TemplateHelper: %s: %s exports %s, but has no " + "such attribute" % (self.name, sym)) # remove 
old exports - for sym in set(self._attrs) - set(module.__export__): + for sym in set(self._attrs) - set(newattrs): delattr(self, sym) - self._attrs = module.__export__ + self._attrs = newattrs -class HelperSet(Bcfg2.Server.Plugin.EntrySet): +class HelperSet(Bcfg2.Server.Plugin.DirectoryBacked): ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\.py[co])$") - - def __init__(self, path, fam, encoding, plugin_name): - fpattern = '[0-9A-Za-z_\-]+\.py' - self.plugin_name = plugin_name - Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, - HelperModule, encoding) - fam.AddMonitor(path, self) - - def HandleEvent(self, event): - if (event.filename != self.path and - not self.ignore.match(event.filename)): - return self.handle_event(event) + patterns = module_re + __child__ = HelperModule class TemplateHelper(Bcfg2.Server.Plugin.Plugin, @@ -71,13 +65,69 @@ class TemplateHelper(Bcfg2.Server.Plugin.Plugin, def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.Connector.__init__(self) + self.helpers = HelperSet(self.data, core.fam) + + def get_additional_data(self, _): + return dict([(h._module_name, h) + for h in self.helpers.entries.values()]) + + +class TemplateHelperLint(Bcfg2.Server.Lint.ServerlessPlugin): + """ find duplicate Pkgmgr entries with the same priority """ + def __init__(self, *args, **kwargs): + Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) + hm = HelperModule("foo.py") + self.reserved_keywords = dir(hm) + + def Run(self): + for fname in os.listdir(os.path.join(self.config['repo'], + "TemplateHelper")): + helper = os.path.join(self.config['repo'], "TemplateHelper", + fname) + if not module_re.search(helper) or not self.HandlesFile(helper): + continue + self.check_helper(helper) + + def check_helper(self, helper): + module_name = module_re.search(helper).group(1) try: - self.helpers = HelperSet(self.data, core.fam, core.encoding, - self.name) + module = 
imp.load_source(module_name, helper) except: - raise Bcfg2.Server.Plugin.PluginInitError + err = sys.exc_info()[1] + self.LintError("templatehelper-import-error", + "Failed to import %s: %s" % + (helper, err)) + return - def get_additional_data(self, metadata): - return dict([(h._module_name, h) - for h in list(self.helpers.entries.values())]) + if not hasattr(module, "__export__"): + self.LintError("templatehelper-no-export", + "%s has no __export__ list" % helper) + return + elif not isinstance(module.__export__, list): + self.LintError("templatehelper-nonlist-export", + "__export__ is not a list in %s" % helper) + return + + for sym in module.__export__: + if not hasattr(module, sym): + self.LintError("templatehelper-nonexistent-export", + "%s: exported symbol %s does not exist" % + (helper, sym)) + elif sym in self.reserved_keywords: + self.LintError("templatehelper-reserved-export", + "%s: exported symbol %s is reserved" % + (helper, sym)) + elif sym.startswith("_"): + self.LintError("templatehelper-underscore-export", + "%s: exported symbol %s starts with underscore" % + (helper, sym)) + + @classmethod + def Errors(cls): + return {"templatehelper-import-error":"error", + "templatehelper-no-export":"error", + "templatehelper-nonlist-export":"error", + "templatehelper-nonexistent-export":"error", + "templatehelper-reserved-export":"error", + "templatehelper-underscore-export":"warning"} diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py index b0d21545c..313a1bf03 100644 --- a/src/lib/Bcfg2/Server/Plugins/Trigger.py +++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py @@ -1,43 +1,52 @@ import os +import pipes import Bcfg2.Server.Plugin +from subprocess import Popen, PIPE +class TriggerFile(Bcfg2.Server.Plugin.FileBacked): + def HandleEvent(self, event=None): + return -def async_run(prog, args): - pid = os.fork() - if pid: - os.waitpid(pid, 0) - else: - dpid = os.fork() - if not dpid: - os.system(" ".join([prog] + args)) - 
os._exit(0) + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self.name) class Trigger(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Statistics): + Bcfg2.Server.Plugin.ClientRunHooks, + Bcfg2.Server.Plugin.DirectoryBacked): """Trigger is a plugin that calls external scripts (on the server).""" name = 'Trigger' __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) - Bcfg2.Server.Plugin.Statistics.__init__(self) - try: - os.stat(self.data) - except: - self.logger.error("Trigger: spool directory %s does not exist; " - "unloading" % self.data) - raise Bcfg2.Server.Plugin.PluginInitError - - def process_statistics(self, metadata, _): + Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) + Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, + self.core.fam) + + def async_run(self, args): + pid = os.fork() + if pid: + os.waitpid(pid, 0) + else: + dpid = os.fork() + if not dpid: + self.debug_log("Running %s" % " ".join(pipes.quote(a) + for a in args)) + proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE) + (out, err) = proc.communicate() + rv = proc.wait() + if rv != 0: + self.logger.error("Trigger: Error running %s (%s): %s" % + (args[0], rv, err)) + elif err: + self.debug_log("Trigger: Error: %s" % err) + os._exit(0) + + + def end_client_run(self, metadata): args = [metadata.hostname, '-p', metadata.profile, '-g', ':'.join([g for g in metadata.groups])] - for notifier in os.listdir(self.data): - if ((notifier[-1] == '~') or - (notifier[:2] == '.#') or - (notifier[-4:] == '.swp') or - (notifier in ['SCCS', '.svn', '4913'])): - continue - npath = self.data + '/' + notifier - self.logger.debug("Running %s %s" % (npath, " ".join(args))) - async_run(npath, args) + for notifier in self.entries.keys(): + npath = os.path.join(self.data, notifier) + self.async_run([npath] + args) diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py 
b/src/lib/Bcfg2/Server/Plugins/__init__.py index f9f1b4e52..b33eeba28 100644 --- a/src/lib/Bcfg2/Server/Plugins/__init__.py +++ b/src/lib/Bcfg2/Server/Plugins/__init__.py @@ -25,10 +25,8 @@ __all__ = [ 'SSHbase', 'Snapshots', 'Statistics', - 'Svcmgr', 'Svn', 'TCheetah', 'Trigger', - 'SGenshi', 'TGenshi', ] diff --git a/src/lib/Bcfg2/Server/Reports/importscript.py b/src/lib/Bcfg2/Server/Reports/importscript.py index 16df86a9b..4eced8340 100755 --- a/src/lib/Bcfg2/Server/Reports/importscript.py +++ b/src/lib/Bcfg2/Server/Reports/importscript.py @@ -4,17 +4,17 @@ Imports statistics.xml and clients.xml files in to database backend for new statistics engine """ -import binascii import os import sys +import traceback try: - import Bcfg2.Server.Reports.settings + import Bcfg2.settings except Exception: e = sys.exc_info()[1] sys.stderr.write("Failed to load configuration settings. %s\n" % e) sys.exit(1) -project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__) +project_directory = os.path.dirname(Bcfg2.settings.__file__) project_name = os.path.basename(project_directory) sys.path.append(os.path.join(project_directory, '..')) project_module = __import__(project_name, '', '', ['']) @@ -27,14 +27,14 @@ from lxml.etree import XML, XMLSyntaxError from getopt import getopt, GetoptError from datetime import datetime from time import strptime -from django.db import connection -from Bcfg2.Server.Reports.updatefix import update_database +from django.db import connection, transaction +from Bcfg2.Server.Plugins.Metadata import ClientMetadata import logging import Bcfg2.Logger import platform # Compatibility import -from Bcfg2.Bcfg2Py3k import ConfigParser +from Bcfg2.Bcfg2Py3k import ConfigParser, b64decode def build_reason_kwargs(r_ent, encoding, logger): @@ -53,7 +53,7 @@ def build_reason_kwargs(r_ent, encoding, logger): # No point in flagging binary if we have no data binary_file = False elif r_ent.get('current_bdiff', False): - rc_diff = 
binascii.a2b_base64(r_ent.get('current_bdiff')) + rc_diff = b64decode(r_ent.get('current_bdiff')) elif r_ent.get('current_diff', False): rc_diff = r_ent.get('current_diff') else: @@ -86,130 +86,160 @@ def build_reason_kwargs(r_ent, encoding, logger): is_sensitive=sensitive_file, unpruned=unpruned_entries) +def _fetch_reason(elem, kargs, logger): + try: + rr = None + try: + rr = Reason.objects.filter(**kargs)[0] + except IndexError: + rr = Reason(**kargs) + rr.save() + logger.debug("Created reason: %s" % rr.id) + except Exception: + ex = sys.exc_info()[1] + logger.error("Failed to create reason for %s: %s" % (elem.get('name'), ex)) + rr = Reason(current_exists=elem.get('current_exists', + default="True").capitalize() == "True") + rr.save() + return rr -def load_stats(cdata, sdata, encoding, vlevel, logger, quick=False, location=''): - clients = {} - [clients.__setitem__(c.name, c) \ - for c in Client.objects.all()] - - pingability = {} - [pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \ - for n in cdata.findall('Client')] +def load_stats(sdata, encoding, vlevel, logger, quick=False, location=''): for node in sdata.findall('Node'): name = node.get('name') - c_inst, created = Client.objects.get_or_create(name=name) - if vlevel > 0: - logger.info("Client %s added to db" % name) - clients[name] = c_inst - try: - pingability[name] - except KeyError: - pingability[name] = 'N' for statistics in node.findall('Statistics'): - timestamp = datetime(*strptime(statistics.get('time'))[0:6]) - ilist = Interaction.objects.filter(client=c_inst, - timestamp=timestamp) - if ilist: - current_interaction = ilist[0] - if vlevel > 0: - logger.info("Interaction for %s at %s with id %s already exists" % \ - (c_inst.id, timestamp, current_interaction.id)) - continue - else: - newint = Interaction(client=c_inst, - timestamp=timestamp, - state=statistics.get('state', + try: + load_stat(name, statistics, encoding, vlevel, logger, quick, location) + except: + 
logger.error("Failed to create interaction for %s: %s" % + (name, traceback.format_exc().splitlines()[-1])) + +@transaction.commit_on_success +def load_stat(cobj, statistics, encoding, vlevel, logger, quick, location): + if isinstance(cobj, ClientMetadata): + client_name = cobj.hostname + else: + client_name = cobj + client, created = Client.objects.get_or_create(name=client_name) + if created and vlevel > 0: + logger.info("Client %s added to db" % client_name) + + timestamp = datetime(*strptime(statistics.get('time'))[0:6]) + ilist = Interaction.objects.filter(client=client, + timestamp=timestamp) + if ilist: + current_interaction = ilist[0] + if vlevel > 0: + logger.info("Interaction for %s at %s with id %s already exists" % \ + (client.id, timestamp, current_interaction.id)) + return + else: + newint = Interaction(client=client, + timestamp=timestamp, + state=statistics.get('state', + default="unknown"), + repo_rev_code=statistics.get('revision', default="unknown"), - repo_rev_code=statistics.get('revision', - default="unknown"), - goodcount=statistics.get('good', - default="0"), - totalcount=statistics.get('total', - default="0"), - server=location) - newint.save() - current_interaction = newint - if vlevel > 0: - logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (c_inst.id, - timestamp, current_interaction.id)) - - counter_fields = {TYPE_CHOICES[0]: 0, - TYPE_CHOICES[1]: 0, - TYPE_CHOICES[2]: 0} - pattern = [('Bad/*', TYPE_CHOICES[0]), - ('Extra/*', TYPE_CHOICES[2]), - ('Modified/*', TYPE_CHOICES[1])] - for (xpath, type) in pattern: - for x in statistics.findall(xpath): - counter_fields[type] = counter_fields[type] + 1 - kargs = build_reason_kwargs(x, encoding, logger) - - try: - rr = None - try: - rr = Reason.objects.filter(**kargs)[0] - except IndexError: - rr = Reason(**kargs) - rr.save() - if vlevel > 0: - logger.info("Created reason: %s" % rr.id) - except Exception: - ex = sys.exc_info()[1] - logger.error("Failed to create reason for 
%s: %s" % (x.get('name'), ex)) - rr = Reason(current_exists=x.get('current_exists', - default="True").capitalize() == "True") - rr.save() - - entry, created = Entries.objects.get_or_create(\ - name=x.get('name'), kind=x.tag) - - Entries_interactions(entry=entry, reason=rr, - interaction=current_interaction, - type=type[0]).save() - if vlevel > 0: - logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id)) - - # Update interaction counters - current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]] - current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]] - current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]] - current_interaction.save() - - mperfs = [] - for times in statistics.findall('OpStamps'): - for metric, value in list(times.items()): - mmatch = [] - if not quick: - mmatch = Performance.objects.filter(metric=metric, value=value) - - if mmatch: - mperf = mmatch[0] - else: - mperf = Performance(metric=metric, value=value) - mperf.save() - mperfs.append(mperf) - current_interaction.performance_items.add(*mperfs) - - for key in list(pingability.keys()): - if key not in clients: - continue + goodcount=statistics.get('good', + default="0"), + totalcount=statistics.get('total', + default="0"), + server=location) + newint.save() + current_interaction = newint + if vlevel > 0: + logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (client.id, + timestamp, current_interaction.id)) + + if isinstance(cobj, ClientMetadata): try: - pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0] - if pmatch.status == pingability[key]: - pmatch.endtime = datetime.now() - pmatch.save() - continue - except IndexError: - pass - Ping(client=clients[key], status=pingability[key], - starttime=datetime.now(), - endtime=datetime.now()).save() + imeta = InteractionMetadata(interaction=current_interaction) + profile, created = Group.objects.get_or_create(name=cobj.profile) + 
imeta.profile = profile + imeta.save() # save here for m2m + + #FIXME - this should be more efficient + group_set = [] + for group_name in cobj.groups: + group, created = Group.objects.get_or_create(name=group_name) + if created: + logger.debug("Added group %s" % group) + imeta.groups.add(group) + for bundle_name in cobj.bundles: + bundle, created = Bundle.objects.get_or_create(name=bundle_name) + if created: + logger.debug("Added bundle %s" % bundle) + imeta.bundles.add(bundle) + imeta.save() + except: + logger.error("Failed to save interaction metadata for %s: %s" % + (client_name, traceback.format_exc().splitlines()[-1])) + + + entries_cache = {} + [entries_cache.__setitem__((e.kind, e.name), e) \ + for e in Entries.objects.all()] + counter_fields = {TYPE_BAD: 0, + TYPE_MODIFIED: 0, + TYPE_EXTRA: 0} + pattern = [('Bad/*', TYPE_BAD), + ('Extra/*', TYPE_EXTRA), + ('Modified/*', TYPE_MODIFIED)] + for (xpath, type) in pattern: + for x in statistics.findall(xpath): + counter_fields[type] = counter_fields[type] + 1 + rr = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger) - if vlevel > 1: - logger.info("---------------PINGDATA SYNCED---------------------") + try: + entry = entries_cache[(x.tag, x.get('name'))] + except KeyError: + entry, created = Entries.objects.get_or_create(\ + name=x.get('name'), kind=x.tag) + + Entries_interactions(entry=entry, reason=rr, + interaction=current_interaction, + type=type).save() + if vlevel > 0: + logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id)) + + # add good entries + good_reason = None + for x in statistics.findall('Good/*'): + if good_reason == None: + # Do this once. Really need to fix Reasons... 
+ good_reason = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger) + try: + entry = entries_cache[(x.tag, x.get('name'))] + except KeyError: + entry, created = Entries.objects.get_or_create(\ + name=x.get('name'), kind=x.tag) + Entries_interactions(entry=entry, reason=good_reason, + interaction=current_interaction, + type=TYPE_GOOD).save() + if vlevel > 0: + logger.info("%s interaction created with reason id %s and entry %s" % (xpath, good_reason.id, entry.id)) + + # Update interaction counters + current_interaction.bad_entries = counter_fields[TYPE_BAD] + current_interaction.modified_entries = counter_fields[TYPE_MODIFIED] + current_interaction.extra_entries = counter_fields[TYPE_EXTRA] + current_interaction.save() + + mperfs = [] + for times in statistics.findall('OpStamps'): + for metric, value in list(times.items()): + mmatch = [] + if not quick: + mmatch = Performance.objects.filter(metric=metric, value=value) + + if mmatch: + mperf = mmatch[0] + else: + mperf = Performance(metric=metric, value=value) + mperf.save() + mperfs.append(mperf) + current_interaction.performance_items.add(*mperfs) - #Clients are consistent if __name__ == '__main__': from sys import argv @@ -231,18 +261,17 @@ if __name__ == '__main__': except GetoptError: mesg = sys.exc_info()[1] # print help information and exit: - print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg)) + print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-s statistics-file]" % (mesg)) raise SystemExit(2) for o, a in opts: if o in ("-h", "--help"): - print("Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n") + print("Usage:\nimportscript.py [-h] [-v] -s <statistics-file> \n") print("h : help; this message") print("v : verbose; print messages on record insertion/skip") print("u : updates; print status messages as items inserted semi-verbose") print("d : debug; print 
most SQL used to manipulate database") print("C : path to bcfg2.conf config file.") - print("c : clients.xml file") print("s : statistics.xml file") print("S : syslog; output to syslog") raise SystemExit @@ -256,7 +285,7 @@ if __name__ == '__main__': if o in ("-d", "--debug"): verb = 3 if o in ("-c", "--clients"): - clientspath = a + print("DeprecationWarning: %s is no longer used" % o) if o in ("-s", "--stats"): statpath = a @@ -267,7 +296,7 @@ if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) Bcfg2.Logger.setup_logging('importscript.py', True, - syslog) + syslog, level=logging.INFO) cf = ConfigParser.ConfigParser() cf.read([cpath]) @@ -289,24 +318,16 @@ if __name__ == '__main__': except: encoding = 'UTF-8' - if not clientpath: - try: - clientspath = "%s/Metadata/clients.xml" % \ - cf.get('server', 'repository') - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - print("Could not read bcfg2.conf; exiting") - raise SystemExit(1) - try: - clientsdata = XML(open(clientspath).read()) - except (IOError, XMLSyntaxError): - print("StatReports: Failed to parse %s" % (clientspath)) - raise SystemExit(1) - q = '-O3' in sys.argv + + # don't load this at the top. 
causes a circular import error + from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError # Be sure the database is ready for new schema - update_database() - load_stats(clientsdata, - statsdata, + try: + update_database() + except UpdaterError: + raise SystemExit(1) + load_stats(statsdata, encoding, verb, logger, diff --git a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml b/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml deleted file mode 100644 index bde236989..000000000 --- a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml +++ /dev/null @@ -1,43 +0,0 @@ -<?xml version='1.0' encoding='utf-8' ?> -<django-objects version="1.0"> - <object pk="1" model="reports.internaldatabaseversion"> - <field type="IntegerField" name="version">0</field> - <field type="DateTimeField" name="updated">2008-08-05 11:03:50</field> - </object> - <object pk="2" model="reports.internaldatabaseversion"> - <field type="IntegerField" name="version">1</field> - <field type="DateTimeField" name="updated">2008-08-05 11:04:10</field> - </object> - <object pk="3" model="reports.internaldatabaseversion"> - <field type="IntegerField" name="version">2</field> - <field type="DateTimeField" name="updated">2008-08-05 13:37:19</field> - </object> - <object pk="4" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>3</field> - <field type='DateTimeField' name='updated'>2008-08-11 08:44:36</field> - </object> - <object pk="5" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>10</field> - <field type='DateTimeField' name='updated'>2008-08-22 11:28:50</field> - </object> - <object pk="5" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>11</field> - <field type='DateTimeField' name='updated'>2009-01-13 12:26:10</field> - </object> - <object pk="6" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>16</field> 
- <field type='DateTimeField' name='updated'>2010-06-01 12:26:10</field> - </object> - <object pk="7" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>17</field> - <field type='DateTimeField' name='updated'>2010-07-02 00:00:00</field> - </object> - <object pk="8" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>18</field> - <field type='DateTimeField' name='updated'>2011-06-30 00:00:00</field> - </object> - <object pk="8" model="reports.internaldatabaseversion"> - <field type='IntegerField' name='version'>19</field> - <field type='DateTimeField' name='updated'>2012-03-28 00:00:00</field> - </object> -</django-objects> diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py index 35f2a4393..73adaaaaf 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/models.py +++ b/src/lib/Bcfg2/Server/Reports/reports/models.py @@ -23,16 +23,13 @@ KIND_CHOICES = ( ('Path', 'symlink'), ('Service', 'Service'), ) -PING_CHOICES = ( - #These are possible ping states - ('Up (Y)', 'Y'), - ('Down (N)', 'N') -) +TYPE_GOOD = 0 TYPE_BAD = 1 TYPE_MODIFIED = 2 TYPE_EXTRA = 3 TYPE_CHOICES = ( + (TYPE_GOOD, 'Good'), (TYPE_BAD, 'Bad'), (TYPE_MODIFIED, 'Modified'), (TYPE_EXTRA, 'Extra'), @@ -87,30 +84,9 @@ class Client(models.Model): pass -class Ping(models.Model): - """Represents a ping of a client (sparsely).""" - client = models.ForeignKey(Client, related_name="pings") - starttime = models.DateTimeField() - endtime = models.DateTimeField() - status = models.CharField(max_length=4, choices=PING_CHOICES) # up/down - - class Meta: - get_latest_by = 'endtime' - - class InteractiveManager(models.Manager): """Manages interactions objects.""" - def recent_interactions_dict(self, maxdate=None, active_only=True): - """ - Return the most recent interactions for clients as of a date. - - This method uses aggregated queries to return a ValuesQueryDict object. 
- Faster then raw sql since this is executed as a single query. - """ - - return list(self.values('client').annotate(max_timestamp=Max('timestamp')).values()) - def interaction_per_client(self, maxdate=None, active_only=True): """ Returns the most recent interactions for clients as of a date @@ -154,15 +130,15 @@ class InteractiveManager(models.Manager): cursor.execute(sql) return [item[0] for item in cursor.fetchall()] except: - '''FIXME - really need some error hadling''' + '''FIXME - really need some error handling''' pass return [] class Interaction(models.Model): """Models each reconfiguration operation interaction between client and server.""" - client = models.ForeignKey(Client, related_name="interactions",) - timestamp = models.DateTimeField() # Timestamp for this record + client = models.ForeignKey(Client, related_name="interactions") + timestamp = models.DateTimeField(db_index=True) # Timestamp for this record state = models.CharField(max_length=32) # good/bad/modified/etc repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction goodcount = models.IntegerField() # of good config-items @@ -270,27 +246,47 @@ class Interaction(models.Model): class Reason(models.Model): """reason why modified or bad entry did not verify, or changed.""" - owner = models.TextField(max_length=128, blank=True) - current_owner = models.TextField(max_length=128, blank=True) - group = models.TextField(max_length=128, blank=True) - current_group = models.TextField(max_length=128, blank=True) - perms = models.TextField(max_length=4, blank=True) # txt fixes typing issue - current_perms = models.TextField(max_length=4, blank=True) - status = models.TextField(max_length=3, blank=True) # on/off/(None) - current_status = models.TextField(max_length=1, blank=True) # on/off/(None) - to = models.TextField(max_length=256, blank=True) - current_to = models.TextField(max_length=256, blank=True) - version = models.TextField(max_length=128, blank=True) - 
current_version = models.TextField(max_length=128, blank=True) + owner = models.CharField(max_length=255, blank=True) + current_owner = models.CharField(max_length=255, blank=True) + group = models.CharField(max_length=255, blank=True) + current_group = models.CharField(max_length=255, blank=True) + perms = models.CharField(max_length=4, blank=True) + current_perms = models.CharField(max_length=4, blank=True) + status = models.CharField(max_length=128, blank=True) + current_status = models.CharField(max_length=128, blank=True) + to = models.CharField(max_length=1024, blank=True) + current_to = models.CharField(max_length=1024, blank=True) + version = models.CharField(max_length=1024, blank=True) + current_version = models.CharField(max_length=1024, blank=True) current_exists = models.BooleanField() # False means its missing. Default True - current_diff = models.TextField(max_length=1280, blank=True) + current_diff = models.TextField(max_length=1024*1024, blank=True) is_binary = models.BooleanField(default=False) is_sensitive = models.BooleanField(default=False) - unpruned = models.TextField(max_length=1280, blank=True) + unpruned = models.TextField(max_length=4096, blank=True, default='') def _str_(self): return "Reason" + def short_list(self): + rv = [] + if self.current_owner or self.current_group or self.current_perms: + rv.append("File permissions") + if self.current_status: + rv.append("Incorrect status") + if self.current_to: + rv.append("Incorrect target") + if self.current_version or self.version == 'auto': + rv.append("Wrong version") + if not self.current_exists: + rv.append("Missing") + if self.current_diff or self.is_sensitive: + rv.append("Incorrect data") + if self.unpruned: + rv.append("Directory has extra files") + if len(rv) == 0: + rv.append("Exists") + return rv + @staticmethod @transaction.commit_on_success def prune_orphans(): @@ -316,6 +312,9 @@ class Entries(models.Model): cursor.execute('delete from reports_entries where not exists (select 
rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)') transaction.set_dirty() + class Meta: + unique_together = ("name", "kind") + class Entries_interactions(models.Model): """Define the relation between the reason, the interaction and the entry.""" @@ -343,10 +342,52 @@ class Performance(models.Model): transaction.set_dirty() -class InternalDatabaseVersion(models.Model): - """Object that tell us to witch version is the database.""" - version = models.IntegerField() - updated = models.DateTimeField(auto_now_add=True) +class Group(models.Model): + """ + Groups extracted from interactions + + name - The group name + + TODO - Most of this is for future use + TODO - set a default group + """ + + name = models.CharField(max_length=255, unique=True) + profile = models.BooleanField(default=False) + public = models.BooleanField(default=False) + category = models.CharField(max_length=1024, blank=True) + comment = models.TextField(blank=True) + + groups = models.ManyToManyField("self", symmetrical=False) + bundles = models.ManyToManyField("Bundle") + + def __unicode__(self): + return self.name + + +class Bundle(models.Model): + """ + Bundles extracted from interactions + + name - The bundle name + """ + + name = models.CharField(max_length=255, unique=True) + + def __unicode__(self): + return self.name + + +class InteractionMetadata(models.Model): + """ + InteractionMetadata + + Hold extra data associated with the client and interaction + """ + + interaction = models.OneToOneField(Interaction, primary_key=True, related_name='metadata') + profile = models.ForeignKey(Group, related_name="+") + groups = models.ManyToManyField(Group) + bundles = models.ManyToManyField(Bundle) + - def __str__(self): - return "version %d updated the %s" % (self.version, self.updated.isoformat()) diff --git a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql b/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql deleted file mode 100644 index 28e785450..000000000 
--- a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE VIEW reports_current_interactions AS SELECT x.client_id AS client_id, reports_interaction.id AS interaction_id FROM (select client_id, MAX(timestamp) as timer FROM reports_interaction GROUP BY client_id) x, reports_interaction WHERE reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer; - -create index reports_interaction_client_id on reports_interaction (client_id); -create index reports_client_current_interaction_id on reports_client (current_interaction_id); -create index reports_performance_interaction_performance_id on reports_performance_interaction (performance_id); -create index reports_interaction_timestamp on reports_interaction (timestamp); -create index reports_performance_interation_interaction_id on reports_performance_interaction (interaction_id); diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html index 842de36f0..9a5ef651c 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html @@ -20,6 +20,9 @@ document.write(getCalendarStyles()); {% if not timestamp %}Rendered at {% now "Y-m-d H:i" %} | {% else %}View as of {{ timestamp|date:"Y-m-d H:i" }} | {% endif %}{% spaceless %} <a id='cal_link' name='cal_link' href='#' onclick='showCalendar(); return false;' >[change]</a> - <form method='post' action='{{ path }}' id='cal_form' name='cal_form'><input id='cal_date' name='cal_date' type='hidden' value=''/></form> + <form method='post' action='{{ path }}' id='cal_form' name='cal_form'> + <input id='cal_date' name='cal_date' type='hidden' value=''/> + <input name='op' type='hidden' value='timeview'/> + </form> {% endspaceless %} {% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html 
b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html index f541c0d2b..3fa482a19 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html @@ -62,6 +62,7 @@ <li>Entries Configured</li> </ul> <ul class='menu-level2'> + <li><a href="{% url reports_common_problems %}">Common problems</a></li> <li><a href="{% url reports_item_list "bad" %}">Bad</a></li> <li><a href="{% url reports_item_list "modified" %}">Modified</a></li> <li><a href="{% url reports_item_list "extra" %}">Extra</a></li> @@ -87,7 +88,7 @@ <div style='clear:both'></div> </div><!-- document --> <div id="footer"> - <span>Bcfg2 Version 1.2.2</span> + <span>Bcfg2 Version 1.2.3</span> </div> <div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div> diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html index dd4295f21..9b86b609f 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html @@ -50,6 +50,9 @@ span.history_links a { {% if interaction.server %} <tr><td>Served by</td><td>{{interaction.server}}</td></tr> {% endif %} + {% if interaction.metadata %} + <tr><td>Profile</td><td>{{interaction.metadata.profile}}</td></tr> + {% endif %} {% if interaction.repo_rev_code %} <tr><td>Revision</td><td>{{interaction.repo_rev_code}}</td></tr> {% endif %} @@ -60,58 +63,57 @@ span.history_links a { {% endif %} </table> - {% if interaction.bad_entry_count %} + {% if interaction.metadata.groups.count %} <div class='entry_list'> - <div class='entry_list_head dirty-lineitem' onclick='javascript:toggleMe("bad_table");'> - <h3>Bad Entries — {{ interaction.bad_entry_count }}</h3> - <div class='entry_expand_tab' id='plusminus_bad_table'>[+]</div> + <div class='entry_list_head' 
onclick='javascript:toggleMe("groups_table");'> + <h3>Group membership</h3> + <div class='entry_expand_tab' id='plusminus_groups_table'>[+]</div> </div> - <table id='bad_table' class='entry_list'> - {% for e in interaction.bad|sortwell %} + <table id='groups_table' class='entry_list' style='display: none'> + {% for group in interaction.metadata.groups.all %} <tr class='{% cycle listview,listview_alt %}'> - <td class='entry_list_type'>{{e.entry.kind}}:</td> - <td><a href="{% url reports_item "bad",e.id %}"> - {{e.entry.name}}</a></td> + <td class='entry_list_type'>{{group}}</td> </tr> {% endfor %} </table> </div> {% endif %} - {% if interaction.modified_entry_count %} + {% if interaction.metadata.bundles.count %} <div class='entry_list'> - <div class='entry_list_head modified-lineitem' onclick='javascript:toggleMe("modified_table");'> - <h3>Modified Entries — {{ interaction.modified_entry_count }}</h3> - <div class='entry_expand_tab' id='plusminus_modified_table'>[+]</div> + <div class='entry_list_head' onclick='javascript:toggleMe("bundles_table");'> + <h3>Bundle membership</h3> + <div class='entry_expand_tab' id='plusminus_bundless_table'>[+]</div> </div> - <table id='modified_table' class='entry_list'> - {% for e in interaction.modified|sortwell %} + <table id='bundles_table' class='entry_list' style='display: none'> + {% for bundle in interaction.metadata.bundles.all %} <tr class='{% cycle listview,listview_alt %}'> - <td class='entry_list_type'>{{e.entry.kind}}:</td> - <td><a href="{% url reports_item "modified",e.id %}"> - {{e.entry.name}}</a></td> + <td class='entry_list_type'>{{bundle}}</td> </tr> {% endfor %} </table> </div> {% endif %} - {% if interaction.extra_entry_count %} + {% for type, ei_list in ei_lists %} + {% if ei_list %} <div class='entry_list'> - <div class='entry_list_head extra-lineitem' onclick='javascript:toggleMe("extra_table");'> - <h3>Extra Entries — {{ interaction.extra_entry_count }}</h3> - <div class='entry_expand_tab' 
id='plusminus_extra_table'>[+]</div> + <div class='entry_list_head {{type}}-lineitem' onclick='javascript:toggleMe("{{type}}_table");'> + <h3>{{ type|capfirst }} Entries — {{ ei_list|length }}</h3> + <div class='entry_expand_tab' id='plusminus_{{type}}_table'>[+]</div> </div> - <table id='extra_table' class='entry_list'> - {% for e in interaction.extra|sortwell %} + <table id='{{type}}_table' class='entry_list'> + {% for ei in ei_list %} <tr class='{% cycle listview,listview_alt %}'> - <td class='entry_list_type'>{{e.entry.kind}}:</td> - <td><a href="{% url reports_item "extra",e.id %}">{{e.entry.name}}</a></td> + <td class='entry_list_type'>{{ei.entry.kind}}</td> + <td><a href="{% url reports_item type ei.id %}"> + {{ei.entry.name}}</a></td> </tr> - {% endfor %} + {% endfor %} </table> </div> {% endif %} + {% endfor %} {% if entry_list %} <div class="entry_list recent_history_wrapper"> diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html index 84ac71d92..9be59e7d2 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html @@ -6,18 +6,18 @@ {% block content %} <div class='client_list_box'> -{% if entry_list %} {% filter_navigator %} +{% if entry_list %} <table cellpadding="3"> <tr id='table_list_header' class='listview'> - <td class='left_column'>Node</td> - <td class='right_column' style='width:75px'>State</td> - <td class='right_column_narrow'>Good</td> - <td class='right_column_narrow'>Bad</td> - <td class='right_column_narrow'>Modified</td> - <td class='right_column_narrow'>Extra</td> - <td class='right_column'>Last Run</td> - <td class='right_column_wide'>Server</td> + <td class='left_column'>{% sort_link 'client' 'Node' %}</td> + <td class='right_column' style='width:75px'>{% sort_link 'state' 'State' %}</td> + <td class='right_column_narrow'>{% 
sort_link '-good' 'Good' %}</td> + <td class='right_column_narrow'>{% sort_link '-bad' 'Bad' %}</td> + <td class='right_column_narrow'>{% sort_link '-modified' 'Modified' %}</td> + <td class='right_column_narrow'>{% sort_link '-extra' 'Extra' %}</td> + <td class='right_column'>{% sort_link 'timestamp' 'Last Run' %}</td> + <td class='right_column_wide'>{% sort_link 'server' 'Server' %}</td> </tr> {% for entry in entry_list %} <tr class='{% cycle listview,listview_alt %}'> diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html index 134e237d6..45ba20b86 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html @@ -9,6 +9,7 @@ {% block pagebanner %}Clients - Grid View{% endblock %} {% block content %} +{% filter_navigator %} {% if inter_list %} <table class='grid-view' align='center'> {% for inter in inter_list %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html index 5725ae577..443ec8ccb 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html @@ -38,8 +38,8 @@ </tr> {% endfor %} </table> - </div> {% else %} <p>No client records are available.</p> {% endif %} + </div> {% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html new file mode 100644 index 000000000..d6ad303fc --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html @@ -0,0 +1,42 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Common Problems{% endblock %} + +{% block extra_header_info %} +{% endblock%} + +{% block pagebanner %}Common 
configuration problems{% endblock %} + +{% block content %} + <div id='threshold_box'> + <form method='post' action='{{ request.path }}'> + <span>Showing items with more then {{ threshold }} entries</span> + <input type='text' name='threshold' value='{{ threshold }}' maxlength='5' size='5' /> + <input type='submit' value='Change' /> + </form> + </div> + {% for type_name, type_list in lists %} + <div class='entry_list'> + <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ type_name }}");'> + <h3>{{ type_name|capfirst }} entries</h3> + <div class='entry_expand_tab' id='plusminus_table_{{ type_name }}'>[–]</div> + </div> + {% if type_list %} + <table id='table_{{ type_name }}' class='entry_list'> + <tr style='text-align: left'><th>Type</th><th>Name</th><th>Count</th><th>Reason</th></tr> + {% for entry, reason, interaction in type_list %} + <tr class='{% cycle listview,listview_alt %}'> + <td>{{ entry.kind }}</td> + <td><a href="{% url reports_entry eid=entry.pk %}">{{ entry.name }}</a></td> + <td>{{ interaction|length }}</td> + <td><a href="{% url reports_item type=type_name pk=interaction.0 %}">{{ reason.short_list|join:"," }}</a></td> + </tr> + {% endfor %} + </table> + {% else %} + <p>There are currently no inconsistent {{ type_name }} configuration entries.</p> + {% endif %} + </div> + {% endfor %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html new file mode 100644 index 000000000..5f7579eb9 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html @@ -0,0 +1,30 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Entry Status{% endblock %} + +{% block extra_header_info %} +{% endblock%} + +{% block pagebanner %}{{ entry.kind }} entry {{ entry.name }} status{% endblock %} + +{% block content %} +{% filter_navigator 
%} +{% if item_data %} + <div class='entry_list'> + <table class='entry_list'> + <tr style='text-align: left' ><th>Name</th><th>Timestamp</th><th>State</th><th>Reason</th></tr> + {% for ei, inter, reason in item_data %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=inter.client.name, pk=inter.id %}'>{{ inter.client.name }}</a></td> + <td style='white-space: nowrap'><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=inter.client.name, pk=inter.id %}'>{{ inter.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</a></td> + <td>{{ ei.get_type_display }}</td> + <td style='white-space: nowrap'><a href="{% url reports_item type=ei.get_type_display pk=ei.pk %}">{{ reason.short_list|join:"," }}</a></td> + </tr> + {% endfor %} + </table> + </div> +{% else %} + <p>There are currently no hosts with this configuration entry.</p> +{% endif %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html index 9b1026a08..0a92e7fc0 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html @@ -9,19 +9,21 @@ {% block pagebanner %}{{mod_or_bad|capfirst}} Element Listing{% endblock %} {% block content %} -{% if item_list_dict %} - {% for kind, entries in item_list_dict.items %} - +{% filter_navigator %} +{% if item_list %} + {% for type_name, type_data in item_list %} <div class='entry_list'> - <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ kind }}");'> - <h3>{{ kind }} — {{ entries|length }}</h3> - <div class='entry_expand_tab' id='plusminus_table_{{ kind }}'>[–]</div> + <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ type_name }}");'> + <h3>{{ type_name }} — {{ type_data|length }}</h3> + 
<div class='entry_expand_tab' id='plusminus_table_{{ type_name }}'>[–]</div> </div> - - <table id='table_{{ kind }}' class='entry_list'> - {% for e in entries %} + <table id='table_{{ type_name }}' class='entry_list'> + <tr style='text-align: left' ><th>Name</th><th>Count</th><th>Reason</th></tr> + {% for entry, reason, eis in type_data %} <tr class='{% cycle listview,listview_alt %}'> - <td><a href="{% url reports_item type=mod_or_bad,pk=e.id %}">{{e.entry.name}}</a></td> + <td><a href="{% url reports_entry eid=entry.pk %}">{{entry.name}}</a></td> + <td>{{ eis|length }}</td> + <td><a href="{% url reports_item type=mod_or_bad,pk=eis.0 %}">{{ reason.short_list|join:"," }}</a></td> </tr> {% endfor %} </table> diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html index 6fbe585ab..759415507 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html @@ -1,13 +1,25 @@ {% spaceless %} +<div class="filter_bar"> +<form name='filter_form'> {% if filters %} {% for filter, filter_url in filters %} {% if forloop.first %} - <div class="filter_bar">Active filters (click to remove): + Active filters (click to remove): {% endif %} <a href='{{ filter_url }}'>{{ filter|capfirst }}</a>{% if not forloop.last %}, {% endif %} {% if forloop.last %} - </div> + {% if groups %}|{% endif %} {% endif %} {% endfor %} {% endif %} +{% if groups %} +<label for="id_group">Group filter:</label> +<select id="id_group" name="group" onchange="javascript:url=document.forms['filter_form'].group.value; if(url) { location.href=url }"> + {% for group, group_url, selected in groups %} + <option label="{{group}}" value="{{group_url}}" {% if selected %}selected {% endif %}/> + {% endfor %} +</select> +{% endif %} +</form> +</div> {% endspaceless %} diff --git 
a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py index ac63cda3e..894353bba 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py +++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py @@ -1,11 +1,17 @@ import sys +from copy import copy from django import template +from django.conf import settings from django.core.urlresolvers import resolve, reverse, \ Resolver404, NoReverseMatch +from django.template.loader import get_template, \ + get_template_from_string,TemplateDoesNotExist from django.utils.encoding import smart_unicode, smart_str +from django.utils.safestring import mark_safe from datetime import datetime, timedelta from Bcfg2.Server.Reports.utils import filter_list +from Bcfg2.Server.Reports.reports.models import Group register = template.Library() @@ -115,13 +121,27 @@ def filter_navigator(context): filters = [] for filter in filter_list: + if filter == 'group': + continue if filter in kwargs: myargs = kwargs.copy() del myargs[filter] filters.append((filter, reverse(view, args=args, kwargs=myargs))) filters.sort(lambda x, y: cmp(x[0], y[0])) - return {'filters': filters} + + myargs = kwargs.copy() + selected=True + if 'group' in myargs: + del myargs['group'] + selected=False + groups = [('---', reverse(view, args=args, kwargs=myargs), selected)] + for group in Group.objects.values('name'): + myargs['group'] = group['name'] + groups.append((group['name'], reverse(view, args=args, kwargs=myargs), + group['name'] == kwargs.get('group', ''))) + + return {'filters': filters, 'groups': groups} except (Resolver404, NoReverseMatch, ValueError, KeyError): pass return dict() @@ -242,19 +262,6 @@ def add_url_filter(parser, token): return AddUrlFilter(filter_name, filter_value) -@register.filter -def sortwell(value): - """ - Sorts a list(or evaluates queryset to list) of bad, extra, or modified items in the best - way for presentation - """ - - 
configItems = list(value) - configItems.sort(lambda x, y: cmp(x.entry.name, y.entry.name)) - configItems.sort(lambda x, y: cmp(x.entry.kind, y.entry.kind)) - return configItems - - class MediaTag(template.Node): def __init__(self, filter_value): self.filter_value = filter_value @@ -311,3 +318,98 @@ def determine_client_state(entry): else: thisdirty = "very-dirty-lineitem" return thisdirty + + +@register.tag(name='qs') +def do_qs(parser, token): + """ + qs tag + + accepts a name value pair and inserts or replaces it in the query string + """ + try: + tag, name, value = token.split_contents() + except ValueError: + raise TemplateSyntaxError, "%r tag requires exactly two arguments" \ + % token.contents.split()[0] + return QsNode(name, value) + +class QsNode(template.Node): + def __init__(self, name, value): + self.name = template.Variable(name) + self.value = template.Variable(value) + + def render(self, context): + try: + name = self.name.resolve(context) + value = self.value.resolve(context) + request = context['request'] + qs = copy(request.GET) + qs[name] = value + return "?%s" % qs.urlencode() + except template.VariableDoesNotExist: + return '' + except KeyError: + if settings.TEMPLATE_DEBUG: + raise Exception, "'qs' tag requires context['request']" + return '' + except: + return '' + + +@register.tag +def sort_link(parser, token): + ''' + Create a sort anchor tag. Reverse it if active. 
+ + {% sort_link sort_key text %} + ''' + try: + tag, sort_key, text = token.split_contents() + except ValueError: + raise TemplateSyntaxError("%r tag requires at least four arguments" \ + % token.split_contents()[0]) + + return SortLinkNode(sort_key, text) + +class SortLinkNode(template.Node): + __TMPL__ = "{% load bcfg2_tags %}<a href='{% qs 'sort' key %}'>{{ text }}</a>" + + def __init__(self, sort_key, text): + self.sort_key = template.Variable(sort_key) + self.text = template.Variable(text) + + def render(self, context): + try: + try: + sort = context['request'].GET['sort'] + except KeyError: + #fall back on this + sort = context.get('sort', '') + sort_key = self.sort_key.resolve(context) + text = self.text.resolve(context) + + # add arrows + try: + sort_base = sort_key.lstrip('-') + if sort[0] == '-' and sort[1:] == sort_base: + text = text + '▼' + sort_key = sort_base + elif sort_base == sort: + text = text + '▲' + sort_key = '-' + sort_base + except IndexError: + pass + + context.push() + context['key'] = sort_key + context['text'] = mark_safe(text) + output = get_template_from_string(self.__TMPL__).render(context) + context.pop() + return output + except: + if settings.DEBUG: + raise + raise + return '' + diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py index 36d4cf693..0d4c6501d 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py +++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py @@ -4,6 +4,8 @@ from django.utils.encoding import smart_unicode from django.utils.html import conditional_escape from django.utils.safestring import mark_safe +from Bcfg2.Bcfg2Py3k import u_str + register = template.Library() try: @@ -16,14 +18,6 @@ except: colorize = False -# py3k compatibility -def u_str(string): - if sys.hexversion >= 0x03000000: - return string - else: - return unicode(string) - - @register.filter def 
syntaxhilight(value, arg="diff", autoescape=None): """ diff --git a/src/lib/Bcfg2/Server/Reports/reports/urls.py b/src/lib/Bcfg2/Server/Reports/reports/urls.py index 434ce07b7..1cfe725c2 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/urls.py +++ b/src/lib/Bcfg2/Server/Reports/reports/urls.py @@ -17,20 +17,23 @@ urlpatterns = patterns('Bcfg2.Server.Reports.reports', url(r'^client/(?P<hostname>[^/]+)/(?P<pk>\d+)/?$', 'views.client_detail', name='reports_client_detail_pk'), url(r'^client/(?P<hostname>[^/]+)/?$', 'views.client_detail', name='reports_client_detail'), url(r'^elements/(?P<type>\w+)/(?P<pk>\d+)/?$', 'views.config_item', name='reports_item'), + url(r'^entry/(?P<eid>\w+)/?$', 'views.entry_status', name='reports_entry'), ) urlpatterns += patterns('Bcfg2.Server.Reports.reports', *timeviewUrls( - (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'), (r'^summary/?$', 'views.display_summary', None, 'reports_summary'), (r'^timing/?$', 'views.display_timing', None, 'reports_timing'), - (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'), + (r'^common/(?P<threshold>\d+)/?$', 'views.common_problems', None, 'reports_common_problems'), + (r'^common/?$', 'views.common_problems', None, 'reports_common_problems'), )) urlpatterns += patterns('Bcfg2.Server.Reports.reports', *filteredUrls(*timeviewUrls( + (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'), (r'^detailed/?$', - 'views.client_detailed_list', None, 'reports_detailed_list') + 'views.client_detailed_list', None, 'reports_detailed_list'), + (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'), ))) urlpatterns += patterns('Bcfg2.Server.Reports.reports', diff --git a/src/lib/Bcfg2/Server/Reports/reports/views.py b/src/lib/Bcfg2/Server/Reports/reports/views.py index ccd71a60e..e4c38363f 100644 --- a/src/lib/Bcfg2/Server/Reports/reports/views.py +++ b/src/lib/Bcfg2/Server/Reports/reports/views.py @@ -13,16 +13,41 @@ from 
django.http import \ from django.shortcuts import render_to_response, get_object_or_404 from django.core.urlresolvers import \ resolve, reverse, Resolver404, NoReverseMatch -from django.db import connection +from django.db import connection, DatabaseError +from django.db.models import Q from Bcfg2.Server.Reports.reports.models import * +__SORT_FIELDS__ = ( 'client', 'state', 'good', 'bad', 'modified', 'extra', \ + 'timestamp', 'server' ) + class PaginationError(Exception): """This error is raised when pagination cannot be completed.""" pass +def _in_bulk(model, ids): + """ + Short cut to fetch in bulk and trap database errors. sqlite will raise + a "too many SQL variables" exception if this list is too long. Try using + django and fetch manually if an error occurs + + returns a dict of this form { id: <model instance> } + """ + + try: + return model.objects.in_bulk(ids) + except DatabaseError: + pass + + # if objects.in_bulk fails so will obejcts.filter(pk__in=ids) + bulk_dict = {} + [bulk_dict.__setitem__(i.id, i) \ + for i in model.objects.all() if i.id in ids] + return bulk_dict + + def server_error(request): """ 500 error handler. 
@@ -44,7 +69,7 @@ def timeview(fn): """ def _handle_timeview(request, **kwargs): """Send any posts back.""" - if request.method == 'POST': + if request.method == 'POST' and request.POST.get('op', '') == 'timeview': cal_date = request.POST['cal_date'] try: fmt = "%Y/%m/%d" @@ -84,6 +109,30 @@ def timeview(fn): return _handle_timeview +def _handle_filters(query, **kwargs): + """ + Applies standard filters to a query object + + Returns an updated query object + + query - query object to filter + + server -- Filter interactions by server + state -- Filter interactions by state + group -- Filter interactions by group + + """ + if 'state' in kwargs and kwargs['state']: + query = query.filter(state__exact=kwargs['state']) + if 'server' in kwargs and kwargs['server']: + query = query.filter(server__exact=kwargs['server']) + + if 'group' in kwargs and kwargs['group']: + group = get_object_or_404(Group, name=kwargs['group']) + query = query.filter(metadata__groups__id=group.pk) + return query + + def config_item(request, pk, type="bad"): """ Display a single entry. 
@@ -121,47 +170,138 @@ def config_item(request, pk, type="bad"): @timeview -def config_item_list(request, type, timestamp=None): +def config_item_list(request, type, timestamp=None, **kwargs): """Render a listing of affected elements""" mod_or_bad = type.lower() type = convert_entry_type_to_id(type) if type < 0: raise Http404 - current_clients = Interaction.objects.get_interaction_per_client_ids(timestamp) - item_list_dict = {} - seen = dict() - for x in Entries_interactions.objects.filter(interaction__in=current_clients, - type=type).select_related(): - if (x.entry, x.reason) in seen: - continue - seen[(x.entry, x.reason)] = 1 - if item_list_dict.get(x.entry.kind, None): - item_list_dict[x.entry.kind].append(x) - else: - item_list_dict[x.entry.kind] = [x] + current_clients = Interaction.objects.interaction_per_client(timestamp) + current_clients = [q['id'] for q in _handle_filters(current_clients, **kwargs).values('id')] + + ldata = list(Entries_interactions.objects.filter( + interaction__in=current_clients, type=type).values()) + entry_ids = set([x['entry_id'] for x in ldata]) + reason_ids = set([x['reason_id'] for x in ldata]) - for kind in item_list_dict: - item_list_dict[kind].sort(lambda a, b: cmp(a.entry.name, b.entry.name)) + entries = _in_bulk(Entries, entry_ids) + reasons = _in_bulk(Reason, reason_ids) + + kind_list = {} + [kind_list.__setitem__(kind, {}) for kind in set([e.kind for e in entries.values()])] + for x in ldata: + kind = entries[x['entry_id']].kind + data_key = (x['entry_id'], x['reason_id']) + try: + kind_list[kind][data_key].append(x['id']) + except KeyError: + kind_list[kind][data_key] = [x['id']] + + lists = [] + for kind in kind_list.keys(): + lists.append((kind, [(entries[e[0][0]], reasons[e[0][1]], e[1]) + for e in sorted(kind_list[kind].iteritems(), key=lambda x: entries[x[0][0]].name)])) return render_to_response('config_items/listing.html', - {'item_list_dict': item_list_dict, + {'item_list': lists, 'mod_or_bad': mod_or_bad, 
'timestamp': timestamp}, context_instance=RequestContext(request)) @timeview -def client_index(request, timestamp=None): +def entry_status(request, eid, timestamp=None, **kwargs): + """Render a listing of affected elements""" + entry = get_object_or_404(Entries, pk=eid) + + current_clients = Interaction.objects.interaction_per_client(timestamp) + inters = {} + [inters.__setitem__(i.id, i) \ + for i in _handle_filters(current_clients, **kwargs).select_related('client')] + + eis = Entries_interactions.objects.filter( + interaction__in=inters.keys(), entry=entry) + + reasons = _in_bulk(Reason, set([x.reason_id for x in eis])) + + item_data = [] + for ei in eis: + item_data.append((ei, inters[ei.interaction_id], reasons[ei.reason_id])) + + return render_to_response('config_items/entry_status.html', + {'entry': entry, + 'item_data': item_data, + 'timestamp': timestamp}, + context_instance=RequestContext(request)) + + +@timeview +def common_problems(request, timestamp=None, threshold=None): + """Mine config entries""" + + if request.method == 'POST': + try: + threshold = int(request.POST['threshold']) + view, args, kw = resolve(request.META['PATH_INFO']) + kw['threshold'] = threshold + return HttpResponseRedirect(reverse(view, + args=args, + kwargs=kw)) + except: + pass + + try: + threshold = int(threshold) + except: + threshold = 10 + + c_intr = Interaction.objects.get_interaction_per_client_ids(timestamp) + data_list = {} + [data_list.__setitem__(t_id, {}) \ + for t_id, t_label in TYPE_CHOICES if t_id != TYPE_GOOD] + ldata = list(Entries_interactions.objects.filter( + interaction__in=c_intr).exclude(type=TYPE_GOOD).values()) + + entry_ids = set([x['entry_id'] for x in ldata]) + reason_ids = set([x['reason_id'] for x in ldata]) + for x in ldata: + type = x['type'] + data_key = (x['entry_id'], x['reason_id']) + try: + data_list[type][data_key].append(x['id']) + except KeyError: + data_list[type][data_key] = [x['id']] + + entries = _in_bulk(Entries, entry_ids) + reasons = 
_in_bulk(Reason, reason_ids) + + lists = [] + for type, type_name in TYPE_CHOICES: + if type == TYPE_GOOD: + continue + lists.append([type_name.lower(), [(entries[e[0][0]], reasons[e[0][1]], e[1]) + for e in sorted(data_list[type].items(), key=lambda x: len(x[1]), reverse=True) + if len(e[1]) > threshold]]) + + return render_to_response('config_items/common.html', + {'lists': lists, + 'timestamp': timestamp, + 'threshold': threshold}, + context_instance=RequestContext(request)) + + +@timeview +def client_index(request, timestamp=None, **kwargs): """ Render a grid view of active clients. Keyword parameters: - timestamp -- datetime objectto render from + timestamp -- datetime object to render from """ - list = Interaction.objects.interaction_per_client(timestamp).select_related()\ - .order_by("client__name").all() + list = _handle_filters(Interaction.objects.interaction_per_client(timestamp), **kwargs).\ + select_related().order_by("client__name").all() return render_to_response('clients/index.html', {'inter_list': list, @@ -177,8 +317,29 @@ def client_detailed_list(request, timestamp=None, **kwargs): """ + try: + sort = request.GET['sort'] + if sort[0] == '-': + sort_key = sort[1:] + else: + sort_key = sort + if not sort_key in __SORT_FIELDS__: + raise ValueError + + if sort_key == "client": + kwargs['orderby'] = "%s__name" % sort + elif sort_key == "good": + kwargs['orderby'] = "%scount" % sort + elif sort_key in ["bad", "modified", "extra"]: + kwargs['orderby'] = "%s_entries" % sort + else: + kwargs['orderby'] = sort + kwargs['sort'] = sort + except (ValueError, KeyError): + kwargs['orderby'] = "client__name" + kwargs['sort'] = "client" + kwargs['interaction_base'] = Interaction.objects.interaction_per_client(timestamp).select_related() - kwargs['orderby'] = "client__name" kwargs['page_limit'] = 0 return render_history_view(request, 'clients/detailed-list.html', **kwargs) @@ -187,13 +348,25 @@ def client_detail(request, hostname=None, pk=None): context = dict() 
client = get_object_or_404(Client, name=hostname) if(pk == None): - context['interaction'] = client.current_interaction - return render_history_view(request, 'clients/detail.html', page_limit=5, - client=client, context=context) + inter = client.current_interaction + maxdate = None else: - context['interaction'] = client.interactions.get(pk=pk) - return render_history_view(request, 'clients/detail.html', page_limit=5, - client=client, maxdate=context['interaction'].timestamp, context=context) + inter = client.interactions.get(pk=pk) + maxdate = inter.timestamp + + ei = Entries_interactions.objects.filter(interaction=inter).select_related('entry').order_by('entry__kind', 'entry__name') + #ei = Entries_interactions.objects.filter(interaction=inter).select_related('entry') + #ei = sorted(Entries_interactions.objects.filter(interaction=inter).select_related('entry'), + # key=lambda x: (x.entry.kind, x.entry.name)) + context['ei_lists'] = ( + ('bad', [x for x in ei if x.type == TYPE_BAD]), + ('modified', [x for x in ei if x.type == TYPE_MODIFIED]), + ('extra', [x for x in ei if x.type == TYPE_EXTRA]) + ) + + context['interaction']=inter + return render_history_view(request, 'clients/detail.html', page_limit=5, + client=client, maxdate=maxdate, context=context) def client_manage(request): @@ -230,9 +403,9 @@ def display_summary(request, timestamp=None): """ Display a summary of the bcfg2 world """ - query = Interaction.objects.interaction_per_client(timestamp).select_related() - node_count = query.count() - recent_data = query.all() + recent_data = Interaction.objects.interaction_per_client(timestamp) \ + .select_related().all() + node_count = len(recent_data) if not timestamp: timestamp = datetime.now() @@ -240,18 +413,11 @@ def display_summary(request, timestamp=None): bad=[], modified=[], extra=[], - stale=[], - pings=[]) + stale=[]) for node in recent_data: if timestamp - node.timestamp > timedelta(hours=24): collected_data['stale'].append(node) # If stale check for 
uptime - try: - if node.client.pings.latest().status == 'N': - collected_data['pings'].append(node) - except Ping.DoesNotExist: - collected_data['pings'].append(node) - continue if node.bad_entry_count() > 0: collected_data['bad'].append(node) else: @@ -281,9 +447,6 @@ def display_summary(request, timestamp=None): if len(collected_data['stale']) > 0: summary_data.append(get_dict('stale', 'nodes did not run within the last 24 hours.')) - if len(collected_data['pings']) > 0: - summary_data.append(get_dict('pings', - 'are down.')) return render_to_response('displays/summary.html', {'summary_data': summary_data, 'node_count': node_count, @@ -299,7 +462,11 @@ def display_timing(request, timestamp=None): for inter in inters] for metric in Performance.objects.filter(interaction__in=list(mdict.keys())).all(): for i in metric.interaction.all(): - mdict[i][metric.metric] = metric.value + try: + mdict[i][metric.metric] = metric.value + except KeyError: + #In the unlikely event two interactions share a metric, ignore it + pass return render_to_response('displays/timing.html', {'metrics': list(mdict.values()), 'timestamp': timestamp}, @@ -324,6 +491,7 @@ def render_history_view(request, template='clients/history.html', **kwargs): not found server -- Filter interactions by server state -- Filter interactions by state + group -- Filter interactions by group entry_max -- Most recent interaction to display orderby -- Sort results using this field @@ -345,15 +513,15 @@ def render_history_view(request, template='clients/history.html', **kwargs): # Either filter by client or limit by clients iquery = kwargs.get('interaction_base', Interaction.objects) if client: - iquery = iquery.filter(client__exact=client).select_related() + iquery = iquery.filter(client__exact=client) + iquery = iquery.select_related() if 'orderby' in kwargs and kwargs['orderby']: iquery = iquery.order_by(kwargs['orderby']) + if 'sort' in kwargs: + context['sort'] = kwargs['sort'] - if 'state' in kwargs and 
kwargs['state']: - iquery = iquery.filter(state__exact=kwargs['state']) - if 'server' in kwargs and kwargs['server']: - iquery = iquery.filter(server__exact=kwargs['server']) + iquery = _handle_filters(iquery, **kwargs) if entry_max: iquery = iquery.filter(timestamp__lte=entry_max) diff --git a/src/lib/Bcfg2/Server/Reports/settings.py b/src/lib/Bcfg2/Server/Reports/settings.py deleted file mode 100644 index 4d567f1a2..000000000 --- a/src/lib/Bcfg2/Server/Reports/settings.py +++ /dev/null @@ -1,161 +0,0 @@ -import django -import sys - -# Compatibility import -from Bcfg2.Bcfg2Py3k import ConfigParser -# Django settings for bcfg2 reports project. -c = ConfigParser.ConfigParser() -if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0: - raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists " - "and is readable by your web server.") - -try: - DEBUG = c.getboolean('statistics', 'web_debug') -except: - DEBUG = False - -if DEBUG: - print("Warning: Setting web_debug to True causes extraordinary memory " - "leaks. 
Only use this setting if you know what you're doing.") - -TEMPLATE_DEBUG = DEBUG - -ADMINS = ( - ('Root', 'root'), -) - -MANAGERS = ADMINS -try: - db_engine = c.get('statistics', 'database_engine') -except ConfigParser.NoSectionError: - e = sys.exc_info()[1] - raise ImportError("Failed to determine database engine: %s" % e) -db_name = '' -if c.has_option('statistics', 'database_name'): - db_name = c.get('statistics', 'database_name') -if db_engine == 'sqlite3' and db_name == '': - db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository') - -DATABASES = { - 'default': { - 'ENGINE': "django.db.backends.%s" % db_engine, - 'NAME': db_name - } -} - -if db_engine != 'sqlite3': - DATABASES['default']['USER'] = c.get('statistics', 'database_user') - DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password') - DATABASES['default']['HOST'] = c.get('statistics', 'database_host') - try: - DATABASES['default']['PORT'] = c.get('statistics', 'database_port') - except: # An empty string tells Django to use the default port. - DATABASES['default']['PORT'] = '' - -if django.VERSION[0] == 1 and django.VERSION[1] < 2: - DATABASE_ENGINE = db_engine - DATABASE_NAME = DATABASES['default']['NAME'] - if DATABASE_ENGINE != 'sqlite3': - DATABASE_USER = DATABASES['default']['USER'] - DATABASE_PASSWORD = DATABASES['default']['PASSWORD'] - DATABASE_HOST = DATABASES['default']['HOST'] - DATABASE_PORT = DATABASES['default']['PORT'] - - -# Local time zone for this installation. All choices can be found here: -# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone -if django.VERSION[0] == 1 and django.VERSION[1] > 2: - try: - TIME_ZONE = c.get('statistics', 'time_zone') - except: - TIME_ZONE = None - -# Language code for this installation. 
All choices can be found here: -# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes -# http://blogs.law.harvard.edu/tech/stories/storyReader$15 -LANGUAGE_CODE = 'en-us' - -SITE_ID = 1 - -# Absolute path to the directory that holds media. -# Example: "/home/media/media.lawrence.com/" -MEDIA_ROOT = '' - -# URL that handles the media served from MEDIA_ROOT. -# Example: "http://media.lawrence.com" -MEDIA_URL = '/site_media' -if c.has_option('statistics', 'web_prefix'): - MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL - -# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a -# trailing slash. -# Examples: "http://foo.com/media/", "/media/". -ADMIN_MEDIA_PREFIX = '/media/' - -# Make this unique, and don't share it with anybody. -SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7' - -# List of callables that know how to import templates from various sources. -TEMPLATE_LOADERS = ( - 'django.template.loaders.filesystem.load_template_source', - 'django.template.loaders.app_directories.load_template_source', -) - -MIDDLEWARE_CLASSES = ( - 'django.middleware.common.CommonMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.middleware.doc.XViewMiddleware', -) - -ROOT_URLCONF = 'Bcfg2.Server.Reports.urls' - -# Authentication Settings -# Use NIS authentication backend defined in backends.py -AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', - 'Bcfg2.Server.Reports.backends.NISBackend') -# The NIS group authorized to login to BCFG2's reportinvg system -AUTHORIZED_GROUP = '' -#create login url area: -try: - import django.contrib.auth -except ImportError: - raise ImportError('Import of Django module failed. Is Django installed?') -django.contrib.auth.LOGIN_URL = '/login' - -SESSION_EXPIRE_AT_BROWSER_CLOSE = True - - - -TEMPLATE_DIRS = ( - # Put strings here, like "/home/html/django_templates". 
- # Always use forward slashes, even on Windows. - '/usr/share/python-support/python-django/django/contrib/admin/templates/', - 'Bcfg2.Server.Reports.reports' -) - -if django.VERSION[0] == 1 and django.VERSION[1] < 2: - TEMPLATE_CONTEXT_PROCESSORS = ( - 'django.core.context_processors.auth', - 'django.core.context_processors.debug', - 'django.core.context_processors.i18n', - 'django.core.context_processors.media', - 'django.core.context_processors.request' - ) -else: - TEMPLATE_CONTEXT_PROCESSORS = ( - 'django.contrib.auth.context_processors.auth', - 'django.core.context_processors.debug', - 'django.core.context_processors.i18n', - 'django.core.context_processors.media', - 'django.core.context_processors.request' - ) - -INSTALLED_APPS = ( - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.sites', - 'django.contrib.admin', - 'Bcfg2.Server.Reports.reports' -) diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py deleted file mode 100644 index 192b94b61..000000000 --- a/src/lib/Bcfg2/Server/Reports/updatefix.py +++ /dev/null @@ -1,281 +0,0 @@ -import Bcfg2.Server.Reports.settings - -from django.db import connection, DatabaseError -import django.core.management -import logging -import sys -import traceback -from Bcfg2.Server.Reports.reports.models import InternalDatabaseVersion, \ - TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA -logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix') - - -# all update function should go here -def _merge_database_table_entries(): - cursor = connection.cursor() - insert_cursor = connection.cursor() - find_cursor = connection.cursor() - cursor.execute(""" - Select name, kind from reports_bad - union - select name, kind from reports_modified - union - select name, kind from reports_extra - """) - # this fetch could be better done - entries_map = {} - for row in cursor.fetchall(): - insert_cursor.execute("insert into reports_entries (name, kind) \ 
- values (%s, %s)", (row[0], row[1])) - entries_map[(row[0], row[1])] = insert_cursor.lastrowid - - cursor.execute(""" - Select name, kind, reason_id, interaction_id, 1 from reports_bad - inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id - union - Select name, kind, reason_id, interaction_id, 2 from reports_modified - inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id - union - Select name, kind, reason_id, interaction_id, 3 from reports_extra - inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id - """) - for row in cursor.fetchall(): - key = (row[0], row[1]) - if entries_map.get(key, None): - entry_id = entries_map[key] - else: - find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key) - rowe = find_cursor.fetchone() - entry_id = rowe[0] - insert_cursor.execute("insert into reports_entries_interactions \ - (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4])) - - -def _interactions_constraint_or_idx(): - """sqlite doesn't support alter tables.. 
or constraints""" - cursor = connection.cursor() - try: - cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)') - except: - cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)') - - -def _remove_table_column(tbl, col): - """sqlite doesn't support deleting a column via alter table""" - cursor = connection.cursor() - db_engine = Bcfg2.Server.Reports.settings.DATABASES['default']['ENGINE'] - if db_engine == 'django.db.backends.mysql': - db_name = Bcfg2.Server.Reports.settings.DATABASES['default']['NAME'] - column_exists = cursor.execute('select * from information_schema.columns ' - 'where table_schema="%s" and ' - 'table_name="%s" ' - 'and column_name="%s";' % (db_name, tbl, col)) - if not column_exists: - # column doesn't exist - return - # if column exists from previous database, remove it - cursor.execute('alter table %s ' - 'drop column %s;' % (tbl, col)) - elif db_engine == 'django.db.backends.sqlite3': - # check if table exists - try: - cursor.execute('select * from sqlite_master where name=%s and type="table";' % tbl) - except DatabaseError: - # table doesn't exist - return - - # sqlite wants us to create a new table containing the columns we want - # and copy into it http://www.sqlite.org/faq.html#q11 - tmptbl_name = "t_backup" - _tmptbl_create = \ -"""create temporary table "%s" ( - "id" integer NOT NULL PRIMARY KEY, - "client_id" integer NOT NULL REFERENCES "reports_client" ("id"), - "timestamp" datetime NOT NULL, - "state" varchar(32) NOT NULL, - "repo_rev_code" varchar(64) NOT NULL, - "goodcount" integer NOT NULL, - "totalcount" integer NOT NULL, - "server" varchar(256) NOT NULL, - "bad_entries" integer NOT NULL, - "modified_entries" integer NOT NULL, - "extra_entries" integer NOT NULL, - UNIQUE ("client_id", "timestamp") -);""" % tmptbl_name - _newtbl_create = \ -"""create table "%s" ( - "id" integer NOT NULL PRIMARY KEY, - 
"client_id" integer NOT NULL REFERENCES "reports_client" ("id"), - "timestamp" datetime NOT NULL, - "state" varchar(32) NOT NULL, - "repo_rev_code" varchar(64) NOT NULL, - "goodcount" integer NOT NULL, - "totalcount" integer NOT NULL, - "server" varchar(256) NOT NULL, - "bad_entries" integer NOT NULL, - "modified_entries" integer NOT NULL, - "extra_entries" integer NOT NULL, - UNIQUE ("client_id", "timestamp") -);""" % tbl - new_cols = "id,\ - client_id,\ - timestamp,\ - state,\ - repo_rev_code,\ - goodcount,\ - totalcount,\ - server,\ - bad_entries,\ - modified_entries,\ - extra_entries" - - delete_col = [_tmptbl_create, - "insert into %s select %s from %s;" % (tmptbl_name, new_cols, tbl), - "drop table %s" % tbl, - _newtbl_create, - "create index reports_interaction_client_id on %s (client_id);" % tbl, - "insert into %s select %s from %s;" % (tbl, new_cols, - tmptbl_name), - "drop table %s;" % tmptbl_name] - - for sql in delete_col: - cursor.execute(sql) - - -def _populate_interaction_entry_counts(): - '''Populate up the type totals for the interaction table''' - cursor = connection.cursor() - count_field = {TYPE_BAD: 'bad_entries', - TYPE_MODIFIED: 'modified_entries', - TYPE_EXTRA: 'extra_entries'} - - for type in list(count_field.keys()): - cursor.execute("select count(type), interaction_id " + - "from reports_entries_interactions where type = %s group by interaction_id" % type) - updates = [] - for row in cursor.fetchall(): - updates.append(row) - try: - cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates) - except Exception: - e = sys.exc_info()[1] - print(e) - cursor.close() - - -# be sure to test your upgrade query before reflecting the change in the models -# the list of function and sql command to do should go here -_fixes = [_merge_database_table_entries, - # this will remove unused tables - "drop table reports_bad;", - "drop table reports_bad_interactions;", - "drop table reports_extra;", - "drop table 
reports_extra_interactions;", - "drop table reports_modified;", - "drop table reports_modified_interactions;", - "drop table reports_repository;", - "drop table reports_metadata;", - "alter table reports_interaction add server varchar(256) not null default 'N/A';", - # fix revision data type to support $VCS hashes - "alter table reports_interaction add repo_rev_code varchar(64) default '';", - # Performance enhancements for large sites - 'alter table reports_interaction add column bad_entries integer not null default -1;', - 'alter table reports_interaction add column modified_entries integer not null default -1;', - 'alter table reports_interaction add column extra_entries integer not null default -1;', - _populate_interaction_entry_counts, - _interactions_constraint_or_idx, - 'alter table reports_reason add is_binary bool NOT NULL default False;', - 'alter table reports_reason add is_sensitive bool NOT NULL default False;', - _remove_table_column('reports_interaction', 'client_version'), - "alter table reports_reason add unpruned varchar(1280) not null default '';", -] - -# this will calculate the last possible version of the database -lastversion = len(_fixes) - - -def rollupdate(current_version): - """function responsible to coordinates all the updates - need current_version as integer - """ - ret = None - if current_version < lastversion: - for i in range(current_version, lastversion): - try: - if type(_fixes[i]) == str: - connection.cursor().execute(_fixes[i]) - else: - _fixes[i]() - except: - logger.error("Failed to perform db update %s" % (_fixes[i]), - exc_info=1) - # since the array starts at 0 but version - # starts at 1 we add 1 to the normal count - ret = InternalDatabaseVersion.objects.create(version=i + 1) - return ret - else: - return None - - -def dosync(): - """Function to do the syncronisation for the models""" - # try to detect if it's a fresh new database - try: - cursor = connection.cursor() - # If this table goes missing, - # don't forget to 
change it to the new one - cursor.execute("Select * from reports_client") - # if we get here with no error then the database has existing tables - fresh = False - except: - logger.debug("there was an error while detecting " - "the freshness of the database") - #we should get here if the database is new - fresh = True - - # ensure database connections are closed - # so that the management can do its job right - try: - cursor.close() - connection.close() - except: - # ignore any errors from missing/invalid dbs - pass - # Do the syncdb according to the django version - if "call_command" in dir(django.core.management): - # this is available since django 1.0 alpha. - # not yet tested for full functionnality - django.core.management.call_command("syncdb", interactive=False, verbosity=0) - if fresh: - django.core.management.call_command("loaddata", 'initial_version.xml', verbosity=0) - elif "syncdb" in dir(django.core.management): - # this exist only for django 0.96.* - django.core.management.syncdb(interactive=False, verbosity=0) - if fresh: - logger.debug("loading the initial_version fixtures") - django.core.management.load_data(fixture_labels=['initial_version'], verbosity=0) - else: - logger.warning("Don't forget to run syncdb") - - -def update_database(): - """method to search where we are in the revision - of the database models and update them""" - try: - logger.debug("Running upgrade of models to the new one") - dosync() - know_version = InternalDatabaseVersion.objects.order_by('-version') - if not know_version: - logger.debug("No version, creating initial version") - know_version = InternalDatabaseVersion.objects.create(version=0) - else: - know_version = know_version[0] - logger.debug("Presently at %s" % know_version) - if know_version.version < lastversion: - new_version = rollupdate(know_version.version) - if new_version: - logger.debug("upgraded to %s" % new_version) - except: - logger.error("Error while updating the database") - for x in 
traceback.format_exc().splitlines(): - logger.error(x) diff --git a/src/lib/Bcfg2/Server/Reports/utils.py b/src/lib/Bcfg2/Server/Reports/utils.py index e0b6ead59..c47763e39 100755 --- a/src/lib/Bcfg2/Server/Reports/utils.py +++ b/src/lib/Bcfg2/Server/Reports/utils.py @@ -3,7 +3,7 @@ from django.conf.urls.defaults import * import re """List of filters provided by filteredUrls""" -filter_list = ('server', 'state') +filter_list = ('server', 'state', 'group') class BatchFetch(object): @@ -97,6 +97,8 @@ def filteredUrls(pattern, view, kwargs=None, name=None): tail = mtail.group(1) pattern = pattern[:len(pattern) - len(tail)] for filter in ('/state/(?P<state>\w+)', + '/group/(?P<group>[\w\-\.]+)', + '/group/(?P<group>[\w\-\.]+)/(?P<state>[A-Za-z]+)', '/server/(?P<server>[\w\-\.]+)', '/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'): results += [(pattern + filter + tail, view, kwargs)] diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py new file mode 100644 index 000000000..ff4c24328 --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py @@ -0,0 +1,11 @@ +""" +1_0_x.py + +This file should contain updates relevant to the 1.0.x branches ONLY. +The updates() method must be defined and it should return an Updater object +""" +from Bcfg2.Server.SchemaUpdater import UnsupportedUpdate + +def updates(): + return UnsupportedUpdate("1.0", 10) + diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py new file mode 100644 index 000000000..0d28786fd --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py @@ -0,0 +1,59 @@ +""" +1_1_x.py + +This file should contain updates relevant to the 1.1.x branches ONLY. 
+The updates() method must be defined and it should return an Updater object +""" +from Bcfg2.Server.SchemaUpdater import Updater +from Bcfg2.Server.SchemaUpdater.Routines import updatercallable + +from django.db import connection +import sys +import Bcfg2.settings +from Bcfg2.Server.Reports.reports.models import \ + TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA + +@updatercallable +def _interactions_constraint_or_idx(): + """sqlite doesn't support alter tables.. or constraints""" + cursor = connection.cursor() + try: + cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)') + except: + cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)') + + +@updatercallable +def _populate_interaction_entry_counts(): + '''Populate up the type totals for the interaction table''' + cursor = connection.cursor() + count_field = {TYPE_BAD: 'bad_entries', + TYPE_MODIFIED: 'modified_entries', + TYPE_EXTRA: 'extra_entries'} + + for type in list(count_field.keys()): + cursor.execute("select count(type), interaction_id " + + "from reports_entries_interactions where type = %s group by interaction_id" % type) + updates = [] + for row in cursor.fetchall(): + updates.append(row) + try: + cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates) + except Exception: + e = sys.exc_info()[1] + print(e) + cursor.close() + + +def updates(): + fixes = Updater("1.1") + fixes.override_base_version(12) # Do not do this in new code + + fixes.add('alter table reports_interaction add column bad_entries integer not null default -1;') + fixes.add('alter table reports_interaction add column modified_entries integer not null default -1;') + fixes.add('alter table reports_interaction add column extra_entries integer not null default -1;') + fixes.add(_populate_interaction_entry_counts()) + fixes.add(_interactions_constraint_or_idx()) + 
fixes.add('alter table reports_reason add is_binary bool NOT NULL default False;') + return fixes + diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py new file mode 100644 index 000000000..024965bd5 --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py @@ -0,0 +1,15 @@ +""" +1_2_x.py + +This file should contain updates relevant to the 1.2.x branches ONLY. +The updates() method must be defined and it should return an Updater object +""" +from Bcfg2.Server.SchemaUpdater import Updater +from Bcfg2.Server.SchemaUpdater.Routines import updatercallable + +def updates(): + fixes = Updater("1.2") + fixes.override_base_version(18) # Do not do this in new code + fixes.add('alter table reports_reason add is_sensitive bool NOT NULL default False;') + return fixes + diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py new file mode 100644 index 000000000..4fc57c653 --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py @@ -0,0 +1,27 @@ +""" +1_3_0.py + +This file should contain updates relevant to the 1.3.x branches ONLY. 
+The updates() method must be defined and it should return an Updater object +""" +from Bcfg2.Server.SchemaUpdater import Updater, UpdaterError +from Bcfg2.Server.SchemaUpdater.Routines import AddColumns, \ + RemoveColumns, RebuildTable, DropTable + +from Bcfg2.Server.Reports.reports.models import Reason, Interaction + + +def updates(): + fixes = Updater("1.3") + fixes.add(RemoveColumns(Interaction, 'client_version')) + fixes.add(AddColumns(Reason)) + fixes.add(RebuildTable(Reason, [ + 'owner', 'current_owner', + 'group', 'current_group', + 'perms', 'current_perms', + 'status', 'current_status', + 'to', 'current_to'])) + fixes.add(DropTable('reports_ping')) + + return fixes + diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py b/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py new file mode 100644 index 000000000..4fcf0e6bf --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py @@ -0,0 +1,279 @@ +import logging +import traceback +from django.db.models.fields import NOT_PROVIDED +from django.db import connection, DatabaseError, backend, models +from django.core.management.color import no_style +from django.core.management.sql import sql_create +import django.core.management + +import Bcfg2.settings + +logger = logging.getLogger(__name__) + +def _quote(value): + """ + Quote a string to use as a table name or column + """ + return backend.DatabaseOperations().quote_name(value) + + +def _rebuild_sqlite_table(model): + """Sqlite doesn't support most alter table statments. 
This streamlines the + rebuild process""" + try: + cursor = connection.cursor() + table_name = model._meta.db_table + + # Build create staement from django + model._meta.db_table = "%s_temp" % table_name + sql, references = connection.creation.sql_create_model(model, no_style()) + columns = ",".join([_quote(f.column) \ + for f in model._meta.fields]) + + # Create a temp table + [cursor.execute(s) for s in sql] + + # Fill the table + tbl_name = _quote(table_name) + tmp_tbl_name = _quote(model._meta.db_table) + # Reset this + model._meta.db_table = table_name + cursor.execute("insert into %s(%s) select %s from %s;" % ( + tmp_tbl_name, + columns, + columns, + tbl_name)) + cursor.execute("drop table %s" % tbl_name) + + # Call syncdb to create the table again + django.core.management.call_command("syncdb", interactive=False, verbosity=0) + # syncdb closes our cursor + cursor = connection.cursor() + # Repopulate + cursor.execute('insert into %s(%s) select %s from %s;' % (tbl_name, + columns, + columns, + tmp_tbl_name)) + cursor.execute('DROP TABLE %s;' % tmp_tbl_name) + except DatabaseError: + logger.error("Failed to rebuild sqlite table %s" % table_name, exc_info=1) + raise UpdaterRoutineException + + +class UpdaterRoutineException(Exception): + pass + + +class UpdaterRoutine(object): + """Base for routines.""" + def __init__(self): + pass + + def __str__(self): + return __name__ + + def run(self): + """Called to execute the action""" + raise UpdaterRoutineException + + + +class AddColumns(UpdaterRoutine): + """ + Routine to add new columns to an existing model + """ + def __init__(self, model): + self.model = model + self.model_name = model.__name__ + + def __str__(self): + return "Add new columns for model %s" % self.model_name + + def run(self): + try: + cursor = connection.cursor() + except DatabaseError: + logger.error("Failed to connect to the db") + raise UpdaterRoutineException + + try: + desc = {} + for d in 
connection.introspection.get_table_description(cursor, + self.model._meta.db_table): + desc[d[0]] = d + except DatabaseError: + logger.error("Failed to get table description", exc_info=1) + raise UpdaterRoutineException + + for field in self.model._meta.fields: + if field.column in desc: + continue + logger.debug("Column %s does not exist yet" % field.column) + if field.default == NOT_PROVIDED: + logger.error("Cannot add a column with out a default value") + raise UpdaterRoutineException + + sql = "ALTER TABLE %s ADD %s %s NOT NULL DEFAULT " % ( + _quote(self.model._meta.db_table), + _quote(field.column), field.db_type(), ) + db_engine = Bcfg2.settings.DATABASES['default']['ENGINE'] + if db_engine == 'django.db.backends.sqlite3': + sql += _quote(field.default) + sql_values = () + else: + sql += '%s' + sql_values = (field.default, ) + try: + cursor.execute(sql, sql_values) + logger.debug("Added column %s to %s" % + (field.column, self.model._meta.db_table)) + except DatabaseError: + logger.error("Unable to add column %s" % field.column) + raise UpdaterRoutineException + + +class RebuildTable(UpdaterRoutine): + """ + Rebuild the table for an existing model. Use this if field types have changed. + """ + def __init__(self, model, columns): + self.model = model + self.model_name = model.__name__ + + if type(columns) == str: + self.columns = [columns] + elif type(columns) in (tuple, list): + self.columns = columns + else: + logger.error("Columns must be a str, tuple, or list") + raise UpdaterRoutineException + + + def __str__(self): + return "Rebuild columns for model %s" % self.model_name + + def run(self): + try: + cursor = connection.cursor() + except DatabaseError: + logger.error("Failed to connect to the db") + raise UpdaterRoutineException + + db_engine = Bcfg2.settings.DATABASES['default']['ENGINE'] + if db_engine == 'django.db.backends.sqlite3': + """ Sqlite is a special case. Altering columns is not supported. 
""" + _rebuild_sqlite_table(self.model) + return + + if db_engine == 'django.db.backends.mysql': + modify_cmd = 'MODIFY ' + else: + modify_cmd = 'ALTER COLUMN ' + + col_strings = [] + for column in self.columns: + col_strings.append("%s %s %s" % ( \ + modify_cmd, + _quote(column), + self.model._meta.get_field(column).db_type() + )) + + try: + cursor.execute('ALTER TABLE %s %s' % + (_quote(self.model._meta.db_table), ", ".join(col_strings))) + except DatabaseError: + logger.debug("Failed modify table %s" % self.model._meta.db_table) + raise UpdaterRoutineException + + + +class RemoveColumns(RebuildTable): + """ + Routine to remove columns from an existing model + """ + def __init__(self, model, columns): + super(RemoveColumns, self).__init__(model, columns) + + + def __str__(self): + return "Remove columns from model %s" % self.model_name + + def run(self): + try: + cursor = connection.cursor() + except DatabaseError: + logger.error("Failed to connect to the db") + raise UpdaterRoutineException + + try: + columns = [d[0] for d in connection.introspection.get_table_description(cursor, + self.model._meta.db_table)] + except DatabaseError: + logger.error("Failed to get table description", exc_info=1) + raise UpdaterRoutineException + + for column in self.columns: + if column not in columns: + logger.warning("Cannot drop column %s: does not exist" % column) + continue + + logger.debug("Dropping column %s" % column) + + db_engine = Bcfg2.settings.DATABASES['default']['ENGINE'] + if db_engine == 'django.db.backends.sqlite3': + _rebuild_sqlite_table(self.model) + else: + sql = "alter table %s drop column %s" % \ + (_quote(self.model._meta.db_table), _quote(column), ) + try: + cursor.execute(sql) + except DatabaseError: + logger.debug("Failed to drop column %s from %s" % + (column, self.model._meta.db_table)) + raise UpdaterRoutineException + + +class DropTable(UpdaterRoutine): + """ + Drop a table + """ + def __init__(self, table_name): + self.table_name = table_name + + 
def __str__(self): + return "Drop table %s" % self.table_name + + def run(self): + try: + cursor = connection.cursor() + cursor.execute('DROP TABLE %s' % _quote(self.table_name)) + except DatabaseError: + logger.error("Failed to drop table: %s" % + traceback.format_exc().splitlines()[-1]) + raise UpdaterRoutineException + + +class UpdaterCallable(UpdaterRoutine): + """Helper for routines. Basically delays execution""" + def __init__(self, fn): + self.fn = fn + self.args = [] + self.kwargs = {} + + def __call__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + return self + + def __str__(self): + return self.fn.__name__ + + def run(self): + self.fn(*self.args, **self.kwargs) + +def updatercallable(fn): + """Decorator for UpdaterCallable. Use for any function passed + into the fixes list""" + return UpdaterCallable(fn) + + diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py b/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py new file mode 100644 index 000000000..304b36636 --- /dev/null +++ b/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py @@ -0,0 +1,257 @@ +from django.db import connection, DatabaseError +from django.core.exceptions import ImproperlyConfigured +import django.core.management +import logging +import pkgutil +import re +import sys +import traceback + +from Bcfg2.Bcfg2Py3k import CmpMixin +from Bcfg2.Server.models import InternalDatabaseVersion +from Bcfg2.Server.SchemaUpdater.Routines import UpdaterRoutineException, \ + UpdaterRoutine +from Bcfg2.Server.SchemaUpdater import Changes + +logger = logging.getLogger(__name__) + +class UpdaterError(Exception): + pass + + +class SchemaTooOldError(UpdaterError): + pass + + +def _walk_packages(paths): + """Python 2.4 lacks this routine""" + import glob + submodules = [] + for path in paths: + for submodule in glob.glob("%s/*.py" % path): + mod = '.'.join(submodule.split("/")[-1].split('.')[:-1]) + if mod != '__init__': + submodules.append((None, mod, False)) + return submodules + + +def 
_release_to_version(release): + """ + Build a release base for a version + + Expects a string of the form 00.00 + + returns an integer of the form MMmm00 + """ + regex = re.compile("^(\d+)\.(\d+)$") + m = regex.match(release) + if not m: + logger.error("Invalid release string: %s" % release) + raise TypeError + return int("%02d%02d00" % (int(m.group(1)), int(m.group(2)))) + + +class Updater(CmpMixin): + """Database updater to standardize updates""" + + def __init__(self, release): + CmpMixin.__init__(self) + + self._cursor = None + self._release = release + try: + self._base_version = _release_to_version(release) + except: + err = "Invalid release string: %s" % release + logger.error(err) + raise UpdaterError(err) + + self._fixes = [] + self._version = -1 + + def __cmp__(self, other): + return self._base_version - other._base_version + + @property + def release(self): + return self._release + + @property + def version(self): + if self._version < 0: + try: + iv = InternalDatabaseVersion.objects.latest() + self._version = iv.version + except InternalDatabaseVersion.DoesNotExist: + raise UpdaterError("No database version stored internally") + return self._version + + @property + def cursor(self): + if not self._cursor: + self._cursor = connection.cursor() + return self._cursor + + @property + def target_version(self): + if(len(self._fixes) == 0): + return self._base_version + else: + return self._base_version + len(self._fixes) - 1 + + + def add(self, update): + if type(update) == str or isinstance(update, UpdaterRoutine): + self._fixes.append(update) + else: + raise TypeError + + + def override_base_version(self, version): + """Override our starting point for old releases. New code should + not use this method""" + self._base_version = int(version) + + + @staticmethod + def get_current_version(): + """Queries the db for the latest version. 
Returns 0 for a + fresh install""" + + if "call_command" in dir(django.core.management): + django.core.management.call_command("syncdb", interactive=False, + verbosity=0) + else: + msg = "Unable to call syndb routine" + logger.warning(msg) + raise UpdaterError(msg) + + try: + iv = InternalDatabaseVersion.objects.latest() + version = iv.version + except InternalDatabaseVersion.DoesNotExist: + version = 0 + + return version + + + def syncdb(self): + """Function to do the syncronisation for the models""" + + self._version = Updater.get_current_version() + self._cursor = None + + + def increment(self): + """Increment schema version in the database""" + if self._version < self._base_version: + self._version = self._base_version + else: + self._version += 1 + InternalDatabaseVersion.objects.create(version=self._version) + + def apply(self): + """Apply pending schema changes""" + + if self.version >= self.target_version: + logger.debug("No updates for release %s" % self._release) + return + + logger.debug("Applying updates for release %s" % self._release) + + if self.version < self._base_version: + start = 0 + else: + start = self.version - self._base_version + 1 + + try: + for fix in self._fixes[start:]: + if type(fix) == str: + self.cursor.execute(fix) + elif isinstance(fix, UpdaterRoutine): + fix.run() + else: + logger.error("Invalid schema change at %s" % \ + self._version + 1) + self.increment() + logger.debug("Applied schema change number %s: %s" % \ + (self.version, fix)) + logger.info("Applied schema changes for release %s" % self._release) + except: + msg = "Failed to perform db update %s (%s): %s" % \ + (self._version + 1, fix, + traceback.format_exc().splitlines()[-1]) + logger.error(msg) + raise UpdaterError(msg) + + +class UnsupportedUpdate(Updater): + """Handle an unsupported update""" + + def __init__(self, release, version): + super(UnsupportedUpdate, self).__init__(release) + self._base_version = version + + def apply(self): + """Raise an exception if 
we're too old""" + + if self.version < self.target_version: + logger.error("Upgrade from release %s unsupported" % self._release) + raise SchemaTooOldError + + +def update_database(): + """method to search where we are in the revision + of the database models and update them""" + try: + logger.debug("Verifying database schema") + + updaters = [] + if hasattr(pkgutil, 'walk_packages'): + submodules = pkgutil.walk_packages(path=Changes.__path__) + else: + #python 2.4 + submodules = _walk_packages(Changes.__path__) + for loader, submodule, ispkg in submodules: + if ispkg: + continue + try: + updates = getattr( + __import__("%s.%s" % (Changes.__name__, submodule), + globals(), locals(), ['*']), + "updates") + updaters.append(updates()) + except ImportError: + logger.error("Failed to import %s" % submodule) + except AttributeError: + logger.warning("Module %s does not have an updates function" % + submodule) + except: + msg = "Failed to build updater for %s" % submodule + logger.error(msg, exc_info=1) + raise UpdaterError(msg) + + current_version = Updater.get_current_version() + logger.debug("Database version at %s" % current_version) + + updaters.sort() + if current_version > 0: + [u.apply() for u in updaters] + logger.debug("Database version at %s" % + Updater.get_current_version()) + else: + target = updaters[-1].target_version + InternalDatabaseVersion.objects.create(version=target) + logger.info("A new database was created") + + except UpdaterError: + raise + except ImproperlyConfigured: + logger.error("Django is not properly configured: %s" % + traceback.format_exc().splitlines()[-1]) + raise UpdaterError + except: + logger.error("Error while updating the database") + for x in traceback.format_exc().splitlines(): + logger.error(x) + raise UpdaterError diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py index 5d7973c16..0bbd206da 100644 --- a/src/lib/Bcfg2/Server/Snapshots/model.py +++ 
b/src/lib/Bcfg2/Server/Snapshots/model.py @@ -6,13 +6,7 @@ import sqlalchemy.exceptions from sqlalchemy.orm import relation, backref from sqlalchemy.ext.declarative import declarative_base - -# py3k compatibility -def u_str(string): - if sys.hexversion >= 0x03000000: - return string - else: - return unicode(string) +from Bcfg2.Bcfg2Py3k import u_str class Uniquer(object): diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py index 96777b0bf..f79b51dd3 100644 --- a/src/lib/Bcfg2/Server/__init__.py +++ b/src/lib/Bcfg2/Server/__init__.py @@ -1,4 +1,13 @@ """This is the set of modules for Bcfg2.Server.""" +import lxml.etree + __all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins", - "Hostbase", "Reports", "Snapshots"] + "Hostbase", "Reports", "Snapshots", "XMLParser", + "XI", "XI_NAMESPACE"] + +XMLParser = lxml.etree.XMLParser(remove_blank_text=True) + +XI = 'http://www.w3.org/2001/XInclude' +XI_NAMESPACE = '{%s}' % XI + diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py new file mode 100644 index 000000000..effd4d298 --- /dev/null +++ b/src/lib/Bcfg2/Server/models.py @@ -0,0 +1,77 @@ +import sys +import logging +import Bcfg2.Options +import Bcfg2.Server.Plugins +from django.db import models +from Bcfg2.Bcfg2Py3k import ConfigParser + +logger = logging.getLogger('Bcfg2.Server.models') + +MODELS = [] + +def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True): + global MODELS + + if plugins is None: + # we want to provide a different default plugin list -- + # namely, _all_ plugins, so that the database is guaranteed to + # work, even if /etc/bcfg2.conf isn't set up properly + plugin_opt = Bcfg2.Options.SERVER_PLUGINS + plugin_opt.default = Bcfg2.Server.Plugins.__all__ + + setup = Bcfg2.Options.OptionParser(dict(plugins=plugin_opt, + configfile=Bcfg2.Options.CFILE), + quiet=quiet) + setup.parse([Bcfg2.Options.CFILE.cmd, cfile]) + plugins = setup['plugins'] + + if MODELS: + # load_models() has been 
called once, so first unload all of + # the models; otherwise we might call load_models() with no + # arguments, end up with _all_ models loaded, and then in a + # subsequent call only load a subset of models + for model in MODELS: + delattr(sys.modules[__name__], model) + MODELS = [] + + for plugin in plugins: + try: + mod = getattr(__import__("Bcfg2.Server.Plugins.%s" % + plugin).Server.Plugins, plugin) + except ImportError: + try: + err = sys.exc_info()[1] + mod = __import__(plugin) + except: + if plugins != Bcfg2.Server.Plugins.__all__: + # only produce errors if the default plugin list + # was not used -- i.e., if the config file was set + # up. don't produce errors when trying to load + # all plugins, IOW. the error from the first + # attempt to import is probably more accurate than + # the second attempt. + logger.error("Failed to load plugin %s: %s" % (plugin, err)) + continue + for sym in dir(mod): + obj = getattr(mod, sym) + if hasattr(obj, "__bases__") and models.Model in obj.__bases__: + setattr(sys.modules[__name__], sym, obj) + MODELS.append(sym) + +# basic invocation to ensure that a default set of models is loaded, +# and thus that this module will always work. +load_models(quiet=True) + +# Monitor our internal db version +class InternalDatabaseVersion(models.Model): + """Object that tell us to witch version is the database.""" + version = models.IntegerField() + updated = models.DateTimeField(auto_now_add=True) + + def __str__(self): + return "version %d updated the %s" % (self.version, self.updated.isoformat()) + + class Meta: + app_label = "reports" + get_latest_by = "version" + diff --git a/src/lib/Bcfg2/manage.py b/src/lib/Bcfg2/manage.py new file mode 100755 index 000000000..3e4eedc9f --- /dev/null +++ b/src/lib/Bcfg2/manage.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +import imp +try: + imp.find_module('settings') # Assumed to be in the same directory. 
+except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__) + sys.exit(1) + +import settings + +if __name__ == "__main__": + execute_manager(settings) diff --git a/src/lib/Bcfg2/settings.py b/src/lib/Bcfg2/settings.py new file mode 100644 index 000000000..05e85bb9a --- /dev/null +++ b/src/lib/Bcfg2/settings.py @@ -0,0 +1,161 @@ +import os +import sys +import Bcfg2.Options + +try: + import django + has_django = True +except: + has_django = False + +DATABASES = dict() + +# Django < 1.2 compat +DATABASE_ENGINE = None +DATABASE_NAME = None +DATABASE_USER = None +DATABASE_PASSWORD = None +DATABASE_HOST = None +DATABASE_PORT = None + +TIME_ZONE = None + +DEBUG = False +TEMPLATE_DEBUG = DEBUG + +MEDIA_URL = '/site_media' + +# default config file is /etc/bcfg2-web.conf, UNLESS /etc/bcfg2.conf +# exists AND /etc/bcfg2-web.conf does not exist. 
+DEFAULT_CONFIG = Bcfg2.Options.WEB_CFILE.default +if (not os.path.exists(Bcfg2.Options.WEB_CFILE.default) and + os.path.exists(Bcfg2.Options.CFILE.default)): + DEFAULT_CONFIG = Bcfg2.Options.CFILE.default + +def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False): + global DATABASE_ENGINE, DATABASE_NAME, DATABASE_USER, DATABASE_PASSWORD, \ + DATABASE_HOST, DATABASE_PORT, DEBUG, TEMPLATE_DEBUG, TIME_ZONE, \ + MEDIA_URL + + optinfo = Bcfg2.Options.DATABASE_COMMON_OPTIONS + optinfo['repo'] = Bcfg2.Options.SERVER_REPOSITORY + setup = Bcfg2.Options.OptionParser(optinfo, quiet=quiet) + setup.parse([Bcfg2.Options.WEB_CFILE.cmd, cfile]) + + if repo is None: + repo = setup['repo'] + + DATABASES['default'] = \ + dict(ENGINE="django.db.backends.%s" % setup['db_engine'], + NAME=setup['db_name'], + USER=setup['db_user'], + PASSWORD=setup['db_password'], + HOST=setup['db_host'], + PORT=setup['db_port']) + + if has_django and django.VERSION[0] == 1 and django.VERSION[1] < 2: + DATABASE_ENGINE = setup['db_engine'] + DATABASE_NAME = DATABASES['default']['NAME'] + DATABASE_USER = DATABASES['default']['USER'] + DATABASE_PASSWORD = DATABASES['default']['PASSWORD'] + DATABASE_HOST = DATABASES['default']['HOST'] + DATABASE_PORT = DATABASES['default']['PORT'] + + # dropping the version check. This was added in 1.1.2 + TIME_ZONE = setup['time_zone'] + + DEBUG = setup['django_debug'] + TEMPLATE_DEBUG = DEBUG + if DEBUG: + print("Warning: Setting web_debug to True causes extraordinary memory " + "leaks. Only use this setting if you know what you're doing.") + + if setup['web_prefix']: + MEDIA_URL = setup['web_prefix'].rstrip('/') + MEDIA_URL + else: + MEDIA_URL = '/site_media' + + +# initialize settings from /etc/bcfg2.conf, or set up basic defaults. +# this lets manage.py work in all cases +read_config(quiet=True) + +ADMINS = (('Root', 'root')) +MANAGERS = ADMINS + +# Language code for this installation. 
All choices can be found here: +# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes +# http://blogs.law.harvard.edu/tech/stories/storyReader$15 +LANGUAGE_CODE = 'en-us' + +SITE_ID = 1 + +# TODO - sanitize this +INSTALLED_APPS = ( + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.admin', + 'Bcfg2.Server.Reports.reports', + 'Bcfg2.Server' +) + +# Imported from Bcfg2.Server.Reports +MEDIA_ROOT = '' + +# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a +# trailing slash. +ADMIN_MEDIA_PREFIX = '/media/' + +#TODO - make this unique +# Make this unique, and don't share it with anybody. +SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7' + +TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', +) + +#TODO - review these. auth and sessions aren't really used +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.middleware.doc.XViewMiddleware', +) + +# TODO - move this to a higher root and dynamically import +ROOT_URLCONF = 'Bcfg2.Server.Reports.urls' + +# TODO - this isn't usable +# Authentication Settings +AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend') + +LOGIN_URL = '/login' + +SESSION_EXPIRE_AT_BROWSER_CLOSE = True + +TEMPLATE_DIRS = ( + # App loaders should take care of this.. 
not sure why this is here + '/usr/share/python-support/python-django/django/contrib/admin/templates/', +) + +# TODO - sanitize this +if has_django and django.VERSION[0] == 1 and django.VERSION[1] < 2: + TEMPLATE_CONTEXT_PROCESSORS = ( + 'django.core.context_processors.auth', + 'django.core.context_processors.debug', + 'django.core.context_processors.i18n', + 'django.core.context_processors.media', + 'django.core.context_processors.request' + ) +else: + TEMPLATE_CONTEXT_PROCESSORS = ( + 'django.contrib.auth.context_processors.auth', + 'django.core.context_processors.debug', + 'django.core.context_processors.i18n', + 'django.core.context_processors.media', + 'django.core.context_processors.request' + ) + diff --git a/src/lib/Bcfg2/version.py b/src/lib/Bcfg2/version.py new file mode 100644 index 000000000..ac10dac94 --- /dev/null +++ b/src/lib/Bcfg2/version.py @@ -0,0 +1,115 @@ +import re + +__version__ = "1.3.0" + +class Bcfg2VersionInfo(tuple): + v_re = re.compile(r'(\d+)(\w+)(\d+)') + + def __new__(cls, vstr): + (major, minor, rest) = vstr.split(".") + match = cls.v_re.match(rest) + if match: + micro, releaselevel, serial = match.groups() + else: + micro = rest + releaselevel = 'final' + serial = 0 + return tuple.__new__(cls, [int(major), int(minor), int(micro), + releaselevel, int(serial)]) + + def __init__(self, vstr): + tuple.__init__(self) + self.major, self.minor, self.micro, self.releaselevel, self.serial = \ + tuple(self) + + def __repr__(self): + return "(major=%s, minor=%s, micro=%s, releaselevel=%s, serial=%s)" % \ + tuple(self) + + def _release_cmp(self, r1, r2): + if r1 == r2: + return 0 + elif r1 == "final": + return -1 + elif r2 == "final": + return 1 + elif r1 == "rc": + return -1 + elif r2 == "rc": + return 1 + # should never get to anything past this point + elif r1 == "pre": + return -1 + elif r2 == "pre": + return 1 + else: + # wtf? 
+ return 0 + + def __gt__(self, version): + if version is None: + # older bcfg2 clients didn't report their version, so we + # handle this case specially and assume that any reported + # version is newer than any indeterminate version + return True + try: + for i in range(3): + if self[i] > version[i]: + return True + elif self[i] < version[i]: + return False + rel = self._release_cmp(self[3], version[3]) + if rel < 0: + return True + elif rel > 0: + return False + if self[4] > version[4]: + return True + else: + return False + except TypeError: + return self > Bcfg2VersionInfo(version) + + def __lt__(self, version): + if version is None: + # older bcfg2 clients didn't report their version, so we + # handle this case specially and assume that any reported + # version is newer than any indeterminate version + return False + try: + for i in range(3): + if self[i] < version[i]: + return True + elif self[i] > version[i]: + return False + rel = self._release_cmp(self[3], version[3]) + if rel > 0: + return True + elif rel < 0: + return False + if self[4] < version[4]: + return True + else: + return False + except TypeError: + return self < Bcfg2VersionInfo(version) + + def __eq__(self, version): + if version is None: + # older bcfg2 clients didn't report their version, so we + # handle this case specially and assume that any reported + # version is newer than any indeterminate version + return False + try: + rv = True + for i in range(len(self)): + rv &= self[i] == version[i] + return rv + except TypeError: + return self == Bcfg2VersionInfo(version) + + def __ge__(self, version): + return not self < version + + def __le__(self, version): + return not self > version diff --git a/src/sbin/bcfg2 b/src/sbin/bcfg2 index fb34e627b..3fbeb0a62 100755 --- a/src/sbin/bcfg2 +++ b/src/sbin/bcfg2 @@ -18,6 +18,8 @@ import Bcfg2.Client.Tools # Compatibility imports from Bcfg2.Bcfg2Py3k import xmlrpclib +from Bcfg2.version import __version__ + import Bcfg2.Proxy import Bcfg2.Logger @@ 
-27,10 +29,6 @@ def cb_sigint_handler(signum, frame): """Exit upon CTRL-C.""" os._exit(1) -DECISION_LIST = Bcfg2.Options.Option('Decision List', default=False, - cmd="--decision-list", odesc='<file>', - long_arg=True) - class Client: """The main bcfg2 client class""" @@ -38,46 +36,8 @@ class Client: def __init__(self): self.toolset = None self.config = None - - optinfo = { - # 'optname': (('-a', argdesc, optdesc), - # env, cfpath, default, boolean)), - 'verbose': Bcfg2.Options.VERBOSE, - 'extra': Bcfg2.Options.CLIENT_EXTRA_DISPLAY, - 'quick': Bcfg2.Options.CLIENT_QUICK, - 'debug': Bcfg2.Options.DEBUG, - 'lockfile': Bcfg2.Options.LOCKFILE, - 'drivers': Bcfg2.Options.CLIENT_DRIVERS, - 'dryrun': Bcfg2.Options.CLIENT_DRYRUN, - 'paranoid': Bcfg2.Options.CLIENT_PARANOID, - 'bundle': Bcfg2.Options.CLIENT_BUNDLE, - 'bundle-quick': Bcfg2.Options.CLIENT_BUNDLEQUICK, - 'indep': Bcfg2.Options.CLIENT_INDEP, - 'file': Bcfg2.Options.CLIENT_FILE, - 'interactive': Bcfg2.Options.INTERACTIVE, - 'cache': Bcfg2.Options.CLIENT_CACHE, - 'profile': Bcfg2.Options.CLIENT_PROFILE, - 'remove': Bcfg2.Options.CLIENT_REMOVE, - 'help': Bcfg2.Options.HELP, - 'setup': Bcfg2.Options.CFILE, - 'server': Bcfg2.Options.SERVER_LOCATION, - 'user': Bcfg2.Options.CLIENT_USER, - 'password': Bcfg2.Options.SERVER_PASSWORD, - 'retries': Bcfg2.Options.CLIENT_RETRIES, - 'kevlar': Bcfg2.Options.CLIENT_KEVLAR, - 'decision-list': DECISION_LIST, - 'encoding': Bcfg2.Options.ENCODING, - 'omit-lock-check': Bcfg2.Options.OMIT_LOCK_CHECK, - 'filelog': Bcfg2.Options.LOGGING_FILE_PATH, - 'decision': Bcfg2.Options.CLIENT_DLIST, - 'servicemode': Bcfg2.Options.CLIENT_SERVICE_MODE, - 'key': Bcfg2.Options.CLIENT_KEY, - 'certificate': Bcfg2.Options.CLIENT_CERT, - 'ca': Bcfg2.Options.CLIENT_CA, - 'serverCN': Bcfg2.Options.CLIENT_SCNS, - 'timeout': Bcfg2.Options.CLIENT_TIMEOUT, - } - + + optinfo = Bcfg2.Options.CLIENT_COMMON_OPTIONS self.setup = Bcfg2.Options.OptionParser(optinfo) self.setup.parse(sys.argv[1:]) @@ -85,38 +45,38 @@ 
class Client: print("Bcfg2 takes no arguments, only options") print(self.setup.buildHelpMessage()) raise SystemExit(1) - level = 30 - if self.setup['verbose']: - level = 20 if self.setup['debug']: - level = 0 + level = logging.DEBUG + elif self.setup['verbose']: + level = logging.INFO + else: + level = logging.WARNING Bcfg2.Logger.setup_logging('bcfg2', - to_syslog=False, + to_syslog=self.setup['syslog'], level=level, - to_file=self.setup['filelog']) + to_file=self.setup['logging']) self.logger = logging.getLogger('bcfg2') self.logger.debug(self.setup) - if self.setup['bundle-quick']: - if self.setup['bundle'] == []: - self.logger.error("-Q option requires -b") + if self.setup['bundle_quick']: + if not self.setup['bundle'] and not self.setup['skipbundle']: + self.logger.error("-Q option requires -b or -B") raise SystemExit(1) - elif self.setup['remove'] != False: + elif self.setup['remove']: self.logger.error("-Q option incompatible with -r") raise SystemExit(1) if 'drivers' in self.setup and self.setup['drivers'] == 'help': self.logger.info("The following drivers are available:") self.logger.info(Bcfg2.Client.Tools.drivers) raise SystemExit(0) - if self.setup['remove'] and 'services' in self.setup['remove']: - self.logger.error("Service removal is nonsensical; removed services will only be disabled") - if self.setup['remove'] not in [False, - 'all', - 'Services', - 'Packages', - 'services', - 'packages']: - self.logger.error("Got unknown argument %s for -r" % (self.setup['remove'])) - if (self.setup["file"] != False) and (self.setup["cache"] != False): + if self.setup['remove'] and 'services' in self.setup['remove'].lower(): + self.logger.error("Service removal is nonsensical; " + "removed services will only be disabled") + if (self.setup['remove'] and + self.setup['remove'].lower() not in ['all', 'services', + 'packages']): + self.logger.error("Got unknown argument %s for -r" % + self.setup['remove']) + if self.setup["file"] and self.setup["cache"]: print("cannot 
use -f and -c together") raise SystemExit(1) if not self.setup['server'].startswith('https://'): @@ -138,11 +98,13 @@ class Client: script.write(probe.text) script.close() os.close(scripthandle) - os.chmod(script.name, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | - stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | - stat.S_IWUSR) # 0755 + os.chmod(script.name, + stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | + stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | + stat.S_IWUSR) # 0755 ret.text = os.popen(script.name).read().strip() - self.logger.info("Probe %s has result:\n%s" % (name, ret.text)) + self.logger.info("Probe %s has result:" % name) + self.logger.info(ret.text) finally: os.unlink(script.name) except: @@ -162,6 +124,8 @@ class Client: # begin configuration times['start'] = time.time() + self.logger.info("Starting Bcfg2 client run at %s" % times['start']) + if self.setup['file']: # read config from file try: @@ -176,14 +140,17 @@ class Client: return(1) else: # retrieve config from server - proxy = Bcfg2.Proxy.ComponentProxy(self.setup['server'], - self.setup['user'], - self.setup['password'], - key=self.setup['key'], - cert=self.setup['certificate'], - ca=self.setup['ca'], - allowedServerCNs=self.setup['serverCN'], - timeout=self.setup['timeout']) + proxy = \ + Bcfg2.Proxy.ComponentProxy(self.setup['server'], + self.setup['user'], + self.setup['password'], + key=self.setup['key'], + cert=self.setup['certificate'], + ca=self.setup['ca'], + allowedServerCNs=self.setup['serverCN'], + timeout=self.setup['timeout'], + retries=int(self.setup['retries']), + delay=int(self.setup['retry_delay'])) if self.setup['profile']: try: @@ -195,6 +162,24 @@ class Client: raise SystemExit(1) try: + probe_data = proxy.DeclareVersion(__version__) + except xmlrpclib.Fault: + err = sys.exc_info()[1] + if (err.faultCode == xmlrpclib.METHOD_NOT_FOUND or + (err.faultCode == 7 and + err.faultString.startswith("Unknown method"))): + self.logger.debug("Server does not support declaring " + "client 
version") + else: + self.logger.error("Failed to declare version: %s" % err) + except (Bcfg2.Proxy.ProxyError, + Bcfg2.Proxy.CertificateError, + socket.gaierror, + socket.error): + err = sys.exc_info()[1] + self.logger.error("Failed to declare version: %s" % err) + + try: probe_data = proxy.GetProbes() except (Bcfg2.Proxy.ProxyError, Bcfg2.Proxy.CertificateError, @@ -208,7 +193,7 @@ class Client: times['probe_download'] = time.time() try: - probes = Bcfg2.Client.XML.XML(probe_data) + probes = Bcfg2.Client.XML.XML(str(probe_data)) except Bcfg2.Client.XML.ParseError: syntax_error = sys.exc_info()[1] self.fatal_error( @@ -229,8 +214,7 @@ class Client: try: # upload probe responses proxy.RecvProbeData(Bcfg2.Client.XML.tostring(probedata, - encoding='UTF-8', - xml_declaration=True)) + xml_declaration=False).decode('UTF-8')) except Bcfg2.Proxy.ProxyError: err = sys.exc_info()[1] self.logger.error("Failed to upload probe data: %s" % err) @@ -282,10 +266,15 @@ class Client: self.fatal_error("Server error: %s" % (self.config.text)) return(1) - if self.setup['bundle-quick']: + if self.setup['bundle_quick']: newconfig = Bcfg2.Client.XML.XML('<Configuration/>') - [newconfig.append(bundle) for bundle in self.config.getchildren() if \ - bundle.tag == 'Bundle' and bundle.get('name') in self.setup['bundle']] + [newconfig.append(bundle) + for bundle in self.config.getchildren() + if (bundle.tag == 'Bundle' and + ((self.setup['bundle'] and + bundle.get('name') in self.setup['bundle']) or + (self.setup['skipbundle'] and + bundle.get('name') not in self.setup['skipbundle'])))] self.config = newconfig self.tools = Bcfg2.Client.Frame.Frame(self.config, @@ -293,7 +282,7 @@ class Client: times, self.setup['drivers'], self.setup['dryrun']) - if not self.setup['omit-lock-check']: + if not self.setup['omit_lock_check']: #check lock here try: lockfile = open(self.setup['lockfile'], 'w') @@ -309,7 +298,7 @@ class Client: # execute the said configuration self.tools.Execute() - if not 
self.setup['omit-lock-check']: + if not self.setup['omit_lock_check']: #unlock here if lockfile: try: @@ -318,20 +307,21 @@ class Client: except OSError: self.logger.error("Failed to unlock lockfile %s" % lockfile.name) - if not self.setup['file'] and not self.setup['bundle-quick']: + if not self.setup['file'] and not self.setup['bundle_quick']: # upload statistics feedback = self.tools.GenerateStats() try: proxy.RecvStats(Bcfg2.Client.XML.tostring(feedback, - encoding='UTF-8', - xml_declaration=True)) + xml_declaration=False).decode('UTF-8')) except Bcfg2.Proxy.ProxyError: err = sys.exc_info()[1] self.logger.error("Failed to upload configuration statistics: " "%s" % err) raise SystemExit(2) + self.logger.info("Finished Bcfg2 client run at %s" % time.time()) + if __name__ == '__main__': signal.signal(signal.SIGINT, cb_sigint_handler) client = Client() diff --git a/src/sbin/bcfg2-admin b/src/sbin/bcfg2-admin index 007dd0af3..24e9eaac4 100755 --- a/src/sbin/bcfg2-admin +++ b/src/sbin/bcfg2-admin @@ -11,6 +11,7 @@ from Bcfg2.Bcfg2Py3k import StringIO log = logging.getLogger('bcfg2-admin') + def mode_import(modename): """Load Bcfg2.Server.Admin.<mode>.""" modname = modename.capitalize() @@ -18,10 +19,12 @@ def mode_import(modename): (modname)).Server.Admin, modname) return getattr(mod, modname) + def get_modes(): """Get all available modes, except for the base mode.""" return [x.lower() for x in Bcfg2.Server.Admin.__all__ if x != 'mode'] + def create_description(): """Create the description string from the list of modes.""" modes = get_modes() @@ -35,26 +38,25 @@ def create_description(): pass return description.getvalue() + def main(): - optinfo = { - 'configfile': Bcfg2.Options.CFILE, - 'help': Bcfg2.Options.HELP, - 'verbose': Bcfg2.Options.VERBOSE, - 'repo': Bcfg2.Options.SERVER_REPOSITORY, - 'plugins': Bcfg2.Options.SERVER_PLUGINS, - 'event debug': Bcfg2.Options.DEBUG, - 'filemonitor': Bcfg2.Options.SERVER_FILEMONITOR, - 'password': Bcfg2.Options.SERVER_PASSWORD, - 
'encoding': Bcfg2.Options.ENCODING, - } + optinfo = dict() + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) setup = Bcfg2.Options.OptionParser(optinfo) # override default help message to include description of all modes - setup.hm = "%s\n%s" % (setup.buildHelpMessage(), create_description()) + setup.hm = "Usage:\n\n%s\n%s" % (setup.buildHelpMessage(), + create_description()) setup.parse(sys.argv[1:]) - log_args = dict(to_syslog=False, to_console=logging.WARNING) - if setup['verbose']: - log_args['to_console'] = logging.DEBUG + if setup['debug']: + level = logging.DEBUG + elif setup['verbose']: + level = logging.INFO + else: + level = logging.WARNING + Bcfg2.Logger.setup_logging('bcfg2-admin', to_syslog=setup['syslog'], + level=level) # Provide help if requested or no args were specified if (not setup['args'] or len(setup['args']) < 1 or @@ -83,8 +85,7 @@ def main(): if hasattr(mode, 'bcore'): mode.bcore.shutdown() else: - log.error("Unknown mode %s" % setup['args'][0]) - print("Usage:\n %s" % setup.buildHelpMessage()) + log.error("Error: Unknown mode '%s'\n" % setup['args'][0]) print(create_description()) raise SystemExit(1) diff --git a/src/sbin/bcfg2-build-reports b/src/sbin/bcfg2-build-reports index 7fa08110a..318e9de5d 100755 --- a/src/sbin/bcfg2-build-reports +++ b/src/sbin/bcfg2-build-reports @@ -110,7 +110,7 @@ def rss(reportxml, delivery, report): for item in items: channel.append(item) - tree = tostring(rssdata, encoding='UTF-8', xml_declaration=True) + tree = tostring(rssdata, xml_declaration=False).decode('UTF-8') fil.write(tree) fil.close() @@ -260,7 +260,7 @@ if __name__ == '__main__': # Apply XSLT, different ones based on report type, and options if deliverymechanism == 'null-operator': # Special Cases - fileout(tostring(ElementTree(procnodereport).getroot(), encoding='UTF-8', xml_declaration=True), deliv) + fileout(tostring(ElementTree(procnodereport).getroot(), 
xml_declaration=False).decode('UTF-8'), deliv) break transform = delivtype + '-' + deliverymechanism + '.xsl' @@ -312,7 +312,7 @@ if __name__ == '__main__': (toastring, socket.getfqdn(), outputstring) mail(outputstring, c) #call function to send else: - outputstring = tostring(stylesheet.apply(ElementTree(procnodereport)).getroot(), encoding='UTF-8', xml_declaration=True) + outputstring = tostring(stylesheet.apply(ElementTree(procnodereport)).getroot(), xml_declaration=False).decode('UTF-8') if deliverymechanism == 'rss': rss(outputstring, deliv, reprt) else: # Must be deliverymechanism == 'www': diff --git a/src/sbin/bcfg2-crypt b/src/sbin/bcfg2-crypt new file mode 100755 index 000000000..9ce21da82 --- /dev/null +++ b/src/sbin/bcfg2-crypt @@ -0,0 +1,362 @@ +#!/usr/bin/env python +""" helper for encrypting/decrypting Cfg and Properties files """ + +import os +import sys +import logging +import lxml.etree +import Bcfg2.Logger +import Bcfg2.Options +try: + import Bcfg2.Encryption +except ImportError: + err = sys.exc_info()[1] + print("Import failed '%s'. Is M2Crypto installed?" 
% + err) + raise SystemExit(1) + +LOGGER = None + +def get_logger(verbose=0): + """ set up logging according to the verbose level given on the + command line """ + global LOGGER + if LOGGER is None: + LOGGER = logging.getLogger(sys.argv[0]) + stderr = logging.StreamHandler() + if verbose: + level = logging.DEBUG + else: + level = logging.WARNING + LOGGER.setLevel(level) + LOGGER.addHandler(stderr) + syslog = logging.handlers.SysLogHandler("/dev/log") + syslog.setFormatter(logging.Formatter("%(name)s: %(message)s")) + LOGGER.addHandler(syslog) + return LOGGER + + +class Encryptor(object): + def __init__(self, setup): + self.setup = setup + self.logger = get_logger() + self.passphrase = None + self.pname = None + + def get_encrypted_filename(self, plaintext_filename): + return plaintext_filename + + def get_plaintext_filename(self, encrypted_filename): + return encrypted_filename + + def chunk(self, data): + yield data + + def unchunk(self, data, original): + return data[0] + + def set_passphrase(self): + if (not self.setup.cfp.has_section("encryption") or + self.setup.cfp.options("encryption") == 0): + self.logger.error("No passphrases available in %s" % + self.setup['configfile']) + return False + + if self.passphrase: + self.logger.debug("Using previously determined passphrase %s" % + self.pname) + return True + + if self.setup['passphrase']: + self.pname = self.setup['passphrase'] + + if self.pname: + if self.setup.cfp.has_option("encryption", self.pname): + self.passphrase = self.setup.cfp.get("encryption", + self.pname) + self.logger.debug("Using passphrase %s specified on command " + "line" % self.pname) + return True + else: + self.logger.error("Could not find passphrase %s in %s" % + (self.pname, self.setup['configfile'])) + return False + else: + pnames = self.setup.cfp.options("encryption") + if len(pnames) == 1: + self.passphrase = self.setup.cfp.get("encryption", pnames[0]) + self.pname = pnames[0] + self.logger.info("Using passphrase %s" % pnames[0]) + return True + 
self.logger.info("No passphrase could be determined") + return False + + def encrypt(self, fname): + try: + plaintext = open(fname).read() + except IOError: + err = sys.exc_info()[1] + self.logger.error("Error reading %s, skipping: %s" % (fname, err)) + return False + + self.set_passphrase() + + crypted = [] + for chunk in self.chunk(plaintext): + try: + passphrase, pname = self.get_passphrase(chunk) + except TypeError: + return False + + crypted.append(self._encrypt(chunk, passphrase, name=pname)) + + new_fname = self.get_encrypted_filename(fname) + try: + open(new_fname, "wb").write(self.unchunk(crypted, plaintext)) + self.logger.info("Wrote encrypted data to %s" % new_fname) + return True + except IOError: + err = sys.exc_info()[1] + self.logger.error("Error writing encrypted data from %s to %s: %s" % + (fname, new_fname, err)) + return False + + def _encrypt(self, plaintext, passphrase, name=None): + return Bcfg2.Encryption.ssl_encrypt(plaintext, passphrase) + + def decrypt(self, fname): + try: + crypted = open(fname).read() + except IOError: + err = sys.exc_info()[1] + self.logger.error("Error reading %s, skipping: %s" % (fname, err)) + return False + + self.set_passphrase() + + plaintext = [] + for chunk in self.chunk(crypted): + try: + passphrase, pname = self.get_passphrase(chunk) + try: + plaintext.append(self._decrypt(chunk, passphrase)) + except Bcfg2.Encryption.EVPError: + self.logger.info("Could not decrypt %s with the specified " + "passphrase" % fname) + return False + except: + err = sys.exc_info()[1] + self.logger.error("Error decrypting %s: %s" % (fname, err)) + return False + except TypeError: + pchunk = None + for pname in self.setup.cfp.options('encryption'): + self.logger.debug("Trying passphrase %s" % pname) + passphrase = self.setup.cfp.get('encryption', pname) + try: + pchunk = self._decrypt(chunk, passphrase) + break + except Bcfg2.Encryption.EVPError: + pass + except: + err = sys.exc_info()[1] + self.logger.error("Error decrypting %s: %s" 
% + (fname, err)) + if pchunk is not None: + plaintext.append(pchunk) + else: + self.logger.error("Could not decrypt %s with any " + "passphrase in %s" % + (fname, self.setup['configfile'])) + return False + + new_fname = self.get_plaintext_filename(fname) + try: + open(new_fname, "wb").write(self.unchunk(plaintext, crypted)) + self.logger.info("Wrote decrypted data to %s" % new_fname) + return True + except IOError: + err = sys.exc_info()[1] + self.logger.error("Error writing encrypted data from %s to %s: %s" % + (fname, new_fname, err)) + return False + + def get_passphrase(self, chunk): + pname = self._get_passphrase(chunk) + if not self.pname: + if not pname: + self.logger.info("No passphrase given on command line or " + "found in file") + return False + elif self.setup.cfp.has_option("encryption", pname): + passphrase = self.setup.cfp.get("encryption", pname) + else: + self.logger.error("Could not find passphrase %s in %s" % + (pname, self.setup['configfile'])) + return False + else: + pname = self.pname + passphrase = self.passphrase + if self.pname != pname: + self.logger.warning("Passphrase given on command line (%s) " + "differs from passphrase embedded in " + "file (%s), using command-line option" % + (self.pname, pname)) + return (passphrase, pname) + + def _get_passphrase(self, chunk): + return None + + def _decrypt(self, crypted, passphrase): + return Bcfg2.Encryption.ssl_decrypt(crypted, passphrase) + + +class CfgEncryptor(Encryptor): + def get_encrypted_filename(self, plaintext_filename): + return plaintext_filename + ".crypt" + + def get_plaintext_filename(self, encrypted_filename): + if encrypted_filename.endswith(".crypt"): + return encrypted_filename[:-6] + else: + return Encryptor.get_plaintext_filename(self, encrypted_filename) + + +class PropertiesEncryptor(Encryptor): + def _encrypt(self, plaintext, passphrase, name=None): + # plaintext is an lxml.etree._Element + if name is None: + name = "true" + if plaintext.text and 
plaintext.text.strip(): + plaintext.text = Bcfg2.Encryption.ssl_encrypt(plaintext.text, + passphrase) + plaintext.set("encrypted", name) + return plaintext + + def chunk(self, data): + xdata = lxml.etree.XML(data) + if self.setup['xpath']: + elements = xdata.xpath(self.setup['xpath']) + else: + elements = xdata.xpath('//*[@encrypted]') + if not elements: + elements = list(xdata.getiterator()) + # this is not a good use of a generator, but we need to + # generate the full list of elements in order to ensure that + # some exist before we know what to return + for elt in elements: + yield elt + + def unchunk(self, data, original): + # Properties elements are modified in-place, so we don't + # actually need to unchunk anything + xdata = data[0] + # find root element + while xdata.getparent() != None: + xdata = xdata.getparent() + xdata.set("encryption", "true") + return lxml.etree.tostring(xdata, xml_declaration=False).decode('UTF-8') + + def _get_passphrase(self, chunk): + pname = chunk.get("encrypted") or chunk.get("encryption") + if pname and pname.lower() != "true": + return pname + return None + + def _decrypt(self, crypted, passphrase): + # crypted is in lxml.etree._Element + if not crypted.text or not crypted.text.strip(): + self.logger.warning("Skipping empty element %s" % crypted.tag) + return crypted + rv = Bcfg2.Encryption.ssl_decrypt(crypted.text, passphrase) + crypted.text = rv + return crypted + + +def main(): + optinfo = dict() + optinfo.update(Bcfg2.Options.CRYPT_OPTIONS) + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + setup = Bcfg2.Options.OptionParser(optinfo) + setup.hm = " bcfg2-crypt [options] <filename>\nOptions:\n%s" % \ + setup.buildHelpMessage() + setup.parse(sys.argv[1:]) + + if not setup['args']: + print(setup.hm) + raise SystemExit(1) + elif setup['encrypt'] and setup['decrypt']: + print("You cannot specify both --encrypt and --decrypt") + raise SystemExit(1) + elif setup['cfg'] and setup['properties']: + print("You cannot specify both 
--cfg and --properties") + raise SystemExit(1) + elif setup['cfg'] and setup['xpath']: + print("Specifying --xpath with --cfg is nonsensical, ignoring --xpath") + setup['xpath'] = Bcfg2.Options.CRYPT_XPATH.default + elif setup['decrypt'] and setup['remove']: + print("--remove cannot be used with --decrypt, ignoring") + setup['remove'] = Bcfg2.Options.CRYPT_REMOVE.default + + logger = get_logger(setup['verbose']) + + props_crypt = PropertiesEncryptor(setup) + cfg_crypt = CfgEncryptor(setup) + + for fname in setup['args']: + if not os.path.exists(fname): + logger.error("%s does not exist, skipping" % fname) + continue + + # figure out if we need to encrypt this as a Properties file + # or as a Cfg file + props = False + if setup['properties']: + props = True + elif setup['cfg']: + props = False + elif fname.endswith(".xml"): + try: + xroot = lxml.etree.parse(fname).getroot() + if xroot.tag == "Properties": + props = True + else: + props = False + except IOError: + err = sys.exc_info()[1] + logger.error("Error reading %s, skipping: %s" % (fname, err)) + continue + except lxml.etree.XMLSyntaxError: + props = False + else: + props = False + + if props: + encryptor = props_crypt + else: + encryptor = cfg_crypt + + if setup['encrypt']: + if not encryptor.encrypt(fname): + print("Failed to encrypt %s, skipping" % fname) + elif setup['decrypt']: + if not encryptor.decrypt(fname): + print("Failed to decrypt %s, skipping" % fname) + else: + logger.info("Neither --encrypt nor --decrypt specified, " + "determining mode") + if not encryptor.decrypt(fname): + logger.info("Failed to decrypt %s, trying encryption" % fname) + if not encryptor.encrypt(fname): + print("Failed to encrypt %s, skipping" % fname) + + if setup['remove'] and encryptor.get_encrypted_filename(fname) != fname: + try: + os.unlink(fname) + except IOError: + err = sys.exc_info()[1] + logger.error("Error removing %s: %s" % (fname, err)) + continue + +if __name__ == '__main__': + sys.exit(main()) diff --git 
a/src/sbin/bcfg2-info b/src/sbin/bcfg2-info index 8598a58eb..7cc361a1c 100755 --- a/src/sbin/bcfg2-info +++ b/src/sbin/bcfg2-info @@ -10,6 +10,7 @@ import fnmatch import logging import tempfile import lxml.etree +import traceback from code import InteractiveConsole try: @@ -26,9 +27,14 @@ import Bcfg2.Logger import Bcfg2.Options import Bcfg2.Server.Core import Bcfg2.Server.Plugins.Metadata -import Bcfg2.Server.Plugins.SGenshi import Bcfg2.Server.Plugin +try: + from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile + has_genshi = True +except ImportError: + has_genshi = False + logger = logging.getLogger('bcfg2-info') USAGE = """Commands: build <hostname> <filename> - Build config for hostname, writing to filename @@ -96,7 +102,7 @@ def getClientList(hostglobs): """ given a host glob, get a list of clients that match it """ # special cases to speed things up: if '*' in hostglobs: - return list(self.metadata.clients.keys()) + return self.metadata.clients has_wildcards = False for glob in hostglobs: # check if any wildcard characters are in the string @@ -107,7 +113,7 @@ def getClientList(hostglobs): return hostglobs rv = set() - clist = set(self.metadata.clients.keys()) + clist = set(self.metadata.clients) for glob in hostglobs: for client in clist: if fnmatch.fnmatch(client, glob): @@ -131,20 +137,50 @@ def displayTrace(trace, num=80, sort=('time', 'calls')): stats.sort_stats('cumulative', 'calls', 'time') stats.print_stats(200) -class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): +def load_interpreters(): + interpreters = dict(python=lambda v: InteractiveConsole(v).interact()) + best = "python" + try: + import bpython.cli + interpreters["bpython"] = lambda v: bpython.cli.main(args=[], locals_=v) + best = "bpython" + except ImportError: + pass + + try: + # whether ipython is actually better than bpython is + # up for debate, but this is the behavior that existed + # before --interpreter was added, so we call IPython + # better + import IPython + if 
hasattr(IPython, "Shell"): + interpreters["ipython"] = lambda v: \ + IPython.Shell.IPShell(argv=[], user_ns=v).mainloop() + best = "ipython" + elif hasattr(IPython, "embed"): + interpreters["ipython"] = lambda v: IPython.embed(user_ns=v) + best = "ipython" + else: + print("Unknown IPython API version") + except ImportError: + pass + + interpreters['best'] = interpreters[best] + return interpreters + + +class infoCore(cmd.Cmd, Bcfg2.Server.Core.BaseCore): """Main class for bcfg2-info.""" def __init__(self, repo, plgs, passwd, encoding, event_debug, filemonitor='default', setup=None): cmd.Cmd.__init__(self) try: - Bcfg2.Server.Core.Core.__init__(self, repo, plgs, passwd, - encoding, filemonitor=filemonitor, - setup=setup) + Bcfg2.Server.Core.BaseCore.__init__(self, setup=setup) if event_debug: self.fam.debug = True except Bcfg2.Server.Core.CoreInitError: msg = sys.exc_info()[1] - print("Core load failed because %s" % msg) + print("Core load failed: %s" % msg) raise SystemExit(1) self.prompt = '> ' self.cont = True @@ -185,24 +221,21 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): spath = opt[1] elif opt[0] == '-n': interactive = False - sh = InteractiveConsole(locals()) if scriptmode: + sh = InteractiveConsole(locals()) for command in [c.strip() for c in open(spath).readlines()]: if command: sh.push(command) if interactive: - print("Dropping to python interpreter; press ^D to resume") - try: - import IPython - if hasattr(IPython, "Shell"): - shell = IPython.Shell.IPShell(argv=[], user_ns=locals()) - shell.mainloop() - elif hasattr(IPython, "embed"): - IPython.embed(user_ns=locals()) - else: - raise ImportError - except ImportError: - sh.interact() + interpreters = load_interpreters() + if setup['interpreter'] in interpreters: + print("Dropping to %s interpreter; press ^D to resume" % + setup['interpreter']) + interpreters[setup['interpreter']](locals()) + else: + logger.error("Invalid interpreter %s" % setup['interpreter']) + logger.error("Valid interpreters are: 
%s" % + ", ".join(interpeters.keys())) def do_quit(self, _): """ @@ -295,7 +328,7 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): if len(alist) > 1: clients = getClientList(alist[1:]) else: - clients = list(self.metadata.clients.keys()) + clients = self.metadata.clients for client in clients: self.do_build("%s %s" % (client, os.path.join(destdir, client + ".xml"))) @@ -327,7 +360,7 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): if len(args) > 2: clients = getClientList(args[1:]) else: - clients = list(self.metadata.clients.keys()) + clients = self.metadata.clients if altsrc: args = "--altsrc %s -f %%s %%s %%s" % altsrc else: @@ -362,8 +395,8 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): try: metadata = self.build_metadata(client) self.Bind(entry, metadata) - data = lxml.etree.tostring(entry, encoding="UTF-8", - xml_declaration=True) + data = lxml.etree.tostring(entry, + xml_declaration=False).decode('UTF-8') if outfile: open(outfile, 'w').write(data) else: @@ -373,7 +406,8 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): print("Could not write to %s: %s" % (outfile, err)) print(data) except Exception: - print("Failed to build entry %s for host %s" % (fname, client)) + print("Failed to build entry %s for host %s: %s" % + (fname, client, traceback.format_exc().splitlines()[-1])) raise def do_buildbundle(self, args): @@ -384,8 +418,9 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): metadata = self.build_metadata(client) if bname in self.plugins['Bundler'].entries: bundle = self.plugins['Bundler'].entries[bname] - if isinstance(bundle, - Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile): + if (has_genshi and + isinstance(bundle, + BundleTemplateFile)): stream = bundle.template.generate(metadata=metadata) print(stream.render("xml")) else: @@ -413,10 +448,11 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): def do_clients(self, _): """Print out client info.""" data = [('Client', 'Profile')] - clist = list(self.metadata.clients.keys()) + clist = 
self.metadata.clients clist.sort() for client in clist: - data.append((client, self.metadata.clients[client])) + imd = self.metadata.get_initial_metadata(client) + data.append((client, imd.profile)) printTabular(data) def do_config(self, _): @@ -466,22 +502,18 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): def do_groups(self, _): """Print out group info.""" - data = [("Groups", "Profile", "Category", "Contains")] + # FIXME: Contains doesn't work. Not sure what it was used for + #data = [("Groups", "Profile", "Category", "Contains")] + data = [("Groups", "Profile", "Category")] grouplist = list(self.metadata.groups.keys()) grouplist.sort() for group in grouplist: - if group in self.metadata.profiles: + if self.metadata.groups[group].is_profile: prof = 'yes' else: prof = 'no' - if group in self.metadata.categories: - cat = self.metadata.categories[group] - else: - cat = '' - gdata = [grp for grp in self.metadata.groups[group][1]] - if group in gdata: - gdata.remove(group) - data.append((group, prof, cat, ','.join(gdata))) + cat = self.metadata.groups[group].category + data.append((group, prof, cat)) printTabular(data) def do_showclient(self, args): @@ -496,21 +528,34 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): except: print("Client %s not defined" % client) continue - print("Hostname:\t%s" % client_meta.hostname) - print("Profile:\t%s" % client_meta.profile) - print("Groups:\t\t%s" % list(client_meta.groups)[0]) - for grp in list(client_meta.groups)[1:]: - print("\t\t%s" % grp) + fmt = "%-10s %s" + print(fmt % ("Hostname:", client_meta.hostname)) + print(fmt % ("Profile:", client_meta.profile)) + + group_fmt = "%-10s %-30s %s" + header = False + for group in list(client_meta.groups): + category = "" + for cat, grp in client_meta.categories.items(): + if grp == group: + category = "Category: %s" % cat + break + if not header: + print(group_fmt % ("Groups:", group, category)) + header = True + else: + print(group_fmt % ("", group, category)) + if 
client_meta.bundles: - print("Bundles:\t%s" % list(client_meta.bundles)[0]) + print(fmt % ("Bundles:", list(client_meta.bundles)[0])) for bnd in list(client_meta.bundles)[1:]: - print("\t\t%s" % bnd) + print(fmt % ("", bnd)) if client_meta.connectors: print("Connector data") print("=" * 80) for conn in client_meta.connectors: if getattr(client_meta, conn): - print("%s:\t%s" % (conn, getattr(client_meta, conn))) + print(fmt % (conn + ":", getattr(client_meta, conn))) print("=" * 80) def do_mappings(self, args): @@ -568,6 +613,9 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): print("Usage: packageresolve <hostname> <package> [<package>...]") return + if 'Packages' not in self.plugins: + print("Packages plugin not enabled") + return hostname = arglist[0] initial = arglist[1:] metadata = self.build_metadata(hostname) @@ -585,42 +633,28 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): print(" %s" % "\n ".join(unknown)) def do_packagesources(self, args): + if not args: + print("Usage: packagesources <hostname>") + return + if 'Packages' not in self.plugins: + print("Packages plugin not enabled") + return try: metadata = self.build_metadata(args) except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: print("Unable to build metadata for host %s" % args) return collection = self.plugins['Packages']._get_collection(metadata) - for source in collection.sources: - # get_urls() loads url_map as a side-effect - source.get_urls() - for url_map in source.url_map: - for arch in url_map['arches']: - # make sure client is in all the proper arch groups - if arch not in metadata.groups: - continue - reponame = source.get_repo_name(url_map) - print("Name: %s" % reponame) - print(" Type: %s" % source.ptype) - if url_map['url'] != '': - print(" URL: %s" % url_map['url']) - elif url_map['rawurl'] != '': - print(" RAWURL: %s" % url_map['rawurl']) - if source.gpgkeys: - print(" GPG Key(s): %s" % ", ".join(source.gpgkeys)) - else: - print(" GPG Key(s): None") - if 
len(source.blacklist): - print(" Blacklist: %s" % ", ".join(source.blacklist)) - if len(source.whitelist): - print(" Whitelist: %s" % ", ".join(source.whitelist)) - print("") + print(collection.sourcelist()) def do_profile(self, arg): """.""" if not have_profile: print("Profiling functionality not available.") return + if len(arg) == 0: + print("Usage: profile <command> <args>") + return tracefname = tempfile.mktemp() p = profile.Profile() p.runcall(self.onecmd, arg) @@ -635,34 +669,27 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core): self.do_loop() if __name__ == '__main__': - Bcfg2.Logger.setup_logging('bcfg2-info', to_syslog=False) - optinfo = { - 'configfile': Bcfg2.Options.CFILE, - 'help': Bcfg2.Options.HELP, - 'event debug': Bcfg2.Options.DEBUG, - 'profile': Bcfg2.Options.CORE_PROFILE, - 'encoding': Bcfg2.Options.ENCODING, - # Server options - 'repo': Bcfg2.Options.SERVER_REPOSITORY, - 'plugins': Bcfg2.Options.SERVER_PLUGINS, - 'password': Bcfg2.Options.SERVER_PASSWORD, - 'mconnect': Bcfg2.Options.SERVER_MCONNECT, - 'filemonitor': Bcfg2.Options.SERVER_FILEMONITOR, - 'location': Bcfg2.Options.SERVER_LOCATION, - 'static': Bcfg2.Options.SERVER_STATIC, - 'key': Bcfg2.Options.SERVER_KEY, - 'cert': Bcfg2.Options.SERVER_CERT, - 'ca': Bcfg2.Options.SERVER_CA, - 'password': Bcfg2.Options.SERVER_PASSWORD, - 'protocol': Bcfg2.Options.SERVER_PROTOCOL, - # More options - 'logging': Bcfg2.Options.LOGGING_FILE_PATH - } + optinfo = dict(profile=Bcfg2.Options.CORE_PROFILE, + mconnect=Bcfg2.Options.SERVER_MCONNECT, + interactive=Bcfg2.Options.INTERACTIVE, + interpreter=Bcfg2.Options.INTERPRETER) + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) setup = Bcfg2.Options.OptionParser(optinfo) - setup.hm = "Usage:\n %s\n%s" % (setup.buildHelpMessage(), - USAGE) + setup.hm = "\n".join([" bcfg2-info [options] [command <command args>]", + "Options:", + setup.buildHelpMessage(), + USAGE]) setup.parse(sys.argv[1:]) + if 
setup['debug']: + level = logging.DEBUG + elif setup['verbose']: + level = logging.INFO + else: + level = logging.WARNING + Bcfg2.Logger.setup_logging('bcfg2-info', to_syslog=False, + level=level) if setup['args'] and setup['args'][0] == 'help': print(setup.hm) sys.exit(0) @@ -670,14 +697,14 @@ if __name__ == '__main__': prof = profile.Profile() loop = prof.runcall(infoCore, setup['repo'], setup['plugins'], setup['password'], setup['encoding'], - setup['event debug'], setup['filemonitor'], + setup['debug'], setup['filemonitor'], setup) displayTrace(prof) else: if setup['profile']: print("Profiling functionality not available.") loop = infoCore(setup['repo'], setup['plugins'], setup['password'], - setup['encoding'], setup['event debug'], + setup['encoding'], setup['debug'], setup['filemonitor'], setup) loop.Run(setup['args']) diff --git a/src/sbin/bcfg2-lint b/src/sbin/bcfg2-lint index 78b833f02..1038beca8 100755 --- a/src/sbin/bcfg2-lint +++ b/src/sbin/bcfg2-lint @@ -61,51 +61,32 @@ def get_errorhandler(config): def load_server(setup): """ load server """ - core = Bcfg2.Server.Core.Core(setup['repo'], setup['plugins'], - setup['password'], setup['encoding'], - filemonitor=setup['filemonitor'], - setup=setup) - if setup['event debug']: - core.fam.debug = True + core = Bcfg2.Server.Core.BaseCore(setup) core.fam.handle_events_in_interval(4) return core +def load_plugin(module, obj_name=None): + parts = module.split(".") + if obj_name is None: + obj_name = parts[-1] + + mod = __import__(module) + for p in parts[1:]: + mod = getattr(mod, p) + return getattr(mod, obj_name) + if __name__ == '__main__': - optinfo = { - 'configfile': Bcfg2.Options.CFILE, - 'help': Bcfg2.Options.HELP, - 'verbose': Bcfg2.Options.VERBOSE, - 'event debug': Bcfg2.Options.DEBUG, - 'encoding': Bcfg2.Options.ENCODING, - # Server options - 'repo': Bcfg2.Options.SERVER_REPOSITORY, - 'plugins': Bcfg2.Options.SERVER_PLUGINS, - 'mconnect': Bcfg2.Options.SERVER_MCONNECT, - 'filemonitor': 
Bcfg2.Options.SERVER_FILEMONITOR, - 'location': Bcfg2.Options.SERVER_LOCATION, - 'static': Bcfg2.Options.SERVER_STATIC, - 'key': Bcfg2.Options.SERVER_KEY, - 'cert': Bcfg2.Options.SERVER_CERT, - 'ca': Bcfg2.Options.SERVER_CA, - 'password': Bcfg2.Options.SERVER_PASSWORD, - 'protocol': Bcfg2.Options.SERVER_PROTOCOL, - # More options - 'logging': Bcfg2.Options.LOGGING_FILE_PATH, - 'stdin': Bcfg2.Options.FILES_ON_STDIN, - 'schema': Bcfg2.Options.SCHEMA_PATH, - 'config': Bcfg2.Options.Option('Specify bcfg2-lint configuration file', - '/etc/bcfg2-lint.conf', - cmd='--lint-config', - odesc='<conffile>', - long_arg=True), - 'showerrors': Bcfg2.Options.Option('Show error handling', False, - cmd='--list-errors', - long_arg=True), - } + optinfo = dict(config=Bcfg2.Options.LINT_CONFIG, + showerrors=Bcfg2.Options.LINT_SHOW_ERRORS, + stdin=Bcfg2.Options.LINT_FILES_ON_STDIN, + schema=Bcfg2.Options.SCHEMA_PATH, + plugins=Bcfg2.Options.SERVER_PLUGINS) + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) setup = Bcfg2.Options.OptionParser(optinfo) setup.parse(sys.argv[1:]) - log_args = dict(to_syslog=False, to_console=logging.WARNING) + log_args = dict(to_syslog=setup['syslog'], to_console=logging.WARNING) if setup['verbose']: log_args['to_console'] = logging.DEBUG Bcfg2.Logger.setup_logging('bcfg2-info', **log_args) @@ -116,36 +97,40 @@ if __name__ == '__main__': # get list of plugins to run if setup['args']: - allplugins = setup['args'] + plugin_list = setup['args'] elif "bcfg2-repo-validate" in sys.argv[0]: - allplugins = 'Duplicates,RequiredAttrs,Validate'.split(',') + plugin_list = 'Duplicates,RequiredAttrs,Validate'.split(',') else: try: - allplugins = config.get('lint', 'plugins').split(',') + plugin_list = config.get('lint', 'plugins').split(',') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - allplugins = Bcfg2.Server.Lint.__all__ + plugin_list = Bcfg2.Server.Lint.__all__ if setup['stdin']: files = 
[s.strip() for s in sys.stdin.readlines()] else: files = None - # load plugins - serverplugins = {} - serverlessplugins = {} - for plugin_name in allplugins: + allplugins = dict() + for plugin in plugin_list: try: - mod = getattr(__import__("Bcfg2.Server.Lint.%s" % - (plugin_name)).Server.Lint, plugin_name) + allplugins[plugin] = load_plugin("Bcfg2.Server.Lint." + plugin) except ImportError: try: - mod = __import__(plugin_name) - except Exception: - err = sys.exc_info()[1] - logger.error("Failed to load plugin %s: %s" % (plugin_name, - err)) - raise SystemExit(1) - plugin = getattr(mod, plugin_name) + allplugins[plugin] = \ + load_plugin("Bcfg2.Server.Plugins." + plugin, + obj_name=plugin + "Lint") + except (ImportError, AttributeError): + err = sys.exc_info()[1] + logger.error("Failed to load plugin %s: %s" % (plugin + "Lint", + err)) + except AttributeError: + err = sys.exc_info()[1] + logger.error("Failed to load plugin %s: %s" % (obj_name, err)) + + serverplugins = dict() + serverlessplugins = dict() + for plugin_name, plugin in allplugins.items(): if [c for c in inspect.getmro(plugin) if c == Bcfg2.Server.Lint.ServerPlugin]: serverplugins[plugin_name] = plugin diff --git a/src/sbin/bcfg2-ping-sweep b/src/sbin/bcfg2-ping-sweep deleted file mode 100755 index be8994be3..000000000 --- a/src/sbin/bcfg2-ping-sweep +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -#GenerateHostInfo - Joey Hagedorn - hagedorn@mcs.anl.gov - -"""Generates hostinfo.xml at a regular interval.""" - -from os import dup2, execl, fork, uname, wait -import sys -import time -import lxml.etree - -import Bcfg2.Options - -if __name__ == '__main__': - opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY, - 'configfile': Bcfg2.Options.CFILE} - setup = Bcfg2.Options.OptionParser(opts) - setup.parse(sys.argv[1:]) - - cfpath = setup['configfile'] - clientdatapath = "%s/Metadata/clients.xml" % setup['repo'] - - clientElement = lxml.etree.parse(clientdatapath) - hostlist = [client.get('name') - for 
client in clientElement.findall("Client")] - - pids = {} - null = open('/dev/null', 'w+') - - #use uname to detect OS and use -t for darwin and -w for linux - #/bin/ping on linux /sbin/ping on os x - osname = uname()[0] - - while hostlist or pids: - if hostlist and len(list(pids.keys())) < 15: - host = hostlist.pop() - pid = fork() - if pid == 0: - # in child - dup2(null.fileno(), sys.__stdin__.fileno()) - dup2(null.fileno(), sys.__stdout__.fileno()) - dup2(null.fileno(), sys.__stderr__.fileno()) - if osname == 'Linux': - execl('/bin/ping', 'ping', '-w', '5', '-c', '1', host) - elif osname in ['Darwin', 'FreeBSD']: - execl('/sbin/ping', 'ping', '-t', '5', '-c', '1', host) - elif osname == 'SunOS': - execl('/usr/sbin/ping', 'ping', host, '56', '1') - else: # default - execl('/bin/ping', 'ping', '-w', '5', '-c', '1', host) - else: - pids[pid] = host - else: - try: - (cpid, status) = wait() - except OSError: - continue - chost = pids[cpid] - del pids[cpid] - elm = clientElement.xpath("//Client[@name='%s']" % chost)[0] - if status == 0: - elm.set("pingable", 'Y') - elm.set("pingtime", str(time.time())) - else: - elm.set("pingable", 'N') - - fout = open(clientdatapath, 'w') - fout.write(lxml.etree.tostring(clientElement.getroot(), - encoding='UTF-8', - xml_declaration=True)) - fout.close() diff --git a/src/sbin/bcfg2-reports b/src/sbin/bcfg2-reports index 1f101b9a7..cb553c0ba 100755 --- a/src/sbin/bcfg2-reports +++ b/src/sbin/bcfg2-reports @@ -3,6 +3,9 @@ import os import sys +import datetime +from optparse import OptionParser, OptionGroup, make_option +from Bcfg2.Bcfg2Py3k import ConfigParser try: import Bcfg2.Server.Reports.settings @@ -20,376 +23,277 @@ sys.path.pop() # Set DJANGO_SETTINGS_MODULE appropriately. os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name -from Bcfg2.Server.Reports.reports.models import Client -import getopt -import datetime -import fileinput - -usage = """Usage: bcfg2-reports [option] ... 
- -Options and arguments (and corresponding environment variables): --a : shows all hosts, including expired hosts --b NAME : single-host mode - shows bad entries from the - current interaction of NAME --c : shows only clean hosts --d : shows only dirty hosts --e NAME : single-host mode - shows extra entries from the - current interaction of NAME --h : shows help and usage info about bcfg2-reports --m NAME : single-host mode - shows modified entries from the - current interaction of NAME --s NAME : single-host mode - shows bad, modified, and extra - entries from the current interaction of NAME --t NAME : single-host mode - shows total number of managed and - good entries from the current interaction of NAME --x NAME : toggles expired/unexpired state of NAME ---badentry=KIND,NAME : shows only hosts whose current interaction has bad - entries in of KIND kind and NAME name; if a single - argument ARG1 is given, then KIND,NAME pairs will be - read from a file of name ARG1 ---modifiedentry=KIND,NAME : shows only hosts whose current interaction has - modified entries in of KIND kind and NAME name; if a - single argument ARG1 is given, then KIND,NAME pairs - will be read from a file of name ARG1 ---extraentry=KIND,NAME : shows only hosts whose current interaction has extra - entries in of KIND kind and NAME name; if a single - argument ARG1 is given, then KIND,NAME pairs will be - read from a file of name ARG1 ---fields=ARG1,ARG2,... : only displays the fields ARG1,ARG2,... - (name,time,state) ---sort=ARG1,ARG2,... : sorts output on ARG1,ARG2,... 
(name,time,state) ---stale : shows hosts which haven't run in the last 24 hours -""" - -def timecompare(client1, client2): - """Compares two clients by their timestamps.""" - return cmp(client1.current_interaction.timestamp, \ - client2.current_interaction.timestamp) - -def namecompare(client1, client2): - """Compares two clients by their names.""" - return cmp(client1.name, client2.name) - -def statecompare(client1, client2): - """Compares two clients by their states.""" - clean1 = client1.current_interaction.isclean() - clean2 = client2.current_interaction.isclean() - - if clean1 and not clean2: - return -1 - elif clean2 and not clean1: - return 1 - else: - return 0 - -def totalcompare(client1, client2): - """Compares two clients by their total entry counts.""" - return cmp(client2.current_interaction.totalcount, \ - client1.current_interaction.totalcount) - -def goodcompare(client1, client2): - """Compares two clients by their good entry counts.""" - return cmp(client2.current_interaction.goodcount, \ - client1.current_interaction.goodcount) +from Bcfg2.Server.Reports.reports.models import (Client, Entries_interactions, + Entries, TYPE_CHOICES) -def badcompare(client1, client2): - """Compares two clients by their bad entry counts.""" - return cmp(client2.current_interaction.totalcount - \ - client2.current_interaction.goodcount, \ - client1.current_interaction.totalcount - \ - client1.current_interaction.goodcount) +def hosts_by_entry_type(clients, etype, entryspec): + result = [] + for entry in entryspec: + for client in clients: + items = getattr(client.current_interaction, etype)() + for item in items: + if (item.entry.kind == entry[0] and + item.entry.name == entry[1]): + result.append(client) + return result -def crit_compare(criterion, client1, client2): - """Compares two clients by the criteria provided in criterion.""" - for crit in criterion: - comp = 0 - if crit == 'name': - comp = namecompare(client1, client2) - elif crit == 'state': - comp = 
statecompare(client1, client2) - elif crit == 'time': - comp = timecompare(client1, client2) - elif crit == 'total': - comp = totalcompare(client1, client2) - elif crit == 'good': - comp = goodcompare(client1, client2) - elif crit == 'bad': - comp = badcompare(client1, client2) - - if comp != 0: - return comp - - return 0 - -def print_fields(fields, cli, max_name, entrydict): +def print_fields(fields, client, fmt, extra=None): """ - Prints the fields specified in fields of cli, max_name + Prints the fields specified in fields of client, max_name specifies the column width of the name column. """ - fmt = '' - for field in fields: - if field == 'name': - fmt += ("%%-%ds " % (max_name)) - else: - fmt += "%s " fdata = [] + if extra is None: + extra = dict() for field in fields: if field == 'time': - fdata.append(str(cli.current_interaction.timestamp)) + fdata.append(str(client.current_interaction.timestamp)) elif field == 'state': - if cli.current_interaction.isclean(): + if client.current_interaction.isclean(): fdata.append("clean") else: fdata.append("dirty") elif field == 'total': - fdata.append("%5d" % cli.current_interaction.totalcount) + fdata.append(client.current_interaction.totalcount) elif field == 'good': - fdata.append("%5d" % cli.current_interaction.goodcount) + fdata.append(client.current_interaction.goodcount) + elif field == 'modified': + fdata.append(client.current_interaction.modified_entry_count()) + elif field == 'extra': + fdata.append(client.current_interaction.extra_entry_count()) elif field == 'bad': - fdata.append("%5d" % cli.current_interaction.totalcount \ - - cli.current_interaction.goodcount) + fdata.append((client.current_interaction.badcount())) else: try: - fdata.append(getattr(cli, field)) + fdata.append(getattr(client, field)) except: - fdata.append("N/A") + fdata.append(extra.get(field, "N/A")) - display = fmt % tuple(fdata) - if len(entrydict) > 0: - display += " " - display += str(entrydict[cli]) - print(display) + print(fmt % 
tuple(fdata)) -def print_entry(item, max_name): - fmt = ("%%-%ds " % (max_name)) - fdata = item.entry.kind + ":" + item.entry.name - display = fmt % (fdata) - print(display) - -fields = "" -sort = "" -badentry = "" -modifiedentry = "" -extraentry = "" -expire = "" -singlehost = "" +def print_entries(interaction, etype): + items = getattr(interaction, etype)() + for item in items: + print("%-70s %s" % (item.entry.kind + ":" + item.entry.name, etype)) -c_list = Client.objects.all() +def main(): + parser = OptionParser(usage="%prog [options] <mode> [arg]") -result = list() -entrydict = dict() + # single host modes + multimodes = [] + singlemodes = [] + multimodes.append(make_option("-b", "--bad", action="store_true", + default=False, + help="Show bad entries from HOST")) + multimodes.append(make_option("-e", "--extra", action="store_true", + default=False, + help="Show extra entries from HOST")) + multimodes.append(make_option("-m", "--modified", action="store_true", + default=False, + help="Show modified entries from HOST")) + multimodes.append(make_option("-s", "--show", action="store_true", + default=False, + help="Equivalent to --bad --extra --modified")) + singlemodes.append(make_option("-t", "--total", action="store_true", + default=False, + help="Show total number of managed and good " + "entries from HOST")) + singlemodes.append(make_option("-x", "--expire", action="store_true", + default=False, + help="Toggle expired/unexpired state of " + "HOST")) + hostmodes = \ + OptionGroup(parser, "Single-Host Modes", + "The following mode flags require a single HOST argument") + hostmodes.add_options(multimodes) + hostmodes.add_options(singlemodes) + parser.add_option_group(hostmodes) -args = sys.argv[1:] -try: - opts, pargs = getopt.getopt(args, 'ab:cde:hm:s:t:x:', - ['stale', - 'sort=', - 'fields=', - 'badentry=', - 'modifiedentry=', - 'extraentry=']) -except getopt.GetoptError: - msg = sys.exc_info()[1] - print(msg) - print(usage) - sys.exit(2) + # all host modes + 
allhostmodes = OptionGroup(parser, "Host Selection Modes", + "The following mode flags require no arguments") + allhostmodes.add_option("-a", "--all", action="store_true", default=False, + help="Show all hosts, including expired hosts") + allhostmodes.add_option("-c", "--clean", action="store_true", default=False, + help="Show only clean hosts") + allhostmodes.add_option("-d", "--dirty", action="store_true", default=False, + help="Show only dirty hosts") + allhostmodes.add_option("--stale", action="store_true", default=False, + help="Show hosts that haven't run in the last 24 " + "hours") + parser.add_option_group(allhostmodes) + + # entry modes + entrymodes = \ + OptionGroup(parser, "Entry Modes", + "The following mode flags require either any number of " + "TYPE:NAME arguments describing entries, or the --file " + "option") + entrymodes.add_option("--badentry", action="store_true", default=False, + help="Show hosts that have bad entries that match " + "the argument") + entrymodes.add_option("--modifiedentry", action="store_true", default=False, + help="Show hosts that have modified entries that " + "match the argument") + entrymodes.add_option("--extraentry", action="store_true", default=False, + help="Show hosts that have extra entries that match " + "the argument") + entrymodes.add_option("--entrystatus", action="store_true", default=False, + help="Show the status of the named entry on all " + "hosts. 
Only supports a single entry.") + parser.add_option_group(entrymodes) + + # entry options + entryopts = OptionGroup(parser, "Entry Options", + "Options that can be used with entry modes") + entryopts.add_option("--fields", metavar="FIELD,FIELD,...", + help="Only display the listed fields", + default='name,time,state') + entryopts.add_option("--file", metavar="FILE", + help="Read TYPE:NAME pairs from the specified file " + "instead of the command line") + parser.add_option_group(entryopts) -for option in opts: - if len(option) > 0: - if option[0] == '--fields': - fields = option[1] - if option[0] == '--sort': - sort = option[1] - if option[0] == '--badentry': - badentry = option[1] - if option[0] == '--modifiedentry': - modifiedentry = option[1] - if option[0] == '--extraentry': - extraentry = option[1] - if option[0] == '-x': - expire = option[1] - if option[0] == '-s' or \ - option[0] == '-t' or \ - option[0] == '-b' or \ - option[0] == '-m' or \ - option[0] == '-e': - singlehost = option[1] + options, args = parser.parse_args() -if expire != "": - for c_inst in c_list: - if expire == c_inst.name: - if c_inst.expiration == None: - c_inst.expiration = datetime.datetime.now() + # make sure we've specified exactly one mode + mode_family = None + mode = None + for opt in allhostmodes.option_list + entrymodes.option_list + \ + singlemodes: + if getattr(options, opt.dest): + if mode is not None: + parser.error("Only one mode can be specified; found %s and %s" % + (mode.get_opt_string(), opt.get_opt_string())) + mode = opt + mode_family = parser.get_option_group(opt.get_opt_string()) + + # you can specify more than one of --bad, --extra, --modified, --show, so + # consider single-host options separately + if not mode_family: + for opt in multimodes: + if getattr(options, opt.dest): + mode_family = parser.get_option_group(opt.get_opt_string()) + break + + if not mode_family: + parser.error("You must specify a mode") + + if mode_family == hostmodes: + try: + cname = 
args.pop() + client = Client.objects.select_related().get(name=cname) + except IndexError: + parser.error("%s require a single HOST argument" % hostmodes.title) + except Client.DoesNotExist: + print("No such host: %s" % cname) + return 2 + + if options.expire: + if client.expiration == None: + client.expiration = datetime.datetime.now() print("Host expired.") else: - c_inst.expiration = None + client.expiration = None print("Host un-expired.") - c_inst.save() + client.save() + elif options.total: + managed = client.current_interaction.totalcount + good = client.current_interaction.goodcount + print("Total managed entries: %d (good: %d)" % (managed, good)) + elif mode_family == hostmodes: + if options.bad or options.show: + print_entries(client.current_interaction, "bad") -elif '-h' in args: - print(usage) -elif singlehost != "": - for c_inst in c_list: - if singlehost == c_inst.name: - if '-t' in args: - managed = c_inst.current_interaction.totalcount - good = c_inst.current_interaction.goodcount - print("Total managed entries: %d (good: %d)" % (managed, good)) - baditems = c_inst.current_interaction.bad() - if len(baditems) > 0 and ('-b' in args or '-s' in args): - print("Bad Entries:") - max_name = -1 - for item in baditems: - if len(item.entry.name) > max_name: - max_name = len(item.entry.name) - for item in baditems: - print_entry(item, max_name) - modifieditems = c_inst.current_interaction.modified() - if len(modifieditems) > 0 and ('-m' in args or '-s' in args): - print "Modified Entries:" - max_name = -1 - for item in modifieditems: - if len(item.entry.name) > max_name: - max_name = len(item.entry.name) - for item in modifieditems: - print_entry(item, max_name) - extraitems = c_inst.current_interaction.extra() - if len(extraitems) > 0 and ('-e' in args or '-s' in args): - print("Extra Entries:") - max_name = -1 - for item in extraitems: - if len(item.entry.name) > max_name: - max_name = len(item.entry.name) - for item in extraitems: - print_entry(item, 
max_name) - + if options.modified or options.show: + print_entries(client.current_interaction, "modified") -else: - if fields == "": - fields = ['name', 'time', 'state'] + if options.extra or options.show: + print_entries(client.current_interaction, "extra") else: - fields = fields.split(',') - - if sort != "": - sort = sort.split(',') + clients = Client.objects.exclude(current_interaction__isnull=True) + result = list() + edata = dict() + fields = options.fields.split(',') - if badentry != "": - badentry = badentry.split(',') + if mode_family == allhostmodes: + if args: + print("%s do not take any arguments, ignoring" % + allhostmodes.title) - if modifiedentry != "": - modifiedentry = modifiedentry.split(',') + for client in clients: + interaction = client.current_interaction + if (options.all or + (options.stale and interaction.isstale()) or + (options.clean and interaction.isclean()) or + (options.dirty and not interaction.isclean())): + result.append(client) + else: + # entry query modes + if options.file: + try: + entries = [l.strip().split(":") + for l in open(options.file)] + except IOError, err: + print("Cannot read entries from %s: %s" % (options.file, + err)) + return 2 + elif args: + entries = [a.split(":") for a in args] + else: + parser.error("%s require either a list of entries on the " + "command line or the --file options" % + mode_family.title) + + if options.badentry: + result = hosts_by_entry_type(clients, "bad", entries) + elif options.modifiedentry: + result = hosts_by_entry_type(clients, "modified", entries) + elif options.extraentry: + result = hosts_by_entry_type(clients, "extra", entries) + elif options.entrystatus: + if 'state' in fields: + fields.remove('state') + fields.append("entry state") + try: + entry_obj = Entries.objects.get( + kind=entries[0][0], + name=entries[0][1]) + except Entries.DoesNotExist: + print("No entry %s found" % ":".join(entries[0])) + return 2 - if extraentry != "": - extraentry = extraentry.split(',') - - # 
stale hosts - if '--stale' in args: - for c_inst in c_list: - if c_inst.current_interaction.isstale(): - result.append(c_inst) - # clean hosts - elif '-c' in args: - for c_inst in c_list: - if c_inst.current_interaction.isclean(): - result.append(c_inst) - # dirty hosts - elif '-d' in args: - for c_inst in c_list: - if not c_inst.current_interaction.isclean(): - result.append(c_inst) + for client in clients: + try: + entry = \ + Entries_interactions.objects.select_related().get( + interaction=client.current_interaction, + entry=entry_obj) + edata[client] = \ + {"entry state":dict(TYPE_CHOICES)[entry.type], + "reason":entry.reason} + result.append(client) + except Entries_interactions.DoesNotExist: + pass - elif badentry != "": - if len(badentry) == 1: - fileread = fileinput.input(badentry[0]) - try: - for line in fileread: - badentry = line.strip().split(',') - for c_inst in c_list: - baditems = c_inst.current_interaction.bad() - for item in baditems: - if item.entry.name == badentry[1] and item.entry.kind == badentry[0]: - result.append(c_inst) - if c_inst in entrydict: - entrydict.get(c_inst).append(badentry[1]) - else: - entrydict[c_inst] = [badentry[1]] - break - except IOError: - e = sys.exc_info()[1] - print("Cannot read %s: %s" % (e.filename, e.strerror)) - else: - for c_inst in c_list: - baditems = c_inst.current_interaction.bad() - for item in baditems: - if item.entry.name == badentry[1] and item.entry.kind == badentry[0]: - result.append(c_inst) - break - elif modifiedentry != "": - if len(modifiedentry) == 1: - fileread = fileinput.input(modifiedentry[0]) - try: - for line in fileread: - modifiedentry = line.strip().split(',') - for c_inst in c_list: - modifieditems = c_inst.current_interaction.modified() - for item in modifieditems: - if item.entry.name == modifiedentry[1] and item.entry.kind == modifiedentry[0]: - result.append(c_inst) - if c_inst in entrydict: - entrydict.get(c_inst).append(modifiedentry[1]) - else: - entrydict[c_inst] = 
[modifiedentry[1]] - break - except IOError: - e = sys.exc_info()[1] - print("Cannot read %s: %s" % (e.filename, e.strerror)) - else: - for c_inst in c_list: - modifieditems = c_inst.current_interaction.modified() - for item in modifieditems: - if item.entry.name == modifiedentry[1] and item.entry.kind == modifiedentry[0]: - result.append(c_inst) - break - elif extraentry != "": - if len(extraentry) == 1: - fileread = fileinput.input(extraentry[0]) - try: - for line in fileread: - extraentry = line.strip().split(',') - for c_inst in c_list: - extraitems = c_inst.current_interaction.extra() - for item in extraitems: - if item.entry.name == extraentry[1] and item.entry.kind == extraentry[0]: - result.append(c_inst) - if c_inst in entrydict: - entrydict.get(c_inst).append(extraentry[1]) - else: - entrydict[c_inst] = [extraentry[1]] - break - except IOError: - e = sys.exc_info()[1] - print("Cannot read %s: %s" % (e.filename, e.strerror)) - else: - for c_inst in c_list: - extraitems = c_inst.current_interaction.extra() - for item in extraitems: - if item.entry.name == extraentry[1] and item.entry.kind == extraentry[0]: - result.append(c_inst) - break - else: - for c_inst in c_list: - result.append(c_inst) - max_name = -1 - if 'name' in fields: - for c_inst in result: - if len(c_inst.name) > max_name: - max_name = len(c_inst.name) + if 'name' not in fields: + fields.insert(0, "name") + max_name = max(len(c.name) for c in result) + ffmt = [] + for field in fields: + if field == "name": + ffmt.append("%%-%ds" % max_name) + elif field == "time": + ffmt.append("%-19s") + else: + ffmt.append("%%-%ds" % len(field)) + fmt = " ".join(ffmt) + print(fmt % tuple(f.title() for f in fields)) + for client in result: + if not client.expiration: + print_fields(fields, client, fmt, + extra=edata.get(client, None)) - if sort != "": - result.sort(lambda x, y: crit_compare(sort, x, y)) - - if fields != "": - for c_inst in result: - if '-a' in args or c_inst.expiration == None: - 
print_fields(fields, c_inst, max_name, entrydict) +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/sbin/bcfg2-server b/src/sbin/bcfg2-server index 757172464..32c97b63d 100755 --- a/src/sbin/bcfg2-server +++ b/src/sbin/bcfg2-server @@ -2,74 +2,46 @@ """The XML-RPC Bcfg2 server.""" -import logging -import os.path +import os import sys - +import logging import Bcfg2.Logger import Bcfg2.Options -import Bcfg2.Component -import Bcfg2.Server.Plugins.Metadata from Bcfg2.Server.Core import CoreInitError logger = logging.getLogger('bcfg2-server') if __name__ == '__main__': - OPTINFO = { - 'configfile': Bcfg2.Options.CFILE, - 'daemon' : Bcfg2.Options.DAEMON, - 'debug' : Bcfg2.Options.DEBUG, - 'help' : Bcfg2.Options.HELP, - 'verbose' : Bcfg2.Options.VERBOSE, - 'to_file' : Bcfg2.Options.LOGGING_FILE_PATH, - 'repo' : Bcfg2.Options.SERVER_REPOSITORY, - 'plugins' : Bcfg2.Options.SERVER_PLUGINS, - 'password' : Bcfg2.Options.SERVER_PASSWORD, - 'fm' : Bcfg2.Options.SERVER_FILEMONITOR, - 'key' : Bcfg2.Options.SERVER_KEY, - 'cert' : Bcfg2.Options.SERVER_CERT, - 'ca' : Bcfg2.Options.SERVER_CA, - 'listen_all': Bcfg2.Options.SERVER_LISTEN_ALL, - 'location' : Bcfg2.Options.SERVER_LOCATION, - 'passwd' : Bcfg2.Options.SERVER_PASSWORD, - 'static' : Bcfg2.Options.SERVER_STATIC, - 'encoding' : Bcfg2.Options.ENCODING, - 'filelog' : Bcfg2.Options.LOGGING_FILE_PATH, - 'protocol' : Bcfg2.Options.SERVER_PROTOCOL, - } - - setup = Bcfg2.Options.OptionParser(OPTINFO) + optinfo = dict() + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.DAEMON_COMMON_OPTIONS) + setup = Bcfg2.Options.OptionParser(optinfo) setup.parse(sys.argv[1:]) + # check whether the specified bcfg2.conf exists + if not os.path.exists(setup['configfile']): + print("Could not read %s" % setup['configfile']) + sys.exit(1) + + if setup['backend'] not in ['best', 'cherrypy', 'builtin']: + print("Unknown server backend %s, using 'best'" % 
setup['backend']) + setup['backend'] = 'best' + if setup['backend'] == 'cherrypy': + try: + from Bcfg2.Server.CherryPyCore import Core + except ImportError: + err = sys.exc_info()[1] + print("Unable to import CherryPy server core: %s" % err) + raise + elif setup['backend'] == 'builtin' or setup['backend'] == 'best': + from Bcfg2.Server.BuiltinCore import Core + try: - # check whether the specified bcfg2.conf exists - if not os.path.exists(setup['configfile']): - print("Could not read %s" % setup['configfile']) - sys.exit(1) - Bcfg2.Component.run_component(Bcfg2.Server.Core.Core, - listen_all=setup['listen_all'], - location=setup['location'], - daemon=setup['daemon'], - pidfile_name=setup['daemon'], - protocol=setup['protocol'], - to_file=setup['to_file'], - cfile=setup['configfile'], - register=False, - cls_kwargs={'repo':setup['repo'], - 'plugins':setup['plugins'], - 'password':setup['password'], - 'encoding':setup['encoding'], - 'ca':setup['ca'], - 'filemonitor':setup['fm'], - 'start_fam_thread':True, - 'setup':setup}, - keyfile=setup['key'], - certfile=setup['cert'], - ca=setup['ca'] - ) + core = Core(setup, start_fam_thread=True) + core.run() except CoreInitError: msg = sys.exc_info()[1] logger.error(msg) - logger.error("exiting") sys.exit(1) except KeyboardInterrupt: sys.exit(1) diff --git a/src/sbin/bcfg2-test b/src/sbin/bcfg2-test index e3cfd27cc..8323eeb22 100755 --- a/src/sbin/bcfg2-test +++ b/src/sbin/bcfg2-test @@ -61,18 +61,11 @@ class ClientTest(TestCase): id = __str__ def main(): - optinfo = { - 'configfile': Bcfg2.Options.CFILE, - 'help': Bcfg2.Options.HELP, - 'encoding': Bcfg2.Options.ENCODING, - 'repo': Bcfg2.Options.SERVER_REPOSITORY, - 'plugins': Bcfg2.Options.SERVER_PLUGINS, - 'password': Bcfg2.Options.SERVER_PASSWORD, - 'verbose': Bcfg2.Options.VERBOSE, - 'noseopts': Bcfg2.Options.TEST_NOSEOPTS, - 'ignore': Bcfg2.Options.TEST_IGNORE, - 'validate': Bcfg2.Options.CFG_VALIDATION, - } + optinfo = dict(noseopts=Bcfg2.Options.TEST_NOSEOPTS, + 
test_ignore=Bcfg2.Options.TEST_IGNORE, + validate=Bcfg2.Options.CFG_VALIDATION) + optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) + optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) setup = Bcfg2.Options.OptionParser(optinfo) setup.hm = \ "bcfg2-test [options] [client] [client] [...]\nOptions:\n %s" % \ @@ -80,19 +73,12 @@ def main(): setup.parse(sys.argv[1:]) if setup['verbose']: - Bcfg2.Logger.setup_logging("bcfg2-test", to_syslog=False) + Bcfg2.Logger.setup_logging("bcfg2-test", to_syslog=setup['syslog']) - core = Bcfg2.Server.Core.Core( - setup['repo'], - setup['plugins'], - setup['password'], - setup['encoding'], - filemonitor='pseudo', - setup=setup - ) + core = Bcfg2.Server.Core.BaseCore(setup) ignore = dict() - for entry in setup['ignore']: + for entry in setup['test_ignore']: tag, name = entry.split(":") try: ignore[tag].append(name) diff --git a/src/sbin/bcfg2-yum-helper b/src/sbin/bcfg2-yum-helper index 2da7c6336..a0698cc90 100755 --- a/src/sbin/bcfg2-yum-helper +++ b/src/sbin/bcfg2-yum-helper @@ -9,8 +9,7 @@ import os import sys import yum import logging -import Bcfg2.Logger -from optparse import OptionParser, OptionError +from optparse import OptionParser try: import json @@ -37,6 +36,24 @@ def get_logger(verbose=0): LOGGER.addHandler(syslog) return LOGGER +def pkg_to_tuple(package): + """ json doesn't distinguish between tuples and lists, but yum + does, so we convert a package in list format to one in tuple + format """ + if isinstance(package, list): + return tuple(package) + else: + return package + +def pkgtup_to_string(package): + rv = [package[0], "-"] + if package[2]: + rv.extend([package[2], ':']) + rv.extend([package[3], '-', package[4]]) + if package[1]: + rv.extend(['.', package[1]]) + return ''.join(str(e) for e in rv) + class DepSolver(object): def __init__(self, cfgfile, verbose=1): @@ -64,27 +81,28 @@ class DepSolver(object): def is_package(self, package): if isinstance(package, tuple): if package[1] is None and package[2] == (None, 
None, None): - package = package[0] - else: - return None - - return bool(self.get_package_object(package, silent=True)) + pkgtup = (package[0], None, None, None, None) + elif len(package) == 5: + pkgtup = package + else: + pkgtup = (package, None, None, None, None) + return bool(self.get_package_object(pkgtup, silent=True)) def is_virtual_package(self, package): return bool(self.get_provides(package, silent=True)) - def get_package_object(self, package, silent=False): + def get_package_object(self, pkgtup, silent=False): try: - matches = self.yumbase.pkgSack.returnNewestByName(name=package) + matches = yum.packageSack.packagesNewestByName(self.yumbase.pkgSack.searchPkgTuple(pkgtup)) except yum.Errors.PackageSackError: if not silent: self.logger.warning("Package '%s' not found" % - self.get_package_name(package)) + self.get_package_name(pkgtup)) matches = [] except yum.Errors.RepoError: err = sys.exc_info()[1] self.logger.error("Temporary failure loading metadata for %s: %s" % - (self.get_package_name(package), err)) + (self.get_package_name(pkgtup), err)) matches = [] pkgs = self._filter_arch(matches) @@ -100,7 +118,7 @@ class DepSolver(object): deps = set(pkg.requires) # filter out things the package itself provides deps.difference_update([dep for dep in deps - if pkg.checkPrco('provides', dep)]) + if pkg.checkPrco('provides', dep)]) else: self.logger.error("No package available: %s" % self.get_package_name(package)) @@ -120,7 +138,7 @@ class DepSolver(object): return [] if prov and not all: - prov = self._filter_provides(required, prov) + prov = self._filter_provides(prov) elif not prov and not silent: self.logger.error("No package provides %s" % self.get_package_name(required)) @@ -134,7 +152,7 @@ class DepSolver(object): if self.yumbase.comps.has_group(group): group = self.yumbase.comps.return_group(group) else: - self.logger.warning("%s is not a valid group" % group) + self.logger.error("%s is not a valid group" % group) return [] except 
yum.Errors.GroupsError: err = sys.exc_info()[1] @@ -155,7 +173,7 @@ class DepSolver(object): self.logger.warning("Unknown group package type '%s'" % ptype) return [] - def _filter_provides(self, package, providers): + def _filter_provides(self, providers): providers = [pkg for pkg in self._filter_arch(providers)] if len(providers) > 1: # go through each provider and make sure it's the newest @@ -174,7 +192,7 @@ class DepSolver(object): # provider of perl(lib). rv = [] for pkg in providers: - found = self.get_package_object(pkg.name) + found = self.get_package_object(pkg.pkgtup) if found == pkg or found.pkgtup == pkg.pkgtup: rv.append(pkg) else: @@ -182,7 +200,7 @@ class DepSolver(object): (pkg, found)) else: rv = providers - return [p.name for p in rv] + return rv def _filter_arch(self, packages): matching = [] @@ -204,115 +222,38 @@ class DepSolver(object): """ get the name of a package or virtual package from the internal representation used by this Collection class """ if isinstance(package, tuple): - return yum.misc.prco_tuple_to_string(package) + if len(package) == 3: + return yum.misc.prco_tuple_to_string(package) + else: + return pkgtup_to_string(package) else: return str(package) def complete(self, packagelist): packages = set() - pkgs = set(packagelist) - requires = set() - satisfied = set() unknown = set() - final_pass = False - - while requires or pkgs: - # infinite loop protection - start_reqs = len(requires) - - while pkgs: - package = pkgs.pop() - if package in packages: - continue - - if not self.is_package(package): - # try this package out as a requirement - self.logger.debug("Adding requirement %s" % package) - requires.add((package, None, (None, None, None))) - continue - - packages.add(package) - reqs = set(self.get_deps(package)).difference(satisfied) - if reqs: - self.logger.debug("Adding requirements for %s: %s" % - (package, - ",".join([self.get_package_name(r) - for r in reqs]))) - requires.update(reqs) - - reqs_satisfied = set() - for req 
in requires: - if req in satisfied: - reqs_satisfied.add(req) - continue - - if req[1] is None and self.is_package(req[0]): - if req[0] not in packages: - pkgs.add(req[0]) - reqs_satisfied.add(req) - continue - - self.logger.debug("Handling requirement '%s'" % - self.get_package_name(req)) - providers = list(set(self.get_provides(req))) - if len(providers) > 1: - # hopefully one of the providing packages is already - # included - best = [p for p in providers if p in packages] - if best: - providers = best - else: - # pick a provider whose name matches the requirement - best = [p for p in providers if p == req[0]] - if len(best) == 1: - providers = best - elif not final_pass: - self.logger.debug("%s has multiple providers: %s" % - (self.get_package_name(req), - providers)) - self.logger.debug("No provider is obviously the " - "best; deferring") - providers = None - else: - # found no "best" package, but it's the - # final pass, so include them all - self.logger.debug("Found multiple providers for %s," - "including all" % - self.get_package_name(req)) - - if providers: - self.logger.debug("Requirement '%s' satisfied by %s" % - (self.get_package_name(req), - ",".join([self.get_package_name(p) - for p in providers]))) - newpkgs = set(providers).difference(packages) - if newpkgs: - for package in newpkgs: - if self.is_package(package): - pkgs.add(package) - else: - unknown.add(package) - reqs_satisfied.add(req) - elif providers is not None: - # nothing provided this requirement at all - self.logger.debug("Nothing provides %s" % - self.get_package_name(req)) - unknown.add(req) - reqs_satisfied.add(req) - # else, defer - requires.difference_update(reqs_satisfied) - - # infinite loop protection - if len(requires) == start_reqs and len(pkgs) == 0: - final_pass = True - - if final_pass and requires: - unknown.update(requires) - requires = set() - - unknown = [self.get_package_name(p) for p in unknown] + for pkg in packagelist: + if isinstance(pkg, tuple): + pkgtup = pkg + 
else: + pkgtup = (pkg, None, None, None, None) + po = self.get_package_object(pkgtup) + if not po: + self.logger.debug("Unknown package %s" % + self.get_package_name(pkg)) + unknown.add(pkg) + else: + if self.yumbase.tsInfo.exists(pkgtup=po.pkgtup): + self.logger.debug("%s added to transaction multiple times" % + po) + else: + self.logger.debug("Adding %s to transaction" % po) + self.yumbase.tsInfo.addInstall(po) + self.yumbase.resolveDeps() - return packages, unknown + for txmbr in self.yumbase.tsInfo: + packages.add(txmbr.pkgtup) + return list(packages), list(unknown) def clean_cache(self): for mdtype in ["Headers", "Packages", "Sqlite", "Metadata", @@ -345,29 +286,41 @@ def main(): depsolver = DepSolver(options.config, options.verbose) if cmd == "clean": depsolver.clean_cache() - print json.dumps(True) + print(json.dumps(True)) elif cmd == "complete": data = json.loads(sys.stdin.read()) depsolver.groups = data['groups'] - (packages, unknown) = depsolver.complete(data['packages']) - print json.dumps(dict(packages=list(packages), - unknown=list(unknown))) + (packages, unknown) = depsolver.complete([pkg_to_tuple(p) + for p in data['packages']]) + print(json.dumps(dict(packages=list(packages), + unknown=list(unknown)))) elif cmd == "is_virtual_package": - package = json.loads(sys.stdin.read()) - print json.dumps(bool(depsolver.get_provides(package, silent=True))) + package = pkg_to_tuple(json.loads(sys.stdin.read())) + print(json.dumps(bool(depsolver.get_provides(package, silent=True)))) elif cmd == "get_deps" or cmd == "get_provides": - package = json.loads(sys.stdin.read()) - print json.dumps(list(getattr(depsolver, cmd)(package))) + package = pkg_to_tuple(json.loads(sys.stdin.read())) + print(json.dumps([p.name for p in getattr(depsolver, cmd)(package)])) elif cmd == "get_group": data = json.loads(sys.stdin.read()) if "type" in data: packages = depsolver.get_group(data['group'], ptype=data['type']) else: packages = depsolver.get_group(data['group']) - print 
json.dumps(list(packages)) + print(json.dumps(list(packages))) + elif cmd == "get_groups": + data = json.loads(sys.stdin.read()) + rv = dict() + for gdata in data: + if "type" in gdata: + packages = depsolver.get_group(gdata['group'], + ptype=gdata['type']) + else: + packages = depsolver.get_group(gdata['group']) + rv[gdata['group']] = list(packages) + print(json.dumps(rv)) elif cmd == "is_package": - package = json.loads(sys.stdin.read()) - print json.dumps(getattr(depsolver, cmd)(package)) + package = pkg_to_tuple(json.loads(sys.stdin.read())) + print(json.dumps(getattr(depsolver, cmd)(package))) if __name__ == '__main__': |