Diffstat (limited to 'src/lib/Bcfg2')
212 files changed, 29246 insertions, 0 deletions
diff --git a/src/lib/Bcfg2/Bcfg2Py3k.py b/src/lib/Bcfg2/Bcfg2Py3k.py
new file mode 100644
index 000000000..6af8b3e5c
--- /dev/null
+++ b/src/lib/Bcfg2/Bcfg2Py3k.py
@@ -0,0 +1,81 @@
+import sys
+
+try:
+    from email.Utils import formatdate
+except ImportError:
+    from email.utils import formatdate
+
+# urllib imports
+try:
+    from urlparse import urljoin, urlparse
+    from urllib2 import HTTPBasicAuthHandler
+    from urllib2 import HTTPPasswordMgrWithDefaultRealm
+    from urllib2 import build_opener
+    from urllib2 import install_opener
+    from urllib2 import urlopen
+    from urllib2 import HTTPError
+except ImportError:
+    from urllib.parse import urljoin, urlparse
+    from urllib.request import HTTPBasicAuthHandler
+    from urllib.request import HTTPPasswordMgrWithDefaultRealm
+    from urllib.request import build_opener
+    from urllib.request import install_opener
+    from urllib.request import urlopen
+    from urllib.error import HTTPError
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+try:
+    import cPickle
+except ImportError:
+    import pickle as cPickle
+
+try:
+    from Queue import Queue, Empty, Full
+except ImportError:
+    from queue import Queue, Empty, Full
+
+# xmlrpc imports
+try:
+    import xmlrpclib, SimpleXMLRPCServer
+except ImportError:
+    import xmlrpc.client as xmlrpclib
+    import xmlrpc.server as SimpleXMLRPCServer
+
+# socketserver import
+try:
+    import SocketServer
+except ImportError:
+    import socketserver as SocketServer
+
+# httplib imports
+try:
+    import httplib
+except ImportError:
+    import http.client as httplib
+
+# unicode string compatibility
+def u_str(string, encoding=None):
+    if sys.hexversion >= 0x03000000:
+        if encoding is not None:
+            return string.encode(encoding)
+        else:
+            return string
+    else:
+        if encoding is not None:
+            return unicode(string, encoding)
+        else:
+            return unicode(string)
+
+if sys.hexversion >= 0x03000000:
+    from io import FileIO as file
+else:
+    file = file
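The shim above relies on the standard try/except import idiom: attempt the Python 2 module name first, then fall back to the Python 3 location re-exported under the old name. A minimal sketch of a consumer follows; the config contents are illustrative only, not part of this commit:

# Hypothetical consumer of Bcfg2Py3k: code written against the Python 2
# names keeps working on Python 3 because the shim re-exports the new
# modules under the old names.
from Bcfg2.Bcfg2Py3k import ConfigParser, StringIO

cp = ConfigParser.ConfigParser()
cp.readfp(StringIO("[server]\nrepository = /var/lib/bcfg2\n"))
print(cp.get("server", "repository"))  # -> /var/lib/bcfg2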
+""" + +import logging +import sys +import time +import Bcfg2.Client.Tools + + +def cmpent(ent1, ent2): + """Sort entries.""" + if ent1.tag != ent2.tag: + return cmp(ent1.tag, ent2.tag) + else: + return cmp(ent1.get('name'), ent2.get('name')) + + +def promptFilter(prompt, entries): + """Filter a supplied list based on user input.""" + ret = [] + entries.sort(cmpent) + for entry in entries[:]: + if 'qtext' in entry.attrib: + iprompt = entry.get('qtext') + else: + iprompt = prompt % (entry.tag, entry.get('name')) + try: + # py3k compatibility + try: + ans = raw_input(iprompt.encode(sys.stdout.encoding, 'replace')) + except NameError: + ans = input(iprompt) + if ans in ['y', 'Y']: + ret.append(entry) + except EOFError: + # python 2.4.3 on CentOS doesn't like ^C for some reason + break + except: + print("Error while reading input") + continue + return ret + + +def matches_entry(entryspec, entry): + # both are (tag, name) + if entryspec == entry: + return True + else: + for i in [0, 1]: + if entryspec[i] == entry[i]: + continue + elif entryspec[i] == '*': + continue + elif '*' in entryspec[i]: + starpt = entryspec[i].index('*') + if entry[i].startswith(entryspec[i][:starpt]): + continue + return False + return True + + +def matches_white_list(entry, whitelist): + return True in [matches_entry(we, (entry.tag, entry.get('name'))) + for we in whitelist] + + +def passes_black_list(entry, blacklist): + return True not in [matches_entry(be, (entry.tag, entry.get('name'))) + for be in blacklist] + + +class Frame: + """Frame is the container for all Tool objects and state information.""" + def __init__(self, config, setup, times, drivers, dryrun): + self.config = config + self.times = times + self.dryrun = dryrun + self.times['initialization'] = time.time() + self.setup = setup + self.tools = [] + self.states = {} + self.whitelist = [] + self.blacklist = [] + self.removal = [] + self.logger = logging.getLogger("Bcfg2.Client.Frame") + for driver in drivers[:]: + if driver not in Bcfg2.Client.Tools.drivers and \ + isinstance(driver, str): + self.logger.error("Tool driver %s is not available" % driver) + drivers.remove(driver) + + tclass = {} + for tool in drivers: + if not isinstance(tool, str): + tclass[time.time()] = tool + tool_class = "Bcfg2.Client.Tools.%s" % tool + try: + tclass[tool] = getattr(__import__(tool_class, globals(), + locals(), ['*']), + tool) + except ImportError: + continue + except: + self.logger.error("Tool %s unexpectedly failed to load" % tool, + exc_info=1) + + for tool in list(tclass.values()): + try: + self.tools.append(tool(self.logger, setup, config)) + except Bcfg2.Client.Tools.toolInstantiationError: + continue + except: + self.logger.error("Failed to instantiate tool %s" % \ + (tool), exc_info=1) + + for tool in self.tools[:]: + for conflict in getattr(tool, 'conflicts', []): + [self.tools.remove(item) for item in self.tools \ + if item.name == conflict] + + self.logger.info("Loaded tool drivers:") + self.logger.info([tool.name for tool in self.tools]) + + # find entries not handled by any tools + problems = [entry for struct in config for \ + entry in struct if entry not in self.handled] + + if problems: + self.logger.error("The following entries are not handled by any tool:") + self.logger.error(["%s:%s:%s" % (entry.tag, entry.get('type'), \ + entry.get('name')) for entry in problems]) + self.logger.error("") + entries = [(entry.tag, entry.get('name')) + for struct in config for entry in struct] + pkgs = [(entry.get('name'), entry.get('origin')) + for struct in config 
for entry in struct if entry.tag == 'Package'] + multi = [] + for entry in entries[:]: + if entries.count(entry) > 1: + multi.append(entry) + entries.remove(entry) + if multi: + self.logger.debug("The following entries are included multiple times:") + self.logger.debug(["%s:%s" % entry for entry in multi]) + self.logger.debug("") + if pkgs: + self.logger.debug("The following packages are specified in bcfg2:") + self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == None]) + self.logger.debug("The following packages are prereqs added by Packages:") + self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages']) + + def __getattr__(self, name): + if name in ['extra', 'handled', 'modified', '__important__']: + ret = [] + for tool in self.tools: + ret += getattr(tool, name) + return ret + elif name in self.__dict__: + return self.__dict__[name] + raise AttributeError(name) + + def InstallImportant(self): + """Install important entries + + We also process the decision mode stuff here because we want to prevent + non-whitelisted/blacklisted 'important' entries from being installed + prior to determining the decision mode on the client. + """ + # Need to process decision stuff early so that dryrun mode works with it + self.whitelist = [entry for entry in self.states \ + if not self.states[entry]] + if not self.setup['file']: + if self.setup['decision'] == 'whitelist': + dwl = self.setup['decision_list'] + w_to_rem = [e for e in self.whitelist \ + if not matches_white_list(e, dwl)] + if w_to_rem: + self.logger.info("In whitelist mode: suppressing installation of:") + self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem]) + self.whitelist = [x for x in self.whitelist \ + if x not in w_to_rem] + elif self.setup['decision'] == 'blacklist': + b_to_rem = [e for e in self.whitelist \ + if not passes_black_list(e, self.setup['decision_list'])] + if b_to_rem: + self.logger.info("In blacklist mode: suppressing installation of:") + self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem]) + self.whitelist = [x for x in self.whitelist if x not in b_to_rem] + + # take care of important entries first + if not self.dryrun and not self.setup['bundle']: + for cfile in [cfl for cfl in self.config.findall(".//Path") \ + if cfl.get('name') in self.__important__ and \ + cfl.get('type') == 'file']: + if cfile not in self.whitelist: + continue + tl = [t for t in self.tools if t.handlesEntry(cfile) \ + and t.canVerify(cfile)] + if tl: + if self.setup['interactive'] and not \ + promptFilter("Install %s: %s? (y/N):", [cfile]): + self.whitelist.remove(cfile) + continue + try: + self.states[cfile] = tl[0].InstallPath(cfile) + if self.states[cfile]: + tl[0].modified.append(cfile) + except: + self.logger.error("Unexpected tool failure", + exc_info=1) + cfile.set('qtext', '') + if tl[0].VerifyPath(cfile, []): + self.whitelist.remove(cfile) + + def Inventory(self): + """ + Verify all entries, + find extra entries, + and build up workqueues + + """ + # initialize all states + for struct in self.config.getchildren(): + for entry in struct.getchildren(): + self.states[entry] = False + for tool in self.tools: + try: + tool.Inventory(self.states) + except: + self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1) + + def Decide(self): + """Set self.whitelist based on user interaction.""" + prompt = "Install %s: %s? (y/N): " + rprompt = "Remove %s: %s? 
(y/N): " + if self.setup['remove']: + if self.setup['remove'] == 'all': + self.removal = self.extra + elif self.setup['remove'] in ['services', 'Services']: + self.removal = [entry for entry in self.extra + if entry.tag == 'Service'] + elif self.setup['remove'] in ['packages', 'Packages']: + self.removal = [entry for entry in self.extra + if entry.tag == 'Package'] + + candidates = [entry for entry in self.states + if not self.states[entry]] + + if self.dryrun: + if self.whitelist: + self.logger.info("In dryrun mode: suppressing entry installation for:") + self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) + for entry in self.whitelist]) + self.whitelist = [] + if self.removal: + self.logger.info("In dryrun mode: suppressing entry removal for:") + self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) + for entry in self.removal]) + self.removal = [] + return + # Here is where most of the work goes + # first perform bundle filtering + if self.setup['bundle']: + all_bundle_names = [b.get('name') for b in + self.config.findall('./Bundle')] + # warn if non-existent bundle given + for bundle in self.setup['bundle']: + if bundle not in all_bundle_names: + self.logger.info("Warning: Bundle %s not found" % bundle) + bundles = [b for b in self.config.findall('./Bundle') + if b.get('name') in self.setup['bundle']] + self.whitelist = [e for e in self.whitelist + if True in [e in b for b in bundles]] + elif self.setup['indep']: + bundles = [nb for nb in self.config.getchildren() + if nb.tag != 'Bundle'] + else: + bundles = self.config.getchildren() + + # first process prereq actions + for bundle in bundles[:]: + if bundle.tag != 'Bundle': + continue + bmodified = len([item for item in bundle if item in self.whitelist]) + actions = [a for a in bundle.findall('./Action') + if (a.get('timing') != 'post' and + (bmodified or a.get('when') == 'always'))] + # now we process all "always actions" + if self.setup['interactive']: + promptFilter(prompt, actions) + self.DispatchInstallCalls(actions) + + # need to test to fail entries in whitelist + if False in [self.states[a] for a in actions]: + # then display bundles forced off with entries + self.logger.info("Bundle %s failed prerequisite action" % + (bundle.get('name'))) + bundles.remove(bundle) + b_to_remv = [ent for ent in self.whitelist if ent in bundle] + if b_to_remv: + self.logger.info("Not installing entries from Bundle %s" % + (bundle.get('name'))) + self.logger.info(["%s:%s" % (e.tag, e.get('name')) + for e in b_to_remv]) + [self.whitelist.remove(ent) for ent in b_to_remv] + + if self.setup['interactive']: + self.whitelist = promptFilter(prompt, self.whitelist) + self.removal = promptFilter(rprompt, self.removal) + + for entry in candidates: + if entry not in self.whitelist: + self.blacklist.append(entry) + + def DispatchInstallCalls(self, entries): + """Dispatch install calls to underlying tools.""" + for tool in self.tools: + handled = [entry for entry in entries if tool.canInstall(entry)] + if not handled: + continue + try: + tool.Install(handled, self.states) + except: + self.logger.error("%s.Install() call failed:" % tool.name, exc_info=1) + + def Install(self): + """Install all entries.""" + self.DispatchInstallCalls(self.whitelist) + mods = self.modified + mbundles = [struct for struct in self.config.findall('Bundle') if \ + [mod for mod in mods if mod in struct]] + + if self.modified: + # Handle Bundle interdeps + if mbundles: + self.logger.info("The Following Bundles have been modified:") + self.logger.info([mbun.get('name') for 
mbun in mbundles]) + self.logger.info("") + tbm = [(t, b) for t in self.tools for b in mbundles] + for tool, bundle in tbm: + try: + tool.Inventory(self.states, [bundle]) + except: + self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1) + clobbered = [entry for bundle in mbundles for entry in bundle \ + if not self.states[entry] and entry not in self.blacklist] + if clobbered: + self.logger.debug("Found clobbered entries:") + self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) \ + for entry in clobbered]) + if not self.setup['interactive']: + self.DispatchInstallCalls(clobbered) + + for bundle in self.config.findall('.//Bundle'): + if self.setup['bundle'] and \ + bundle.get('name') not in self.setup['bundle']: + # prune out unspecified bundles when running with -b + continue + for tool in self.tools: + try: + if bundle in mbundles: + tool.BundleUpdated(bundle, self.states) + else: + tool.BundleNotUpdated(bundle, self.states) + except: + self.logger.error("%s.BundleNotUpdated() call failed:" % \ + (tool.name), exc_info=1) + + def Remove(self): + """Remove extra entries.""" + for tool in self.tools: + extras = [entry for entry in self.removal if tool.handlesEntry(entry)] + if extras: + try: + tool.Remove(extras) + except: + self.logger.error("%s.Remove() failed" % tool.name, exc_info=1) + + def CondDisplayState(self, phase): + """Conditionally print tracing information.""" + self.logger.info('\nPhase: %s' % phase) + self.logger.info('Correct entries:\t%d' % list(self.states.values()).count(True)) + self.logger.info('Incorrect entries:\t%d' % list(self.states.values()).count(False)) + if phase == 'final' and list(self.states.values()).count(False): + self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for \ + entry in self.states if not self.states[entry]]) + self.logger.info('Total managed entries:\t%d' % len(list(self.states.values()))) + self.logger.info('Unmanaged entries:\t%d' % len(self.extra)) + if phase == 'final' and self.setup['extra']: + self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) \ + for entry in self.extra]) + + self.logger.info("") + + if ((list(self.states.values()).count(False) == 0) and not self.extra): + self.logger.info('All entries correct.') + + def ReInventory(self): + """Recheck everything.""" + if not self.dryrun and self.setup['kevlar']: + self.logger.info("Rechecking system inventory") + self.Inventory() + + def Execute(self): + """Run all methods.""" + self.Inventory() + self.times['inventory'] = time.time() + self.CondDisplayState('initial') + self.InstallImportant() + self.Decide() + self.Install() + self.times['install'] = time.time() + self.Remove() + self.times['remove'] = time.time() + if self.modified: + self.ReInventory() + self.times['reinventory'] = time.time() + self.times['finished'] = time.time() + self.CondDisplayState('final') + + def GenerateStats(self): + """Generate XML summary of execution statistics.""" + feedback = Bcfg2.Client.XML.Element("upload-statistics") + stats = Bcfg2.Client.XML.SubElement(feedback, + 'Statistics', + total=str(len(self.states)), + version='2.0', + revision=self.config.get('revision', '-1')) + good = len([key for key, val in list(self.states.items()) if val]) + stats.set('good', str(good)) + if len([key for key, val in list(self.states.items()) if not val]) == 0: + stats.set('state', 'clean') + else: + stats.set('state', 'dirty') + + # List bad elements of the configuration + for (data, ename) in [(self.modified, 'Modified'), (self.extra, "Extra"), \ + ([entry for entry in 
self.states if not \ + self.states[entry]], "Bad")]: + container = Bcfg2.Client.XML.SubElement(stats, ename) + for item in data: + item.set('qtext', '') + container.append(item) + item.text = None + + timeinfo = Bcfg2.Client.XML.Element("OpStamps") + feedback.append(stats) + for (event, timestamp) in list(self.times.items()): + timeinfo.set(event, str(timestamp)) + stats.append(timeinfo) + return feedback diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py new file mode 100644 index 000000000..aaaf2472f --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/APK.py @@ -0,0 +1,61 @@ +"""This provides Bcfg2 support for Alpine Linux APK packages.""" + +import Bcfg2.Client.Tools + + +class APK(Bcfg2.Client.Tools.PkgTool): + """Support for Apk packages.""" + name = 'APK' + __execs__ = ["/sbin/apk"] + __handles__ = [('Package', 'apk')] + __req__ = {'Package': ['name', 'version']} + pkgtype = 'apk' + pkgtool = ("/sbin/apk add %s", ("%s", ["name"])) + + def __init__(self, logger, setup, config): + Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + self.installed = {} + self.RefreshPackages() + + def RefreshPackages(self): + """Refresh memory hashes of packages.""" + names = self.cmd.run("/sbin/apk info")[1] + nameversions = self.cmd.run("/sbin/apk info -v")[1] + for pkg in zip(names, nameversions): + pkgname = pkg[0] + version = pkg[1][len(pkgname) + 1:] + self.logger.debug(" pkgname: %s\n version: %s" % + (pkgname, version)) + self.installed[pkgname] = version + + def VerifyPackage(self, entry, modlist): + """Verify Package status for entry.""" + if not 'version' in entry.attrib: + self.logger.info("Cannot verify unversioned package %s" % + (entry.attrib['name'])) + return False + + if entry.attrib['name'] in self.installed: + if entry.attrib['version'] in ['auto', self.installed[entry.attrib['name']]]: + #if not self.setup['quick'] and \ + # entry.get('verify', 'true') == 'true': + #FIXME: Does APK have any sort of verification mechanism? 
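matches_entry() above implements the prefix-glob matching used by whitelist and blacklist decision lists: an exact (tag, name) match, a literal '*', or a match on everything before the first '*' all pass. A small standalone check of the same semantics, with illustrative values:

from Bcfg2.Client.Frame import matches_entry

# exact match
assert matches_entry(('Path', '/etc/motd'), ('Path', '/etc/motd'))
# '*' matches any tag (or any name)
assert matches_entry(('*', '/etc/motd'), ('Service', '/etc/motd'))
# prefix glob: everything before the first '*' must match
assert matches_entry(('Path', '/etc/ssh/*'), ('Path', '/etc/ssh/sshd_config'))
assert not matches_entry(('Path', '/etc/ssh/*'), ('Path', '/etc/motd'))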
diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py
new file mode 100644
index 000000000..aaaf2472f
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/APK.py
@@ -0,0 +1,61 @@
+"""This provides Bcfg2 support for Alpine Linux APK packages."""
+
+import Bcfg2.Client.Tools
+
+
+class APK(Bcfg2.Client.Tools.PkgTool):
+    """Support for Apk packages."""
+    name = 'APK'
+    __execs__ = ["/sbin/apk"]
+    __handles__ = [('Package', 'apk')]
+    __req__ = {'Package': ['name', 'version']}
+    pkgtype = 'apk'
+    pkgtool = ("/sbin/apk add %s", ("%s", ["name"]))
+
+    def __init__(self, logger, setup, config):
+        Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+        self.installed = {}
+        self.RefreshPackages()
+
+    def RefreshPackages(self):
+        """Refresh memory hashes of packages."""
+        names = self.cmd.run("/sbin/apk info")[1]
+        nameversions = self.cmd.run("/sbin/apk info -v")[1]
+        for pkg in zip(names, nameversions):
+            pkgname = pkg[0]
+            version = pkg[1][len(pkgname) + 1:]
+            self.logger.debug(" pkgname: %s\n version: %s" %
+                              (pkgname, version))
+            self.installed[pkgname] = version
+
+    def VerifyPackage(self, entry, modlist):
+        """Verify Package status for entry."""
+        if not 'version' in entry.attrib:
+            self.logger.info("Cannot verify unversioned package %s" %
+                             (entry.attrib['name']))
+            return False
+
+        if entry.attrib['name'] in self.installed:
+            if entry.attrib['version'] in ['auto', self.installed[entry.attrib['name']]]:
+                #if not self.setup['quick'] and \
+                #        entry.get('verify', 'true') == 'true':
+                #FIXME: Does APK have any sort of verification mechanism?
+                return True
+            else:
+                self.logger.info("  pkg %s at version %s, not %s" %
+                                 (entry.attrib['name'],
+                                  self.installed[entry.attrib['name']],
+                                  entry.attrib['version']))
+                entry.set('current_version', self.installed[entry.get('name')])
+                return False
+        entry.set('current_exists', 'false')
+        return False
+
+    def RemovePackages(self, packages):
+        """Remove extra packages."""
+        names = [pkg.get('name') for pkg in packages]
+        self.logger.info("Removing packages: %s" % " ".join(names))
+        self.cmd.run("/sbin/apk del %s" % \
+                     " ".join(names))
+        self.RefreshPackages()
+        self.extra = self.FindExtraPackages()
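RefreshPackages() above pairs the output of `apk info` (bare package names) with `apk info -v` (name-version strings) and slices the version off by name length. The same parsing in isolation, with illustrative sample lines:

names = ["busybox", "openssl"]            # sample `apk info` output
nameversions = ["busybox-1.18.4-r0",      # sample `apk info -v` output
                "openssl-1.0.0d-r0"]
installed = {}
for name, nv in zip(names, nameversions):
    # strip "<name>-" from the combined string to recover the version
    installed[name] = nv[len(name) + 1:]
assert installed == {"busybox": "1.18.4-r0", "openssl": "1.0.0d-r0"}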
diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py
new file mode 100644
index 000000000..6b839ffbc
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/APT.py
@@ -0,0 +1,271 @@
+"""This is the Bcfg2 support for apt-get."""
+
+# suppress apt API warnings
+import warnings
+warnings.filterwarnings("ignore", "apt API not stable yet",
+                        FutureWarning)
+import apt.cache
+import os
+import sys
+
+import Bcfg2.Client.Tools
+import Bcfg2.Options
+
+# Options for tool locations
+opts = {'install_path': Bcfg2.Options.CLIENT_APT_TOOLS_INSTALL_PATH,
+        'var_path': Bcfg2.Options.CLIENT_APT_TOOLS_VAR_PATH,
+        'etc_path': Bcfg2.Options.CLIENT_SYSTEM_ETC_PATH}
+setup = Bcfg2.Options.OptionParser(opts)
+setup.parse([])
+install_path = setup['install_path']
+var_path = setup['var_path']
+etc_path = setup['etc_path']
+DEBSUMS = '%s/bin/debsums' % install_path
+APTGET = '%s/bin/apt-get' % install_path
+DPKG = '%s/bin/dpkg' % install_path
+
+
+class APT(Bcfg2.Client.Tools.Tool):
+    """The Debian toolset implements package and service operations and inherits
+    the rest from Toolset.Toolset.
+
+    """
+    name = 'APT'
+    __execs__ = [DEBSUMS, APTGET, DPKG]
+    __handles__ = [('Package', 'deb'), ('Path', 'ignore')]
+    __req__ = {'Package': ['name', 'version'], 'Path': ['type']}
+
+    def __init__(self, logger, setup, config):
+        Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+        path_entries = os.environ['PATH'].split(':')
+        for reqdir in ['/sbin', '/usr/sbin']:
+            if reqdir not in path_entries:
+                os.environ['PATH'] = os.environ['PATH'] + ':' + reqdir
+        self.pkgcmd = '%s ' % APTGET + \
+                      '-o DPkg::Options::=--force-overwrite ' + \
+                      '-o DPkg::Options::=--force-confold ' + \
+                      '-o DPkg::Options::=--force-confmiss ' + \
+                      '--reinstall ' + \
+                      '--force-yes '
+        if not self.setup['debug']:
+            self.pkgcmd += '-q=2 '
+        self.pkgcmd += '-y install %s'
+        self.ignores = [entry.get('name') for struct in config \
+                        for entry in struct \
+                        if entry.tag == 'Path' and \
+                           entry.get('type') == 'ignore']
+        self.__important__ = self.__important__ + \
+                             ["%s/cache/debconf/config.dat" % var_path,
+                              "%s/cache/debconf/templates.dat" % var_path,
+                              '/etc/passwd', '/etc/group',
+                              '%s/apt/apt.conf' % etc_path,
+                              '%s/dpkg/dpkg.cfg' % etc_path] + \
+                             [entry.get('name') for struct in config for entry in struct \
+                              if entry.tag == 'Path' and \
+                                 entry.get('name').startswith('%s/apt/sources.list' % etc_path)]
+        self.nonexistent = [entry.get('name') for struct in config for entry in struct \
+                            if entry.tag == 'Path' and entry.get('type') == 'nonexistent']
+        os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
+        self.actions = {}
+        if self.setup['kevlar'] and not self.setup['dryrun']:
+            self.cmd.run("%s --force-confold --configure --pending" % DPKG)
+            self.cmd.run("%s clean" % APTGET)
+            try:
+                self.pkg_cache = apt.cache.Cache()
+            except SystemError:
+                e = sys.exc_info()[1]
+                self.logger.info("Failed to initialize APT cache: %s" % e)
+                raise Bcfg2.Client.Tools.toolInstantiationError
+            self.pkg_cache.update()
+        self.pkg_cache = apt.cache.Cache()
+        if 'req_reinstall_pkgs' in dir(self.pkg_cache):
+            self._newapi = True
+        else:
+            self._newapi = False
+
+    def FindExtra(self):
+        """Find extra packages."""
+        packages = [entry.get('name') for entry in self.getSupportedEntries()]
+        if self._newapi:
+            extras = [(p.name, p.installed.version) for p in self.pkg_cache
+                      if p.is_installed and p.name not in packages]
+        else:
+            extras = [(p.name, p.installedVersion) for p in self.pkg_cache
+                      if p.isInstalled and p.name not in packages]
+        return [Bcfg2.Client.XML.Element('Package', name=name, \
+                                         type='deb', version=version) \
+                for (name, version) in extras]
+
+    def VerifyDebsums(self, entry, modlist):
+        output = self.cmd.run("%s -as %s" % (DEBSUMS, entry.get('name')))[1]
+        if len(output) == 1 and "no md5sums for" in output[0]:
+            self.logger.info("Package %s has no md5sums. Cannot verify" % \
+                             entry.get('name'))
+            entry.set('qtext', "Reinstall Package %s-%s to setup md5sums? (y/N) " \
+                      % (entry.get('name'), entry.get('version')))
+            return False
+        files = []
+        for item in output:
+            if "checksum mismatch" in item:
+                files.append(item.split()[-1])
+            elif "changed file" in item:
+                files.append(item.split()[3])
+            elif "can't open" in item:
+                if item.split()[5] not in self.nonexistent:
+                    files.append(item.split()[5])
+            elif "missing file" in item and \
+                 item.split()[3] in self.nonexistent:
+                # these files should not exist
+                continue
+            elif "is not installed" in item or "missing file" in item:
+                self.logger.error("Package %s is not fully installed" \
+                                  % entry.get('name'))
+            else:
+                self.logger.error("Got Unsupported pattern %s from debsums" \
+                                  % item)
+                files.append(item)
+        files = list(set(files) - set(self.ignores))
+        # We check if there is file in the checksum to do
+        if files:
+            # if files are found there we try to be sure our modlist is sane
+            # with erroneous symlinks
+            modlist = [os.path.realpath(filename) for filename in modlist]
+            bad = [filename for filename in files if filename not in modlist]
+            if bad:
+                self.logger.debug("It is suggested that you either manage these "
+                                  "files, revert the changes, or ignore false "
+                                  "failures:")
+                self.logger.info("Package %s failed validation. Bad files are:" % \
+                                 entry.get('name'))
+                self.logger.info(bad)
+                entry.set('qtext',
+                          "Reinstall Package %s-%s to fix failing files? (y/N) " % \
+                          (entry.get('name'), entry.get('version')))
+                return False
+        return True
+
+    def VerifyPackage(self, entry, modlist, checksums=True):
+        """Verify package for entry."""
+        if not 'version' in entry.attrib:
+            self.logger.info("Cannot verify unversioned package %s" %
+                             (entry.attrib['name']))
+            return False
+        pkgname = entry.get('name')
+        if self.pkg_cache.has_key(pkgname):
+            if self._newapi:
+                is_installed = self.pkg_cache[pkgname].is_installed
+            else:
+                is_installed = self.pkg_cache[pkgname].isInstalled
+        if not self.pkg_cache.has_key(pkgname) or not is_installed:
+            self.logger.info("Package %s not installed" % (entry.get('name')))
+            entry.set('current_exists', 'false')
+            return False
+
+        pkg = self.pkg_cache[pkgname]
+        if self._newapi:
+            installed_version = pkg.installed.version
+            candidate_version = pkg.candidate.version
+        else:
+            installed_version = pkg.installedVersion
+            candidate_version = pkg.candidateVersion
+        if entry.get('version') == 'auto':
+            if self._newapi:
+                is_upgradable = self.pkg_cache._depcache.is_upgradable(pkg._pkg)
+            else:
+                is_upgradable = self.pkg_cache._depcache.IsUpgradable(pkg._pkg)
+            if is_upgradable:
+                desiredVersion = candidate_version
+            else:
+                desiredVersion = installed_version
+        elif entry.get('version') == 'any':
+            desiredVersion = installed_version
+        else:
+            desiredVersion = entry.get('version')
+        if desiredVersion != installed_version:
+            entry.set('current_version', installed_version)
+            entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % \
+                      (entry.get('name'), entry.get('current_version'),
+                       desiredVersion))
+            return False
+        else:
+            # version matches
+            if not self.setup['quick'] and entry.get('verify', 'true') == 'true' \
+               and checksums:
+                pkgsums = self.VerifyDebsums(entry, modlist)
+                return pkgsums
+        return True
+
+    def Remove(self, packages):
+        """Deal with extra configuration detected."""
+        pkgnames = " ".join([pkg.get('name') for pkg in packages])
+        self.pkg_cache = apt.cache.Cache()
+        if len(packages) > 0:
+            self.logger.info('Removing packages:')
+            self.logger.info(pkgnames)
+            for pkg in pkgnames.split(" "):
+                try:
+                    if self._newapi:
+                        self.pkg_cache[pkg].mark_delete(purge=True)
+                    else:
+                        self.pkg_cache[pkg].markDelete(purge=True)
+                except:
+                    if self._newapi:
+                        self.pkg_cache[pkg].mark_delete()
+                    else:
+                        self.pkg_cache[pkg].markDelete()
+            try:
+                self.pkg_cache.commit()
+            except SystemExit:
+                # thank you python-apt 0.6
+                pass
+            self.pkg_cache = apt.cache.Cache()
+            self.modified += packages
+            self.extra = self.FindExtra()
+
+    def Install(self, packages, states):
+        # it looks like you can't install arbitrary versions of software
+        # out of the pkg cache, we will still need to call apt-get
+        ipkgs = []
+        bad_pkgs = []
+        for pkg in packages:
+            if not self.pkg_cache.has_key(pkg.get('name')):
+                self.logger.error("APT has no information about package %s" % (pkg.get('name')))
+                continue
+            if pkg.get('version') in ['auto', 'any']:
+                if self._newapi:
+                    ipkgs.append("%s=%s" % (pkg.get('name'),
+                                            self.pkg_cache[pkg.get('name')].candidate.version))
+                else:
+                    ipkgs.append("%s=%s" % (pkg.get('name'),
+                                            self.pkg_cache[pkg.get('name')].candidateVersion))
+                continue
+            if self._newapi:
+                avail_vers = [x.ver_str for x in \
+                              self.pkg_cache[pkg.get('name')]._pkg.version_list]
+            else:
+                avail_vers = [x.VerStr for x in \
+                              self.pkg_cache[pkg.get('name')]._pkg.VersionList]
+            if pkg.get('version') in avail_vers:
+                ipkgs.append("%s=%s" % (pkg.get('name'), pkg.get('version')))
+                continue
+            else:
+                self.logger.error("Package %s: desired version %s not in %s" \
+                                  % (pkg.get('name'), pkg.get('version'),
+                                     avail_vers))
+                bad_pkgs.append(pkg.get('name'))
+        if bad_pkgs:
+            self.logger.error("Cannot find correct versions of packages:")
+            self.logger.error(bad_pkgs)
+        if not ipkgs:
+            return
+        rc = self.cmd.run(self.pkgcmd % (" ".join(ipkgs)))[0]
+        if rc:
+            self.logger.error("APT command failed")
+        self.pkg_cache = apt.cache.Cache()
+        self.extra = self.FindExtra()
+        for package in packages:
+            states[package] = self.VerifyPackage(package, [], checksums=False)
+            if states[package]:
+                self.modified.append(package)
+
+    def VerifyPath(self, entry, _):
+        """Do nothing here since we only verify Path type=ignore."""
+        return True
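The `_newapi` checks above exist because python-apt renamed its camelCase attributes (isInstalled, installedVersion, markDelete) to snake_case (is_installed, installed.version, mark_delete). A compact sketch of the same probe-once pattern, shown as an illustration rather than what this commit does:

import apt.cache

cache = apt.cache.Cache()
# Probe one renamed attribute to decide which API generation is present,
# mirroring the `'req_reinstall_pkgs' in dir(cache)` check above.
NEW_API = hasattr(cache, 'req_reinstall_pkgs')

def installed_version(pkg):
    """Return the installed version string under either API, or None."""
    if NEW_API:
        return pkg.installed.version if pkg.is_installed else None
    return pkg.installedVersion if pkg.isInstalled else None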
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
new file mode 100644
index 000000000..dc49347e9
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -0,0 +1,107 @@
+"""Action driver"""
+
+import Bcfg2.Client.Tools
+from Bcfg2.Client.Frame import matches_white_list, passes_black_list
+
+"""
+<Action timing='pre|post|both'
+        name='name'
+        command='cmd text'
+        when='always|modified'
+        status='ignore|check'/>
+<PostInstall name='foo'/>
+  => <Action timing='post'
+             when='modified'
+             name='n'
+             command='foo'
+             status='ignore'/>
+"""
+
+
+class Action(Bcfg2.Client.Tools.Tool):
+    """Implement Actions"""
+    name = 'Action'
+    __handles__ = [('PostInstall', None), ('Action', None)]
+    __req__ = {'PostInstall': ['name'],
+               'Action': ['name', 'timing', 'when', 'command', 'status']}
+
+    def _action_allowed(self, action):
+        if self.setup['decision'] == 'whitelist' and \
+           not matches_white_list(action, self.setup['decision_list']):
+            self.logger.info("In whitelist mode: suppressing Action:" + \
+                             action.get('name'))
+            return False
+        if self.setup['decision'] == 'blacklist' and \
+           not passes_black_list(action, self.setup['decision_list']):
+            self.logger.info("In blacklist mode: suppressing Action:" + \
+                             action.get('name'))
+            return False
+        return True
+
+    def RunAction(self, entry):
+        """This method handles command execution and status return."""
+        if not self.setup['dryrun']:
+            if self.setup['interactive']:
+                prompt = ('Run Action %s, %s: (y/N): ' %
+                          (entry.get('name'), entry.get('command')))
+                # py3k compatibility
+                try:
+                    ans = raw_input(prompt)
+                except NameError:
+                    ans = input(prompt)
+                if ans not in ['y', 'Y']:
+                    return False
+            if self.setup['servicemode'] == 'build':
+                if entry.get('build', 'true') == 'false':
+                    self.logger.debug("Action: Deferring execution of %s due to build mode" % (entry.get('command')))
+                    return False
+            self.logger.debug("Running Action %s" % (entry.get('name')))
+            rc = self.cmd.run(entry.get('command'))[0]
+            self.logger.debug("Action: %s got rc %s" % (entry.get('command'), rc))
+            entry.set('rc', str(rc))
+            if entry.get('status', 'check') == 'ignore':
+                return True
+            else:
+                return rc == 0
+        else:
+            self.logger.debug("In dryrun mode: not running action:\n %s" %
+                              (entry.get('name')))
+            return False
+
+    def VerifyAction(self, dummy, _):
+        """Actions always verify true."""
+        return True
+
+    def VerifyPostInstall(self, dummy, _):
+        """Actions always verify true."""
+        return True
+
+    def InstallAction(self, entry):
+        """Run actions as pre-checks for bundle installation."""
+        if entry.get('timing') != 'post':
+            return self.RunAction(entry)
+        return True
+
+    def InstallPostInstall(self, entry):
+        return self.InstallAction(entry)
+
+    def BundleUpdated(self, bundle, states):
+        """Run postinstalls when bundles have been updated."""
+        for postinst in bundle.findall("PostInstall"):
+            if not self._action_allowed(postinst):
+                continue
+            self.cmd.run(postinst.get('name'))
+        for action in bundle.findall("Action"):
+            if action.get('timing') in ['post', 'both']:
+                if not self._action_allowed(action):
+                    continue
+                states[action] = self.RunAction(action)
+
+    def BundleNotUpdated(self, bundle, states):
+        """Run Actions when bundles have not been updated."""
+        for action in bundle.findall("Action"):
+            if action.get('timing') in ['post', 'both'] and \
+               action.get('when') != 'modified':
+                if not self._action_allowed(action):
+                    continue
+                states[action] = self.RunAction(action)
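The status-handling rule in RunAction() above reduces to a single decision: with status='ignore' an action always counts as successful, with status='check' success follows the command's return code. A standalone illustration of that rule:

# Sketch of RunAction's status semantics (illustrative, not part of the commit)
def action_state(rc, status='check'):
    return True if status == 'ignore' else rc == 0

assert action_state(1, 'ignore') is True
assert action_state(1, 'check') is False
assert action_state(0, 'check') is True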
diff --git a/src/lib/Bcfg2/Client/Tools/Blast.py b/src/lib/Bcfg2/Client/Tools/Blast.py
new file mode 100644
index 000000000..5d5e74ab2
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Blast.py
@@ -0,0 +1,32 @@
+"""This provides Bcfg2 support for Blastwave."""
+
+import tempfile
+import Bcfg2.Client.Tools.SYSV
+
+
+class Blast(Bcfg2.Client.Tools.SYSV.SYSV):
+    """Support for Blastwave packages."""
+    pkgtype = 'blast'
+    pkgtool = ("/opt/csw/bin/pkg-get install %s", ("%s", ["bname"]))
+    name = 'Blast'
+    __execs__ = ['/opt/csw/bin/pkg-get', "/usr/bin/pkginfo"]
+    __handles__ = [('Package', 'blast')]
+    __ireq__ = {'Package': ['name', 'version', 'bname']}
+
+    def __init__(self, logger, setup, config):
+        # don't use the SYSV constructor
+        Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+        noaskfile = tempfile.NamedTemporaryFile()
+        self.noaskname = noaskfile.name
+        try:
+            noaskfile.write(Bcfg2.Client.Tools.SYSV.noask)
+        except:
+            pass
+
+    # VerifyPackage comes from Bcfg2.Client.Tools.SYSV
+    # Install comes from Bcfg2.Client.Tools.PkgTool
+    # Extra comes from Bcfg2.Client.Tools.Tool
+    # Remove comes from Bcfg2.Client.Tools.SYSV
+    def FindExtraPackages(self):
+        """Pass through to null FindExtra call."""
+        return []
diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
new file mode 100644
index 000000000..12ea5f132
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -0,0 +1,108 @@
+# This is the bcfg2 support for chkconfig
+
+"""This is chkconfig support."""
+
+import os
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class Chkconfig(Bcfg2.Client.Tools.SvcTool):
+    """Chkconfig support for Bcfg2."""
+    name = 'Chkconfig'
+    __execs__ = ['/sbin/chkconfig']
+    __handles__ = [('Service', 'chkconfig')]
+    __req__ = {'Service': ['name', 'status']}
+    os.environ['LANG'] = 'C'
+
+    def get_svc_command(self, service, action):
+        return "/sbin/service %s %s" % (service.get('name'), action)
+
+    def VerifyService(self, entry, _):
+        """Verify Service status for entry."""
+        try:
+            cmd = "/sbin/chkconfig --list %s " % (entry.get('name'))
+            raw = self.cmd.run(cmd)[1]
+            patterns = ["error reading information", "unknown service"]
+            srvdata = [line.split() for line in raw for pattern in patterns \
+                       if pattern not in line][0]
+        except IndexError:
+            # Occurs when no lines are returned (service not installed)
+            entry.set('current_status', 'off')
+            return False
+        if len(srvdata) == 2:
+            # This is an xinetd service
+            if entry.get('status') == srvdata[1]:
+                return True
+            else:
+                entry.set('current_status', srvdata[1])
+                return False
+
+        try:
+            onlevels = [level.split(':')[0] for level in srvdata[1:] \
+                        if level.split(':')[1] == 'on']
+        except IndexError:
+            onlevels = []
+
+        if entry.get('status') == 'on':
+            status = (len(onlevels) > 0)
+            command = 'start'
+        else:
+            status = (len(onlevels) == 0)
+            command = 'stop'
+
+        if entry.get('mode', 'default') == 'supervised':
+            # turn on or off the service in supervised mode
+            pstatus = self.cmd.run('/sbin/service %s status' % \
+                                   entry.get('name'))[0]
+            needs_modification = ((command == 'start' and pstatus) or \
+                                  (command == 'stop' and not pstatus))
+            if (not self.setup.get('dryrun') and
+                self.setup['servicemode'] != 'disabled' and
+                needs_modification):
+                self.cmd.run(self.get_svc_command(entry, command))
+                # service was modified, so it failed
+                pstatus = False
+
+            # chkconfig/init.d service
+            if entry.get('status') == 'on':
+                status = status and not pstatus
+
+        if not status:
+            if entry.get('status') == 'on':
+                entry.set('current_status', 'off')
+            else:
+                entry.set('current_status', 'on')
+        return status
+
+    def InstallService(self, entry):
+        """Install Service entry."""
+        # don't take any actions for mode='manual'
+        if entry.get('mode', 'default') == 'manual':
+            self.logger.info("Service %s mode set to manual. Skipping "
+                             "installation." % (entry.get('name')))
+            return False
+        rcmd = "/sbin/chkconfig %s %s"
+        self.cmd.run("/sbin/chkconfig --add %s" % (entry.attrib['name']))
+        self.logger.info("Installing Service %s" % (entry.get('name')))
+        pass1 = True
+        if entry.get('status') == 'off':
+            rc = self.cmd.run(rcmd % (entry.get('name'),
+                                      entry.get('status')) + \
+                              " --level 0123456")[0]
+            pass1 = rc == 0
+        rc = self.cmd.run(rcmd % (entry.get('name'), entry.get('status')))[0]
+        return pass1 and rc == 0
+
+    def FindExtra(self):
+        """Locate extra chkconfig Services."""
+        allsrv = [line.split()[0] for line in \
+                  self.cmd.run("/sbin/chkconfig --list 2>/dev/null|grep :on")[1]]
+        self.logger.debug('Found active services:')
+        self.logger.debug(allsrv)
+        specified = [srv.get('name') for srv in self.getSupportedEntries()]
+        return [Bcfg2.Client.XML.Element('Service',
+                                         type='chkconfig',
+                                         name=name) \
+                for name in allsrv if name not in specified]
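VerifyService() above parses one line of `chkconfig --list` output: two fields mean an xinetd service, otherwise the trailing fields are `runlevel:state` pairs. The runlevel extraction in isolation, with an illustrative sample line:

line = "crond  0:off  1:off  2:on  3:on  4:on  5:on  6:off"
srvdata = line.split()
onlevels = [lvl.split(':')[0] for lvl in srvdata[1:]
            if lvl.split(':')[1] == 'on']
assert onlevels == ['2', '3', '4', '5']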
diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py
new file mode 100644
index 000000000..ca6fc439e
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/DebInit.py
@@ -0,0 +1,125 @@
+"""Debian Init Support for Bcfg2"""
+
+import glob
+import os
+import re
+import Bcfg2.Client.Tools
+
+# Debian squeeze and beyond uses a dependency-based boot sequence
+DEBIAN_OLD_STYLE_BOOT_SEQUENCE = ('etch', '4.0', 'lenny')
+
+
+class DebInit(Bcfg2.Client.Tools.SvcTool):
+    """Debian Service Support for Bcfg2."""
+    name = 'DebInit'
+    __execs__ = ['/usr/sbin/update-rc.d', '/usr/sbin/invoke-rc.d']
+    __handles__ = [('Service', 'deb')]
+    __req__ = {'Service': ['name', 'status']}
+    svcre = re.compile("/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)")
+
+    # implement entry (Verify|Install) ops
+    def VerifyService(self, entry, _):
+        """Verify Service status for entry."""
+
+        if entry.get('status') == 'ignore':
+            return True
+
+        rawfiles = glob.glob("/etc/rc*.d/[SK]*%s" % (entry.get('name')))
+        files = []
+
+        try:
+            deb_version = open('/etc/debian_version', 'r').read().split('/', 1)[0]
+        except IOError:
+            deb_version = 'unknown'
+
+        if entry.get('sequence'):
+            if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or
+                deb_version.startswith('5') or
+                os.path.exists('/etc/init.d/.legacy-bootordering')):
+                start_sequence = int(entry.get('sequence'))
+                kill_sequence = 100 - start_sequence
+            else:
+                start_sequence = None
+                self.logger.warning("Your Debian version uses a dependency-"
+                                    "based boot sequence; the \"sequence\" "
+                                    "attribute will be ignored.")
+        else:
+            start_sequence = None
+
+        for filename in rawfiles:
+            match = self.svcre.match(filename)
+            if not match:
+                self.logger.error("Failed to match file: %s" % filename)
+                continue
+            if match.group('name') == entry.get('name'):
+                files.append(filename)
+        if entry.get('status') == 'off':
+            if files:
+                entry.set('current_status', 'on')
+                return False
+            else:
+                return True
+        else:
+            if files:
+                if start_sequence:
+                    for filename in files:
+                        match = self.svcre.match(filename)
+                        file_sequence = int(match.group('sequence'))
+                        if match.group('action') == 'S' and file_sequence != start_sequence:
+                            return False
+                        if match.group('action') == 'K' and file_sequence != kill_sequence:
+                            return False
+                return True
+            else:
+                entry.set('current_status', 'off')
+                return False
+
+    def InstallService(self, entry):
+        """Install Service for entry."""
+        # don't take any actions for mode='manual'
+        if entry.get('mode', 'default') == 'manual':
+            self.logger.info("Service %s mode set to manual. Skipping "
+                             "installation." % (entry.get('name')))
+            return False
+        self.logger.info("Installing Service %s" % (entry.get('name')))
+        try:
+            os.stat('/etc/init.d/%s' % entry.get('name'))
+        except OSError:
+            self.logger.debug("Init script for service %s does not exist" % entry.get('name'))
+            return False
+
+        if entry.get('status') == 'off':
+            self.cmd.run("/usr/sbin/invoke-rc.d %s stop" % (entry.get('name')))
+            cmdrc = self.cmd.run("/usr/sbin/update-rc.d -f %s remove" % entry.get('name'))[0]
+        else:
+            command = "/usr/sbin/update-rc.d %s defaults" % (entry.get('name'))
+            if entry.get('sequence'):
+                cmdrc = self.cmd.run("/usr/sbin/update-rc.d -f %s remove" % entry.get('name'))[0]
+                if cmdrc != 0:
+                    return False
+                start_sequence = int(entry.get('sequence'))
+                kill_sequence = 100 - start_sequence
+                command = "%s %d %d" % (command, start_sequence, kill_sequence)
+            cmdrc = self.cmd.run(command)[0]
+        return cmdrc == 0
+
+    def FindExtra(self):
+        """Find Extra Debian Service entries."""
+        specified = [entry.get('name') for entry in self.getSupportedEntries()]
+        extra = []
+        for name in [self.svcre.match(fname).group('name') for fname in
+                     glob.glob("/etc/rc[12345].d/S*") \
+                     if self.svcre.match(fname).group('name') not in specified]:
+            if name not in extra:
+                extra.append(name)
+        return [Bcfg2.Client.XML.Element('Service', name=name, type='deb') for name \
+                in extra]
+
+    def Remove(self, _):
+        """Remove extra service entries."""
+        # Extra service removal is nonsensical
+        # Extra services need to be reflected in the config
+        return
+
+    def get_svc_command(self, service, action):
+        return '/usr/sbin/invoke-rc.d %s %s' % (service.get('name'), action)
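The `svcre` pattern above decodes a Debian rc symlink into its action (S for start, K for kill), sequence number, and service name. A quick check with an illustrative path:

import re
svcre = re.compile(r"/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)")
m = svcre.match("/etc/rc2.d/S20ssh")
assert (m.group('action'), m.group('sequence'), m.group('name')) == ('S', '20', 'ssh')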
diff --git a/src/lib/Bcfg2/Client/Tools/Encap.py b/src/lib/Bcfg2/Client/Tools/Encap.py
new file mode 100644
index 000000000..fa09c3ec7
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Encap.py
@@ -0,0 +1,53 @@
+"""Bcfg2 Support for Encap Packages"""
+
+import glob
+import re
+import Bcfg2.Client.Tools
+
+
+class Encap(Bcfg2.Client.Tools.PkgTool):
+    """Support for Encap packages."""
+    name = 'Encap'
+    __execs__ = ['/usr/local/bin/epkg']
+    __handles__ = [('Package', 'encap')]
+    __req__ = {'Package': ['version', 'url']}
+    pkgtype = 'encap'
+    pkgtool = ("/usr/local/bin/epkg -l -f -q %s", ("%s", ["url"]))
+    splitter = re.compile('.*/(?P<name>[\w-]+)\-(?P<version>[\w\.+-]+)')
+
+    # If you define self.pkgtool and self.pkgname, the PkgTool.Install
+    # method will do the installation stuff for you
+
+    def RefreshPackages(self):
+        """Try to find encap packages."""
+        self.installed = {}
+        for pkg in glob.glob("/usr/local/encap/*"):
+            match = self.splitter.match(pkg)
+            if match:
+                self.installed[match.group('name')] = match.group('version')
+            else:
+                print("Failed to split name %s" % pkg)
+        self.logger.debug("Encap.py: RefreshPackages: self.installed.keys() are:")
+        self.logger.debug("%s" % list(self.installed.keys()))
+
+    def VerifyPackage(self, entry, _):
+        """Verify Package status for entry."""
+        if not entry.get('version'):
+            self.logger.info("Insufficient information for Package %s; cannot verify" % entry.get('name'))
+            return False
+        cmdrc = self.cmd.run("/usr/local/bin/epkg -q -S -k %s-%s >/dev/null" %
+                             (entry.get('name'), entry.get('version')))[0]
+        if cmdrc != 0:
+            self.logger.debug("Package %s version incorrect" % entry.get('name'))
+        else:
+            return True
+        return False
+
+    # Can use the FindExtraPackages method from Bcfg2.Client.Tools.PkgTool
+
+    def RemovePackages(self, packages):
+        """Deal with extra configuration detected."""
+        names = " ".join([pkg.get('name') for pkg in packages])
+        self.logger.info("Removing packages: %s" % (names))
+        self.cmd.run("/usr/local/bin/epkg -l -q -r %s" % (names))
+        self.RefreshPackages()
+        self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
new file mode 100644
index 000000000..10f0f2e93
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
@@ -0,0 +1,27 @@
+"""FreeBSD Init Support for Bcfg2."""
+__revision__ = '$Rev$'
+
+# TODO
+# - hardcoded path to ports rc.d
+# - doesn't know about /etc/rc.d/
+
+import os
+import Bcfg2.Client.Tools
+
+
+class FreeBSDInit(Bcfg2.Client.Tools.SvcTool):
+    """FreeBSD service support for Bcfg2."""
+    name = 'FreeBSDInit'
+    __handles__ = [('Service', 'freebsd')]
+    __req__ = {'Service': ['name', 'status']}
+
+    def __init__(self, logger, cfg, setup):
+        Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup)
+        if os.uname()[0] != 'FreeBSD':
+            raise Bcfg2.Client.Tools.toolInstantiationError
+
+    def VerifyService(self, entry, _):
+        return True
+
+    def get_svc_command(self, service, action):
+        return "/usr/local/etc/rc.d/%s %s" % (service.get('name'), action)
diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py b/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py
new file mode 100644
index 000000000..3e6f2b6bb
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py
@@ -0,0 +1,46 @@
+"""This is the Bcfg2 tool for the FreeBSD package system."""
+
+# TODO
+# - actual package installation
+# - verification of package files
+
+import re
+import Bcfg2.Client.Tools
+
+
+class FreeBSDPackage(Bcfg2.Client.Tools.PkgTool):
+    """The FreeBSD toolset implements package operations and inherits
+    the rest from Toolset.Toolset."""
+    name = 'FreeBSDPackage'
+    __execs__ = ['/usr/sbin/pkg_add', '/usr/sbin/pkg_info']
+    __handles__ = [('Package', 'freebsdpkg')]
+    __req__ = {'Package': ['name', 'version']}
+    pkgtool = ('/usr/sbin/pkg_add -r %s', ('%s-%s', ['name', 'version']))
+    pkgtype = 'freebsdpkg'
+
+    def RefreshPackages(self):
+        self.installed = {}
+        packages = self.cmd.run("/usr/sbin/pkg_info -a -E")[1]
+        pattern = re.compile('(.*)-(\d.*)')
+        for pkg in packages:
+            if pattern.match(pkg):
+                name = pattern.match(pkg).group(1)
+                version = pattern.match(pkg).group(2)
+                self.installed[name] = version
+
+    def VerifyPackage(self, entry, modlist):
+        if not 'version' in entry.attrib:
+            self.logger.info("Cannot verify unversioned package %s" %
+                             (entry.attrib['name']))
+            return False
+        if entry.attrib['name'] in self.installed:
+            if self.installed[entry.attrib['name']] == entry.attrib['version']:
+                # TODO: verification
+                return True
+            else:
+                entry.set('current_version', self.installed[entry.get('name')])
+                return False
+
+        self.logger.info("Package %s not installed" % (entry.get('name')))
+        entry.set('current_exists', 'false')
+        return False
diff --git a/src/lib/Bcfg2/Client/Tools/IPS.py b/src/lib/Bcfg2/Client/Tools/IPS.py
new file mode 100644
index 000000000..e30bbd2a4
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/IPS.py
@@ -0,0 +1,60 @@
+"""This is the Bcfg2 support for OpenSolaris packages."""
+
+import pkg.client.image as image
+import pkg.client.progress as progress
+
+import Bcfg2.Client.Tools
+
+
+class IPS(Bcfg2.Client.Tools.PkgTool):
+    """The IPS driver implements OpenSolaris package operations."""
+    name = 'IPS'
+    pkgtype = 'ips'
+    conflicts = ['SYSV']
+    __handles__ = [('Package', 'ips')]
+    __req__ = {'Package': ['name', 'version']}
+    pkgtool = ('pkg install --no-refresh %s', ('%s', ['name']))
+
+    def __init__(self, logger, setup, cfg):
+        self.installed = {}
+        self.pending_upgrades = set()
+        self.image = image.Image()
+        self.image.find_root('/', False)
+        self.image.load_config()
+        Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, cfg)
+        self.cfg = cfg
+
+    def RefreshPackages(self):
+        self.installed = dict()
+        self.image.history.operation_name = "list"
+        self.image.load_catalogs(progress.NullProgressTracker())
+        for (pfmri, pinfo) in self.image.inventory([], False):
+            pname = pfmri.pkg_name
+            pversion = pfmri.version.get_short_version()
+            self.installed[pname] = pversion
+            if pinfo['upgradable']:
+                self.pending_upgrades.add(pname)
+
+    def VerifyPackage(self, entry, modlist):
+        """Verify package for entry."""
+        pname = entry.get('name')
+        if not 'version' in entry.attrib:
+            self.logger.info("Cannot verify unversioned package %s" % (pname))
+            return False
+        if pname not in self.installed:
+            self.logger.debug("IPS: Package %s not installed" % pname)
+            return False
+        if entry.get('version') == 'auto':
+            if pname in self.pending_upgrades:
+                return False
+        elif entry.get('version') == 'any':
+            pass
+        else:
+            if entry.get('version') != self.installed[pname]:
+                self.logger.debug("IPS: Package %s: have %s want %s" \
+                                  % (pname, self.installed[pname],
+                                     entry.get('version')))
+                return False
+
+        # need to implement pkg chksum validation
+        return True
diff --git a/src/lib/Bcfg2/Client/Tools/MacPorts.py b/src/lib/Bcfg2/Client/Tools/MacPorts.py
new file mode 100644
index 000000000..9724fab57
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/MacPorts.py
@@ -0,0 +1,66 @@
+"""This provides Bcfg2 support for MacPorts packages."""
+
+import Bcfg2.Client.Tools
+
+
+class MacPorts(Bcfg2.Client.Tools.PkgTool):
+    """MacPorts package support."""
+    name = 'MacPorts'
+    __execs__ = ["/opt/local/bin/port"]
+    __handles__ = [('Package', 'macport')]
+    __req__ = {'Package': ['name', 'version']}
+    pkgtype = 'macport'
+    pkgtool = ('/opt/local/bin/port install %s', ('%s', ['name']))
+
+    def __init__(self, logger, setup, config):
+        Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+        self.installed = {}
+        self.RefreshPackages()
+
+    def RefreshPackages(self):
+        """Refresh memory hashes of packages."""
+        pkgcache = self.cmd.run("/opt/local/bin/port installed")[1]
+        self.installed = {}
+        for pkg in pkgcache:
+            if pkg.startswith("The following ports are currently installed"):
+                continue
+            pkgname = pkg.split('@')[0].strip()
+            version = pkg.split('@')[1].split(' ')[0]
+            self.logger.info(" pkgname: %s version: %s" % (pkgname, version))
+            self.installed[pkgname] = version
+
+    def VerifyPackage(self, entry, modlist):
+        """Verify Package status for entry."""
+        if not 'version' in entry.attrib:
+            self.logger.info("Cannot verify unversioned package %s" %
+                             (entry.attrib['name']))
+            return False
+
+        if entry.attrib['name'] in self.installed:
+            if (self.installed[entry.attrib['name']] == entry.attrib['version'] or
+                entry.attrib['version'] == 'any'):
+                #if not self.setup['quick'] and \
+                #        entry.get('verify', 'true') == 'true':
+                #FIXME: We should be able to check this once
+                #       http://trac.macports.org/ticket/15709 is implemented
+                return True
+            else:
+                self.logger.info("  %s: Wrong version installed.  "
+                                 "Want %s, but have %s" % (entry.get("name"),
+                                                           entry.get("version"),
+                                                           self.installed[entry.get("name")],
+                                                           ))
+
+                entry.set('current_version', self.installed[entry.get('name')])
+                return False
+        entry.set('current_exists', 'false')
+        return False
+
+    def RemovePackages(self, packages):
+        """Remove extra packages."""
+        names = [pkg.get('name') for pkg in packages]
+        self.logger.info("Removing packages: %s" % " ".join(names))
+        self.cmd.run("/opt/local/bin/port uninstall %s" % \
+                     " ".join(names))
+        self.RefreshPackages()
+        self.extra = self.FindExtraPackages()
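RefreshPackages() in the MacPorts driver above splits `port installed` lines of the form `  name @version_revision (active)` on the `@` separator. The same parsing in isolation, with an illustrative sample line:

line = "  zlib @1.2.5_0 (active)"
pkgname = line.split('@')[0].strip()
version = line.split('@')[1].split(' ')[0]
assert (pkgname, version) == ('zlib', '1.2.5_0')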
+ """ + for char in strng: + if ord(char) < 9 or ord(char) > 13 and ord(char) < 32: + return False + try: + strng.decode(encoding) + return True + except: + return False + + +class POSIX(Bcfg2.Client.Tools.Tool): + """POSIX File support code.""" + name = 'POSIX' + __handles__ = [('Path', 'device'), + ('Path', 'directory'), + ('Path', 'file'), + ('Path', 'hardlink'), + ('Path', 'nonexistent'), + ('Path', 'permissions'), + ('Path', 'symlink')] + __req__ = {'Path': ['name', 'type']} + + # grab paranoid options from /etc/bcfg2.conf + opts = {'ppath': Bcfg2.Options.PARANOID_PATH, + 'max_copies': Bcfg2.Options.PARANOID_MAX_COPIES} + setup = Bcfg2.Options.OptionParser(opts) + setup.parse([]) + ppath = setup['ppath'] + max_copies = setup['max_copies'] + + def canInstall(self, entry): + """Check if entry is complete for installation.""" + if Bcfg2.Client.Tools.Tool.canInstall(self, entry): + if (entry.tag, + entry.get('type'), + entry.text, + entry.get('empty', 'false')) == ('Path', + 'file', + None, + 'false'): + return False + return True + else: + return False + + def gatherCurrentData(self, entry): + if entry.tag == 'Path' and entry.get('type') == 'file': + try: + ondisk = os.stat(entry.get('name')) + except OSError: + entry.set('current_exists', 'false') + self.logger.debug("%s %s does not exist" % + (entry.tag, entry.get('name'))) + return False + try: + entry.set('current_owner', str(ondisk[stat.ST_UID])) + entry.set('current_group', str(ondisk[stat.ST_GID])) + except (OSError, KeyError): + pass + entry.set('perms', str(oct(ondisk[stat.ST_MODE])[-4:])) + + def Verifydevice(self, entry, _): + """Verify device entry.""" + if entry.get('dev_type') == None or \ + entry.get('owner') == None or \ + entry.get('group') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % (entry.get('name'))) + return False + if entry.get('dev_type') in ['block', 'char']: + # check if major/minor are properly specified + if entry.get('major') == None or \ + entry.get('minor') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % (entry.get('name'))) + return False + try: + # check for file existence + filestat = os.stat(entry.get('name')) + except OSError: + entry.set('current_exists', 'false') + self.logger.debug("%s %s does not exist" % + (entry.tag, entry.get('name'))) + return False + + try: + # attempt to verify device properties as specified in config + dev_type = entry.get('dev_type') + mode = calcPerms(device_map[dev_type], + entry.get('mode', '0600')) + owner = normUid(entry) + group = normGid(entry) + if dev_type in ['block', 'char']: + # check for incompletely specified entries + if entry.get('major') == None or \ + entry.get('minor') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' 
% (entry.get('name'))) + return False + major = int(entry.get('major')) + minor = int(entry.get('minor')) + if major == os.major(filestat.st_rdev) and \ + minor == os.minor(filestat.st_rdev) and \ + mode == filestat.st_mode and \ + owner == filestat.st_uid and \ + group == filestat.st_gid: + return True + else: + return False + elif dev_type == 'fifo' and \ + mode == filestat.st_mode and \ + owner == filestat.st_uid and \ + group == filestat.st_gid: + return True + else: + self.logger.info('Device properties for %s incorrect' % \ + entry.get('name')) + return False + except OSError: + self.logger.debug("%s %s failed to verify" % + (entry.tag, entry.get('name'))) + return False + + def Installdevice(self, entry): + """Install device entries.""" + try: + # check for existing paths and remove them + os.lstat(entry.get('name')) + try: + os.unlink(entry.get('name')) + exists = False + except OSError: + self.logger.info('Failed to unlink %s' % \ + entry.get('name')) + return False + except OSError: + exists = False + + if not exists: + try: + dev_type = entry.get('dev_type') + mode = calcPerms(device_map[dev_type], + entry.get('mode', '0600')) + if dev_type in ['block', 'char']: + # check if major/minor are properly specified + if entry.get('major') == None or \ + entry.get('minor') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % (entry.get('name'))) + return False + major = int(entry.get('major')) + minor = int(entry.get('minor')) + device = os.makedev(major, minor) + os.mknod(entry.get('name'), mode, device) + else: + os.mknod(entry.get('name'), mode) + """ + Python uses the OS mknod(2) implementation which modifies the + mode based on the umask of the running process. Therefore, the + following chmod(2) call is needed to make sure the permissions + are set as specified by the user. + """ + os.chmod(entry.get('name'), mode) + os.chown(entry.get('name'), normUid(entry), normGid(entry)) + return True + except KeyError: + self.logger.error('Failed to install %s' % entry.get('name')) + except OSError: + self.logger.error('Failed to install %s' % entry.get('name')) + return False + + def Verifydirectory(self, entry, modlist): + """Verify Path type='directory' entry.""" + if entry.get('perms') == None or \ + entry.get('owner') == None or \ + entry.get('group') == None: + self.logger.error("POSIX: Entry %s not completely specified. " + "Try running bcfg2-lint." 
+                              % (entry.get('name')))
+            return False
+        while len(entry.get('perms', '')) < 4:
+            entry.set('perms', '0' + entry.get('perms', ''))
+        try:
+            ondisk = os.stat(entry.get('name'))
+        except OSError:
+            entry.set('current_exists', 'false')
+            self.logger.info("POSIX: %s %s does not exist" %
+                             (entry.tag, entry.get('name')))
+            return False
+        try:
+            owner = str(ondisk[stat.ST_UID])
+            group = str(ondisk[stat.ST_GID])
+        except (OSError, KeyError):
+            self.logger.info("POSIX: User/Group resolution failed "
+                             "for path %s" % entry.get('name'))
+            owner = 'root'
+            group = '0'
+        finfo = os.stat(entry.get('name'))
+        perms = oct(finfo[stat.ST_MODE])[-4:]
+        if entry.get('mtime', '-1') != '-1':
+            mtime = str(finfo[stat.ST_MTIME])
+        else:
+            mtime = '-1'
+        pTrue = ((owner == str(normUid(entry))) and
+                 (group == str(normGid(entry))) and
+                 (perms == entry.get('perms')) and
+                 (mtime == entry.get('mtime', '-1')))
+
+        pruneTrue = True
+        ex_ents = []
+        if entry.get('prune', 'false') == 'true' \
+           and (entry.tag == 'Path' and entry.get('type') == 'directory'):
+            # check for any extra entries when prune='true' attribute is set
+            try:
+                entries = ['/'.join([entry.get('name'), ent]) \
+                           for ent in os.listdir(entry.get('name'))]
+                ex_ents = [e for e in entries if e not in modlist]
+                if ex_ents:
+                    pruneTrue = False
+                    self.logger.info("POSIX: Directory %s contains "
+                                     "extra entries:" % entry.get('name'))
+                    self.logger.info(ex_ents)
+                    nqtext = entry.get('qtext', '') + '\n'
+                    nqtext += "Directory %s contains extra entries: " % \
+                              entry.get('name')
+                    nqtext += ":".join(ex_ents)
+                    # note: stored under 'qtest', which is the attribute
+                    # Installdirectory() checks before pruning
+                    entry.set('qtest', nqtext)
+                    [entry.append(XML.Element('Prune', path=x)) \
+                     for x in ex_ents]
+            except OSError:
+                ex_ents = []
+                pruneTrue = True
+
+        if not pTrue:
+            if owner != str(normUid(entry)):
+                entry.set('current_owner', owner)
+                self.logger.debug("%s %s ownership wrong" % \
+                                  (entry.tag, entry.get('name')))
+                nqtext = entry.get('qtext', '') + '\n'
+                nqtext += "%s owner wrong. 
is %s should be %s" % \ + (entry.get('name'), owner, entry.get('owner')) + entry.set('qtext', nqtext) + if group != str(normGid(entry)): + entry.set('current_group', group) + self.logger.debug("%s %s group wrong" % \ + (entry.tag, entry.get('name'))) + nqtext = entry.get('qtext', '') + '\n' + nqtext += "%s group is %s should be %s" % \ + (entry.get('name'), group, entry.get('group')) + entry.set('qtext', nqtext) + if perms != entry.get('perms'): + entry.set('current_perms', perms) + self.logger.debug("%s %s permissions are %s should be %s" % + (entry.tag, + entry.get('name'), + perms, + entry.get('perms'))) + nqtext = entry.get('qtext', '') + '\n' + nqtext += "%s %s perms are %s should be %s" % \ + (entry.tag, + entry.get('name'), + perms, + entry.get('perms')) + entry.set('qtext', nqtext) + if mtime != entry.get('mtime', '-1'): + entry.set('current_mtime', mtime) + self.logger.debug("%s %s mtime is %s should be %s" \ + % (entry.tag, entry.get('name'), mtime, + entry.get('mtime'))) + nqtext = entry.get('qtext', '') + '\n' + nqtext += "%s mtime is %s should be %s" % \ + (entry.get('name'), mtime, entry.get('mtime')) + entry.set('qtext', nqtext) + if entry.get('type') != 'file': + nnqtext = entry.get('qtext') + nnqtext += '\nInstall %s %s: (y/N) ' % (entry.get('type'), + entry.get('name')) + entry.set('qtext', nnqtext) + return pTrue and pruneTrue + + def Installdirectory(self, entry): + """Install Path type='directory' entry.""" + if entry.get('perms') == None or \ + entry.get('owner') == None or \ + entry.get('group') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % \ + (entry.get('name'))) + return False + self.logger.info("Installing directory %s" % (entry.get('name'))) + try: + fmode = os.lstat(entry.get('name')) + if not stat.S_ISDIR(fmode[stat.ST_MODE]): + self.logger.debug("Found a non-directory entry at %s" % \ + (entry.get('name'))) + try: + os.unlink(entry.get('name')) + exists = False + except OSError: + self.logger.info("Failed to unlink %s" % \ + (entry.get('name'))) + return False + else: + self.logger.debug("Found a pre-existing directory at %s" % \ + (entry.get('name'))) + exists = True + except OSError: + # stat failed + exists = False + + if not exists: + parent = "/".join(entry.get('name').split('/')[:-1]) + if parent: + try: + os.stat(parent) + except: + self.logger.debug('Creating parent path for directory %s' % (entry.get('name'))) + for idx in range(len(parent.split('/')[:-1])): + current = '/'+'/'.join(parent.split('/')[1:2+idx]) + try: + sloc = os.stat(current) + except OSError: + try: + os.mkdir(current) + continue + except OSError: + return False + if not stat.S_ISDIR(sloc[stat.ST_MODE]): + try: + os.unlink(current) + os.mkdir(current) + except OSError: + return False + + try: + os.mkdir(entry.get('name')) + except OSError: + self.logger.error('Failed to create directory %s' % \ + (entry.get('name'))) + return False + if entry.get('prune', 'false') == 'true' and entry.get("qtest"): + for pent in entry.findall('Prune'): + pname = pent.get('path') + ulfailed = False + if os.path.isdir(pname): + self.logger.info("Not removing extra directory %s, " + "please check and remove manually" % pname) + continue + try: + self.logger.debug("Unlinking file %s" % pname) + os.unlink(pname) + except OSError: + self.logger.error("Failed to unlink path %s" % pname) + ulfailed = True + if ulfailed: + return False + return self.Installpermissions(entry) + + def Verifyfile(self, entry, _): + """Verify Path type='file' entry.""" + # 
permissions check + content check + permissionStatus = self.Verifydirectory(entry, _) + tbin = False + if entry.text == None and entry.get('empty', 'false') == 'false': + self.logger.error("Cannot verify incomplete Path type='%s' %s" % + (entry.get('type'), entry.get('name'))) + return False + if entry.get('encoding', 'ascii') == 'base64': + tempdata = binascii.a2b_base64(entry.text) + tbin = True + elif entry.get('empty', 'false') == 'true': + tempdata = '' + else: + tempdata = entry.text + if type(tempdata) == unicode: + try: + tempdata = tempdata.encode(self.setup['encoding']) + except UnicodeEncodeError: + e = sys.exc_info()[1] + self.logger.error("Error encoding file %s:\n %s" % \ + (entry.get('name'), e)) + + different = False + content = None + if not os.path.exists(entry.get("name")): + # first, see if the target file exists at all; if not, + # they're clearly different + different = True + content = "" + else: + # next, see if the size of the target file is different + # from the size of the desired content + try: + estat = os.stat(entry.get('name')) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Failed to stat %s: %s" % + (err.filename, err)) + return False + if len(tempdata) != estat[stat.ST_SIZE]: + different = True + else: + # finally, read in the target file and compare them + # directly. comparison could be done with a checksum, + # which might be faster for big binary files, but + # slower for everything else + try: + content = open(entry.get('name')).read() + except IOError: + err = sys.exc_info()[1] + self.logger.error("Failed to read %s: %s" % + (err.filename, err)) + return False + different = content != tempdata + + if different: + if self.setup['interactive']: + prompt = [entry.get('qtext', '')] + if not tbin and content is None: + # it's possible that we figured out the files are + # different without reading in the local file. if + # the supplied version of the file is not binary, + # we now have to read in the local file to figure + # out if _it_ is binary, and either include that + # fact or the diff in our prompts for -I + try: + content = open(entry.get('name')).read() + except IOError: + err = sys.exc_info()[1] + self.logger.error("Failed to read %s: %s" % + (err.filename, err)) + return False + if tbin or not isString(content, self.setup['encoding']): + # don't compute diffs if the file is binary + prompt.append('Binary file, no printable diff') + else: + diff = self._diff(content, tempdata, + difflib.unified_diff, + filename=entry.get("name")) + if diff: + udiff = '\n'.join(diff) + try: + prompt.append(udiff.decode(self.setup['encoding'])) + except UnicodeDecodeError: + prompt.append("Binary file, no printable diff") + else: + prompt.append("Diff took too long to compute, no " + "printable diff") + prompt.append("Install %s %s: (y/N): " % (entry.tag, + entry.get('name'))) + entry.set("qtext", "\n".join(prompt)) + + if entry.get('sensitive', 'false').lower() != 'true': + if content is None: + # it's possible that we figured out the files are + # different without reading in the local file. 
we + # now have to read in the local file to figure out + # if _it_ is binary, and either include the whole + # file or the diff for reports + try: + content = open(entry.get('name')).read() + except IOError: + err = sys.exc_info()[1] + self.logger.error("Failed to read %s: %s" % + (err.filename, err)) + return False + + if tbin or not isString(content, self.setup['encoding']): + # don't compute diffs if the file is binary + entry.set('current_bfile', binascii.b2a_base64(content)) + else: + diff = self._diff(content, tempdata, difflib.ndiff, + filename=entry.get("name")) + if diff: + entry.set("current_bdiff", + binascii.b2a_base64("\n".join(diff))) + elif not tbin and isString(content, self.setup['encoding']): + entry.set('current_bfile', binascii.b2a_base64(content)) + elif permissionStatus == False and self.setup['interactive']: + prompt = [entry.get('qtext', '')] + prompt.append("Install %s %s: (y/N): " % (entry.tag, + entry.get('name'))) + entry.set("qtext", "\n".join(prompt)) + + + return permissionStatus and not different + + def Installfile(self, entry): + """Install Path type='file' entry.""" + self.logger.info("Installing file %s" % (entry.get('name'))) + + parent = "/".join(entry.get('name').split('/')[:-1]) + if parent: + try: + os.stat(parent) + except: + self.logger.debug('Creating parent path for config file %s' % \ + (entry.get('name'))) + current = '/' + for next in parent.split('/')[1:]: + current += next + '/' + try: + sloc = os.stat(current) + try: + if not stat.S_ISDIR(sloc[stat.ST_MODE]): + self.logger.debug('%s is not a directory; recreating' \ + % (current)) + os.unlink(current) + os.mkdir(current) + except OSError: + return False + except OSError: + try: + self.logger.debug("Creating non-existent path %s" % current) + os.mkdir(current) + except OSError: + return False + + # If we get here, then the parent directory should exist + if (entry.get("paranoid", False) in ['true', 'True']) and \ + self.setup.get("paranoid", False) and not \ + (entry.get('current_exists', 'true') == 'false'): + bkupnam = entry.get('name').replace('/', '_') + # current list of backups for this file + try: + bkuplist = [f for f in os.listdir(self.ppath) if + f.startswith(bkupnam)] + except OSError: + e = sys.exc_info()[1] + self.logger.error("Failed to create backup list in %s: %s" % + (self.ppath, e.strerror)) + return False + bkuplist.sort() + while len(bkuplist) >= int(self.max_copies): + # remove the oldest backup available + oldest = bkuplist.pop(0) + self.logger.info("Removing %s" % oldest) + try: + os.remove("%s/%s" % (self.ppath, oldest)) + except: + self.logger.error("Failed to remove %s/%s" % \ + (self.ppath, oldest)) + return False + try: + # backup existing file + shutil.copy(entry.get('name'), + "%s/%s_%s" % (self.ppath, bkupnam, + datetime.isoformat(datetime.now()))) + self.logger.info("Backup of %s saved to %s" % + (entry.get('name'), self.ppath)) + except IOError: + e = sys.exc_info()[1] + self.logger.error("Failed to create backup file for %s" % \ + (entry.get('name'))) + self.logger.error(e) + return False + try: + newfile = open("%s.new"%(entry.get('name')), 'w') + if entry.get('encoding', 'ascii') == 'base64': + filedata = binascii.a2b_base64(entry.text) + elif entry.get('empty', 'false') == 'true': + filedata = '' + else: + if type(entry.text) == unicode: + filedata = entry.text.encode(self.setup['encoding']) + else: + filedata = entry.text + newfile.write(filedata) + newfile.close() + try: + os.chown(newfile.name, normUid(entry), normGid(entry)) + except KeyError: + 
self.logger.error("Failed to chown %s to %s:%s" % + (newfile.name, entry.get('owner'), + entry.get('group'))) + os.chown(newfile.name, 0, 0) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Could not chown %s: %s" % (newfile.name, + err)) + os.chmod(newfile.name, calcPerms(stat.S_IFREG, entry.get('perms'))) + os.rename(newfile.name, entry.get('name')) + if entry.get('mtime', '-1') != '-1': + try: + os.utime(entry.get('name'), (int(entry.get('mtime')), + int(entry.get('mtime')))) + except: + self.logger.error("File %s mtime fix failed" \ + % (entry.get('name'))) + return False + return True + except (OSError, IOError): + err = sys.exc_info()[1] + if err.errno == errno.EACCES: + self.logger.info("Failed to open %s for writing" % (entry.get('name'))) + else: + print(err) + return False + + def Verifyhardlink(self, entry, _): + """Verify HardLink entry.""" + if entry.get('to') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % \ + (entry.get('name'))) + return False + try: + if os.path.samefile(entry.get('name'), entry.get('to')): + return True + self.logger.debug("Hardlink %s is incorrect" % \ + entry.get('name')) + entry.set('qtext', "Link %s to %s? [y/N] " % \ + (entry.get('name'), + entry.get('to'))) + return False + except OSError: + entry.set('current_exists', 'false') + entry.set('qtext', "Link %s to %s? [y/N] " % \ + (entry.get('name'), + entry.get('to'))) + return False + + def Installhardlink(self, entry): + """Install HardLink entry.""" + if entry.get('to') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % \ + (entry.get('name'))) + return False + self.logger.info("Installing Hardlink %s" % (entry.get('name'))) + if os.path.lexists(entry.get('name')): + try: + fmode = os.lstat(entry.get('name'))[stat.ST_MODE] + if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode): + self.logger.debug("Non-directory entry already exists at " + "%s. Unlinking entry." % (entry.get('name'))) + os.unlink(entry.get('name')) + elif stat.S_ISDIR(fmode): + self.logger.debug("Directory already exists at %s" % \ + (entry.get('name'))) + self.cmd.run("mv %s/ %s.bak" % \ + (entry.get('name'), + entry.get('name'))) + else: + os.unlink(entry.get('name')) + except OSError: + self.logger.info("Hardlink %s cleanup failed" % \ + (entry.get('name'))) + try: + os.link(entry.get('to'), entry.get('name')) + return True + except OSError: + return False + + def Verifynonexistent(self, entry, _): + """Verify nonexistent entry.""" + # return true if path does _not_ exist + return not os.path.lexists(entry.get('name')) + + def Installnonexistent(self, entry): + '''Remove nonexistent entries''' + ename = entry.get('name') + if entry.get('recursive') in ['True', 'true']: + # ensure that configuration spec is consistent first + if [e for e in self.buildModlist() \ + if e.startswith(ename) and e != ename]: + self.logger.error('Not installing %s. One or more files ' + 'in this directory are specified in ' + 'your configuration.' 
% ename) + return False + try: + shutil.rmtree(ename) + except OSError: + e = sys.exc_info()[1] + self.logger.error('Failed to remove %s: %s' % (ename, + e.strerror)) + else: + if os.path.isdir(ename): + try: + os.rmdir(ename) + return True + except OSError: + e = sys.exc_info()[1] + self.logger.error('Failed to remove %s: %s' % (ename, + e.strerror)) + return False + try: + os.remove(ename) + return True + except OSError: + e = sys.exc_info()[1] + self.logger.error('Failed to remove %s: %s' % (ename, + e.strerror)) + return False + + def Verifypermissions(self, entry, _): + """Verify Path type='permissions' entry""" + if entry.get('perms') == None or \ + entry.get('owner') == None or \ + entry.get('group') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % (entry.get('name'))) + return False + if entry.get('recursive') in ['True', 'true']: + # verify ownership information recursively + owner = normUid(entry) + group = normGid(entry) + + for root, dirs, files in os.walk(entry.get('name')): + for p in dirs + files: + path = os.path.join(root, p) + pstat = os.stat(path) + if owner != pstat.st_uid: + # owner mismatch for path + entry.set('current_owner', str(pstat.st_uid)) + self.logger.debug("%s %s ownership wrong" % \ + (entry.tag, path)) + nqtext = entry.get('qtext', '') + '\n' + nqtext += ("Owner for path %s is incorrect. " + "Current owner is %s but should be %s\n" % \ + (path, pstat.st_uid, entry.get('owner'))) + nqtext += ("\nInstall %s %s: (y/N): " % + (entry.tag, entry.get('name'))) + entry.set('qtext', nqtext) + return False + if group != pstat.st_gid: + # group mismatch for path + entry.set('current_group', str(pstat.st_gid)) + self.logger.debug("%s %s group wrong" % \ + (entry.tag, path)) + nqtext = entry.get('qtext', '') + '\n' + nqtext += ("Group for path %s is incorrect. " + "Current group is %s but should be %s\n" % \ + (path, pstat.st_gid, entry.get('group'))) + nqtext += ("\nInstall %s %s: (y/N): " % + (entry.tag, entry.get('name'))) + entry.set('qtext', nqtext) + return False + return self.Verifydirectory(entry, _) + + def _diff(self, content1, content2, difffunc, filename=None): + rv = [] + start = time.time() + longtime = False + for diffline in difffunc(content1.split('\n'), + content2.split('\n')): + now = time.time() + rv.append(diffline) + if now - start > 5 and not longtime: + if filename: + self.logger.info("Diff of %s taking a long time" % + filename) + else: + self.logger.info("Diff taking a long time") + longtime = True + elif now - start > 30: + if filename: + self.logger.error("Diff of %s took too long; giving up" % + filename) + else: + self.logger.error("Diff took too long; giving up") + return False + return rv + + def Installpermissions(self, entry): + """Install POSIX permissions""" + if entry.get('perms') == None or \ + entry.get('owner') == None or \ + entry.get('group') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' 
% (entry.get('name'))) + return False + plist = [entry.get('name')] + if entry.get('recursive') in ['True', 'true']: + # verify ownership information recursively + owner = normUid(entry) + group = normGid(entry) + + for root, dirs, files in os.walk(entry.get('name')): + for p in dirs + files: + path = os.path.join(root, p) + pstat = os.stat(path) + if owner != pstat.st_uid or group != pstat.st_gid: + # owner mismatch for path + plist.append(path) + try: + for p in plist: + os.chown(p, normUid(entry), normGid(entry)) + os.chmod(p, calcPerms(stat.S_IFDIR, entry.get('perms'))) + return True + except (OSError, KeyError): + self.logger.error('Permission fixup failed for %s' % \ + (entry.get('name'))) + return False + + def Verifysymlink(self, entry, _): + """Verify Path type='symlink' entry.""" + if entry.get('to') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % \ + (entry.get('name'))) + return False + try: + sloc = os.readlink(entry.get('name')) + if sloc == entry.get('to'): + return True + self.logger.debug("Symlink %s points to %s, should be %s" % \ + (entry.get('name'), sloc, entry.get('to'))) + entry.set('current_to', sloc) + entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'), + entry.get('to'))) + return False + except OSError: + entry.set('current_exists', 'false') + entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'), + entry.get('to'))) + return False + + def Installsymlink(self, entry): + """Install Path type='symlink' entry.""" + if entry.get('to') == None: + self.logger.error('Entry %s not completely specified. ' + 'Try running bcfg2-lint.' % \ + (entry.get('name'))) + return False + self.logger.info("Installing symlink %s" % (entry.get('name'))) + if os.path.lexists(entry.get('name')): + try: + fmode = os.lstat(entry.get('name'))[stat.ST_MODE] + if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode): + self.logger.debug("Non-directory entry already exists at " + "%s. Unlinking entry." 
+                                      % \
+                                      (entry.get('name')))
+                    os.unlink(entry.get('name'))
+                elif stat.S_ISDIR(fmode):
+                    self.logger.debug("Directory already exists at %s" % \
+                                      (entry.get('name')))
+                    self.cmd.run("mv %s/ %s.bak" % \
+                                 (entry.get('name'),
+                                  entry.get('name')))
+                else:
+                    os.unlink(entry.get('name'))
+            except OSError:
+                self.logger.info("Symlink %s cleanup failed" % \
+                                 (entry.get('name')))
+        try:
+            os.symlink(entry.get('to'), entry.get('name'))
+            return True
+        except OSError:
+            return False
+
+    def InstallPath(self, entry):
+        """Dispatch install to the proper method according to type"""
+        ret = getattr(self, 'Install%s' % entry.get('type'))
+        return ret(entry)
+
+    def VerifyPath(self, entry, _):
+        """Dispatch verify to the proper method according to type"""
+        ret = getattr(self, 'Verify%s' % entry.get('type'))
+        return ret(entry, _)
diff --git a/src/lib/Bcfg2/Client/Tools/Pacman.py b/src/lib/Bcfg2/Client/Tools/Pacman.py
new file mode 100644
index 000000000..c8c05061c
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Pacman.py
@@ -0,0 +1,83 @@
+"""This is the bcfg2 support for pacman"""
+
+import sys  # used by the exception handler in Install()
+
+import Bcfg2.Client.Tools
+
+
+class Pacman(Bcfg2.Client.Tools.PkgTool):
+    '''Archlinux package support'''
+    name = 'Pacman'
+    __execs__ = ["/usr/bin/pacman"]
+    __handles__ = [('Package', 'pacman')]
+    __req__ = {'Package': ['name', 'version']}
+    pkgtype = 'pacman'
+    pkgtool = "/usr/bin/pacman --needed --noconfirm --noprogressbar"
+
+    def __init__(self, logger, setup, config):
+        Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+        self.installed = {}
+        self.RefreshPackages()
+
+    def RefreshPackages(self):
+        '''Refresh memory hashes of packages'''
+        pkgcache = self.cmd.run("/usr/bin/pacman -Q")[1]
+        self.installed = {}
+        for pkg in pkgcache:
+            pkgname = pkg.split(' ')[0].strip()
+            version = pkg.split(' ')[1].strip()
+            #self.logger.info(" pkgname: %s, version: %s" % (pkgname, version))
+            self.installed[pkgname] = version
+
+    def VerifyPackage(self, entry, modlist):
+        '''Verify Package status for entry'''
+
+        self.logger.info("VerifyPackage: %s : %s" % (entry.get('name'),
+                                                     entry.get('version')))
+
+        if 'version' not in entry.attrib:
+            self.logger.info("Cannot verify unversioned package %s" %
+                             (entry.attrib['name']))
+            return False
+
+        if entry.attrib['name'] in self.installed:
+            if entry.attrib['version'] == 'auto':
+                return True
+            elif self.installed[entry.attrib['name']] == entry.attrib['version']:
+                #if not self.setup['quick'] and \
+                #   entry.get('verify', 'true') == 'true':
+                #FIXME: need to figure out if pacman
+                #       allows you to verify packages
+                return True
+            else:
+                entry.set('current_version', self.installed[entry.get('name')])
+                self.logger.info("attribname: %s" % (entry.attrib['name']))
+                return False
+        entry.set('current_exists', 'false')
+        self.logger.info("attribname: %s" % (entry.attrib['name']))
+        return False
+
+    def RemovePackages(self, packages):
+        '''Remove extra packages'''
+        names = [pkg.get('name') for pkg in packages]
+        self.logger.info("Removing packages: %s" % " ".join(names))
+        # pkgtool already carries --noconfirm --noprogressbar
+        self.cmd.run("%s -R %s" % (self.pkgtool, " ".join(names)))
+        self.RefreshPackages()
+        self.extra = self.FindExtraPackages()
+
+    def Install(self, packages, states):
+        '''
+        Pacman Install
+        '''
+        pkgline = ""
+        for pkg in packages:
+            pkgline += " " + pkg.get('name')
+
+        self.logger.info("packages : " + pkgline)
+
+        try:
+            self.logger.debug("Running : %s -S %s" % (self.pkgtool, pkgline))
+            self.cmd.run("%s -S %s" %
(self.pkgtool, pkgline)) + except Exception: + e = sys.exc_info()[1] + self.logger.error("Error occurred during installation: %s" % e) diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py new file mode 100644 index 000000000..4516f419d --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/Portage.py @@ -0,0 +1,125 @@ +"""This is the Bcfg2 tool for the Gentoo Portage system.""" + +import re +import Bcfg2.Client.Tools +from Bcfg2.Bcfg2Py3k import ConfigParser + + +class Portage(Bcfg2.Client.Tools.PkgTool): + """The Gentoo toolset implements package and service operations and + inherits the rest from Toolset.Toolset.""" + name = 'Portage' + __execs__ = ['/usr/bin/emerge', '/usr/bin/equery'] + __handles__ = [('Package', 'ebuild')] + __req__ = {'Package': ['name', 'version']} + pkgtype = 'ebuild' + # requires a working PORTAGE_BINHOST in make.conf + _binpkgtool = ('emerge --getbinpkgonly %s', ('=%s-%s', \ + ['name', 'version'])) + pkgtool = ('emerge %s', ('=%s-%s', ['name', 'version'])) + + def __init__(self, logger, cfg, setup): + self._initialised = False + Bcfg2.Client.Tools.PkgTool.__init__(self, logger, cfg, setup) + self._initialised = True + self.__important__ = self.__important__ + ['/etc/make.conf'] + self._pkg_pattern = re.compile('(.*)-(\d.*)') + self._ebuild_pattern = re.compile('(ebuild|binary)') + self.cfg = cfg + self.installed = {} + self._binpkgonly = True + + # Used to get options from configuration file + parser = ConfigParser.ConfigParser() + parser.read(self.setup.get('setup')) + for opt in ['binpkgonly']: + if parser.has_option(self.name, opt): + setattr(self, ('_%s' % opt), + self._StrToBoolIfBool(parser.get(self.name, opt))) + + if self._binpkgonly: + self.pkgtool = self._binpkgtool + self.RefreshPackages() + + def _StrToBoolIfBool(self, s): + """Returns a boolean if the string specifies a boolean value. + Returns a string otherwise""" + if s.lower() in ('true', 'yes', 't', 'y', '1'): + return True + elif s.lower() in ('false', 'no', 'f', 'n', '0'): + return False + else: + return s + + def RefreshPackages(self): + """Refresh memory hashes of packages.""" + if not self._initialised: + return + self.logger.info('Getting list of installed packages') + cache = self.cmd.run("equery -q list '*'")[1] + self.installed = {} + for pkg in cache: + if self._pkg_pattern.match(pkg): + name = self._pkg_pattern.match(pkg).group(1) + version = self._pkg_pattern.match(pkg).group(2) + self.installed[name] = version + else: + self.logger.info("Failed to parse pkg name %s" % pkg) + + def VerifyPackage(self, entry, modlist): + """Verify package for entry.""" + if not 'version' in entry.attrib: + self.logger.info("Cannot verify unversioned package %s" % + (entry.get('name'))) + return False + + if not (entry.get('name') in self.installed): + # Can't verify package that isn't installed + entry.set('current_exists', 'false') + return False + + # get the installed version + version = self.installed[entry.get('name')] + entry.set('current_version', version) + + if not self.setup['quick']: + if ('verify' not in entry.attrib) or \ + self._StrToBoolIfBool(entry.get('verify')): + + # Check the package if: + # - Not running in quick mode + # - No verify option is specified in the literal configuration + # OR + # - Verify option is specified and is true + + self.logger.debug('Running equery check on %s' % + entry.get('name')) + output = self.cmd.run("/usr/bin/equery -N check '=%s-%s' " + "2>&1 | grep '!!!' 
+                                      "| awk '{print $2}'"
+                                      % ((entry.get('name'), version)))[1]
+                if [filename for filename in output \
+                    if filename not in modlist]:
+                    return False
+
+        # By now the package must be in one of the following states:
+        #     - Does not require checking
+        #     - Has no modified files at all
+        #     - Has modified files in the modlist only
+        if self.installed[entry.get('name')] == version:
+            # Specified package version is installed
+            # Specified package version may be any in literal configuration
+            return True
+
+        # Something got skipped. Indicates a bug
+        return False
+
+    def RemovePackages(self, packages):
+        """Deal with extra configuration detected."""
+        pkgnames = " ".join([pkg.get('name') for pkg in packages])
+        if len(packages) > 0:
+            self.logger.info('Removing packages:')
+            self.logger.info(pkgnames)
+            self.cmd.run("emerge --unmerge --quiet %s" % pkgnames)
+            self.RefreshPackages()
+            self.extra = self.FindExtraPackages()
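Both the Portage driver above and the RPMng driver below pull per-driver options out of the client configuration file with ConfigParser. A minimal standalone sketch of that lookup pattern follows; the file path, section, and option names are illustrative, not taken from a real bcfg2.conf.

try:
    import ConfigParser                        # Python 2
except ImportError:
    import configparser as ConfigParser        # Python 3

def driver_options(config_path, section, defaults):
    """Return the options for one driver section as a dict, falling back
    to the supplied defaults when the file, section, or option is absent."""
    parser = ConfigParser.ConfigParser()
    parser.read(config_path)                   # a missing file is ignored
    opts = dict(defaults)
    if parser.has_section(section):
        for opt in defaults:
            if parser.has_option(section, opt):
                opts[opt] = parser.get(section, opt)
    return opts

# Hypothetical usage:
# opts = driver_options('/etc/bcfg2.conf', 'RPMng',
#                       {'pkg_checks': 'true', 'erase_flags': 'allmatches'})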
diff --git a/src/lib/Bcfg2/Client/Tools/RPMng.py b/src/lib/Bcfg2/Client/Tools/RPMng.py
new file mode 100644
index 000000000..00dd00d71
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/RPMng.py
@@ -0,0 +1,1027 @@
+"""Bcfg2 Support for RPMS"""
+
+import os.path
+import rpm
+import rpmtools
+import Bcfg2.Client.Tools
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+class RPMng(Bcfg2.Client.Tools.PkgTool):
+    """Support for RPM packages."""
+    name = 'RPMng'
+
+    __execs__ = ['/bin/rpm', '/var/lib/rpm']
+    __handles__ = [('Package', 'rpm')]
+
+    __req__ = {'Package': ['name', 'version']}
+    __ireq__ = {'Package': ['url']}
+
+    __new_req__ = {'Package': ['name'], 'Instance': ['version', 'release', 'arch']}
+    __new_ireq__ = {'Package': ['uri'], \
+                    'Instance': ['simplefile']}
+
+    __gpg_req__ = {'Package': ['name', 'version']}
+    __gpg_ireq__ = {'Package': ['name', 'version']}
+
+    __new_gpg_req__ = {'Package': ['name'], 'Instance': ['version', 'release']}
+    __new_gpg_ireq__ = {'Package': ['name'], 'Instance': ['version', 'release']}
+
+    conflicts = ['RPM']
+
+    pkgtype = 'rpm'
+    pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"]))
+
+    def __init__(self, logger, setup, config):
+        Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+
+        # create a global ignore list used when ignoring particular
+        # files during package verification
+        self.ignores = [entry.get('name') for struct in config for entry in struct \
+                        if entry.get('type') == 'ignore']
+        self.instance_status = {}
+        self.extra_instances = []
+        self.modlists = {}
+        self.gpg_keyids = self.getinstalledgpg()
+
+        # Process the RPMng section from the config file.
+        RPMng_CP = ConfigParser.ConfigParser()
+        RPMng_CP.read(self.setup.get('setup'))
+
+        # installonlypackages
+        self.installOnlyPkgs = []
+        if RPMng_CP.has_option(self.name, 'installonlypackages'):
+            for i in RPMng_CP.get(self.name, 'installonlypackages').split(','):
+                self.installOnlyPkgs.append(i.strip())
+        if self.installOnlyPkgs == []:
+            self.installOnlyPkgs = ['kernel', 'kernel-bigmem', 'kernel-enterprise',
+                                    'kernel-smp', 'kernel-modules', 'kernel-debug',
+                                    'kernel-unsupported', 'kernel-source', 'kernel-devel',
+                                    'kernel-default', 'kernel-largesmp-devel',
+                                    'kernel-largesmp', 'kernel-xen', 'gpg-pubkey']
+        if 'gpg-pubkey' not in self.installOnlyPkgs:
+            self.installOnlyPkgs.append('gpg-pubkey')
+        self.logger.debug('installOnlyPackages = %s' % self.installOnlyPkgs)
+
+        # erase_flags
+        self.erase_flags = []
+        if RPMng_CP.has_option(self.name, 'erase_flags'):
+            for i in RPMng_CP.get(self.name, 'erase_flags').split(','):
+                self.erase_flags.append(i.strip())
+        if self.erase_flags == []:
+            self.erase_flags = ['allmatches']
+        self.logger.debug('erase_flags = %s' % self.erase_flags)
+
+        # pkg_checks
+        if RPMng_CP.has_option(self.name, 'pkg_checks'):
+            self.pkg_checks = RPMng_CP.get(self.name, 'pkg_checks').lower()
+        else:
+            self.pkg_checks = 'true'
+        self.logger.debug('pkg_checks = %s' % self.pkg_checks)
+
+        # pkg_verify
+        if RPMng_CP.has_option(self.name, 'pkg_verify'):
+            self.pkg_verify = RPMng_CP.get(self.name, 'pkg_verify').lower()
+        else:
+            self.pkg_verify = 'true'
+        self.logger.debug('pkg_verify = %s' % self.pkg_verify)
+
+        # installed_action
+        if RPMng_CP.has_option(self.name, 'installed_action'):
+            self.installed_action = RPMng_CP.get(self.name, 'installed_action').lower()
+        else:
+            self.installed_action = 'install'
+        self.logger.debug('installed_action = %s' % self.installed_action)
+
+        # version_fail_action
+        if RPMng_CP.has_option(self.name, 'version_fail_action'):
+            self.version_fail_action = RPMng_CP.get(self.name, 'version_fail_action').lower()
+        else:
+            self.version_fail_action = 'upgrade'
+        self.logger.debug('version_fail_action = %s' % self.version_fail_action)
+
+        # verify_fail_action
+        if self.name == "RPMng":
+            if RPMng_CP.has_option(self.name, 'verify_fail_action'):
+                self.verify_fail_action = RPMng_CP.get(self.name, 'verify_fail_action').lower()
+            else:
+                self.verify_fail_action = 'reinstall'
+        else: # yum can't reinstall packages.
+            self.verify_fail_action = 'none'
+        self.logger.debug('verify_fail_action = %s' % self.verify_fail_action)
+
+        # verify_flags
+        if RPMng_CP.has_option(self.name, 'verify_flags'):
+            self.verify_flags = RPMng_CP.get(self.name, 'verify_flags').lower().split(',')
+        else:
+            self.verify_flags = []
+        if '' in self.verify_flags:
+            self.verify_flags.remove('')
+        self.logger.debug('verify_flags = %s' % self.verify_flags)
+
+        # Force a re-prelink of all packages if prelink exists.
+        # Many, if not most, package verification failures are caused by
+        # out-of-date prelinking.
+        if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']:
+            cmdrc, output = self.cmd.run('/usr/sbin/prelink -a -mR')
+            if cmdrc == 0:
+                self.logger.debug('Pre-emptive prelink succeeded')
+            else:
+                # FIXME : this is dumb - what if the output is huge?
+                self.logger.error('Pre-emptive prelink failed: %s' % output)
+
+
+    def RefreshPackages(self):
+        """
+        Creates self.installed{} which is a dict of installed packages.
+
+        The dict items are lists of nevra dicts.  This loosely matches the
+        config from the server and what rpmtools uses to specify packages.
+
+        e.g.
+ + self.installed['foo'] = [ {'name':'foo', 'epoch':None, + 'version':'1', 'release':2, + 'arch':'i386'}, + {'name':'foo', 'epoch':None, + 'version':'1', 'release':2, + 'arch':'x86_64'} ] + """ + self.installed = {} + refresh_ts = rpmtools.rpmtransactionset() + # Don't bother with signature checks at this stage. The GPG keys might + # not be installed. + refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES) + for nevra in rpmtools.rpmpackagelist(refresh_ts): + self.installed.setdefault(nevra['name'], []).append(nevra) + if self.setup['debug']: + print("The following package instances are installed:") + for name, instances in list(self.installed.items()): + self.logger.debug(" " + name) + for inst in instances: + self.logger.debug(" %s" %self.str_evra(inst)) + refresh_ts.closeDB() + del refresh_ts + + def VerifyPackage(self, entry, modlist, pinned_version=None): + """ + Verify Package status for entry. + Performs the following: + - Checks for the presence of required Package Instances. + - Compares the evra 'version' info against self.installed{}. + - RPM level package verify (rpm --verify). + - Checks for the presence of unrequired package instances. + + Produces the following dict and list for RPMng.Install() to use: + For installs/upgrades/fixes of required instances: + instance_status = { <Instance Element Object>: + { 'installed': True|False, + 'version_fail': True|False, + 'verify_fail': True|False, + 'pkg': <Package Element Object>, + 'modlist': [ <filename>, ... ], + 'verify' : [ <rpm --verify results> ] + }, ...... + } + + For deletions of unrequired instances: + extra_instances = [ <Package Element Object>, ..... ] + + Constructs the text prompts for interactive mode. + """ + instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package'] + if instances == []: + # We have an old style no Instance entry. Convert it to new style. + instance = Bcfg2.Client.XML.SubElement(entry, 'Package') + for attrib in list(entry.attrib.keys()): + instance.attrib[attrib] = entry.attrib[attrib] + if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true': + if 'any' in [entry.get('version'), pinned_version]: + version, release = 'any', 'any' + elif entry.get('version') == 'auto': + if pinned_version != None: + version, release = pinned_version.split('-') + else: + return False + else: + version, release = entry.get('version').split('-') + instance.set('version', version) + instance.set('release', release) + if entry.get('verify', 'true') == 'false': + instance.set('verify', 'false') + instances = [ instance ] + + self.logger.debug("Verifying package instances for %s" % entry.get('name')) + package_fail = False + qtext_versions = '' + + if entry.get('name') in self.installed: + # There is at least one instance installed. + if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true': + rpmTs = rpm.TransactionSet() + rpmHeader = None + for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')): + if rpmHeader is None or rpm.versionCompare(h, rpmHeader) > 0: + rpmHeader = h + rpmProvides = [ h['provides'] for h in \ + rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')) ] + rpmIntersection = set(rpmHeader['provides']) & \ + set(self.installOnlyPkgs) + if len(rpmIntersection) > 0: + # Packages that should only be installed or removed. + # e.g. kernels. 
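# A rough illustration of the install-only test performed just above, with
# made-up data: a package is treated as "install only" when anything it
# Provides intersects the installOnlyPkgs list from the driver options.
#
#     provides = ['kernel', 'kernel-drm']          # newest installed header
#     install_only = ['kernel', 'gpg-pubkey']
#     if set(provides) & set(install_only):
#         ...  # never upgraded in place, only installed or erased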
+ self.logger.debug(" Install only package.") + for inst in instances: + self.instance_status.setdefault(inst, {})['installed'] = False + self.instance_status[inst]['version_fail'] = False + if inst.tag == 'Package' and len(self.installed[entry.get('name')]) > 1: + self.logger.error("WARNING: Multiple instances of package %s are installed." % \ + (entry.get('name'))) + for pkg in self.installed[entry.get('name')]: + if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) \ + or self.inst_evra_equal(inst, pkg): + if inst.get('version') == 'any': + self.logger.error("got any version") + self.logger.debug(" %s" % self.str_evra(inst)) + self.instance_status[inst]['installed'] = True + + if self.pkg_verify == 'true' and \ + inst.get('pkg_verify', 'true') == 'true': + flags = inst.get('verify_flags', '').split(',') + self.verify_flags + if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ + entry.get('name') != 'gpg-pubkey': + flags += ['nosignature', 'nodigest'] + self.logger.debug('WARNING: Package %s %s requires GPG Public key with ID %s'\ + % (pkg.get('name'), self.str_evra(pkg), \ + pkg.get('gpgkeyid', ''))) + self.logger.debug(' Disabling signature check.') + + if self.setup.get('quick', False): + if rpmtools.prelink_exists: + flags += ['nomd5', 'nosize'] + else: + flags += ['nomd5'] + self.logger.debug(" verify_flags = %s" % flags) + + if inst.get('verify', 'true') == 'false': + self.instance_status[inst]['verify'] = None + else: + vp_ts = rpmtools.rpmtransactionset() + self.instance_status[inst]['verify'] = \ + rpmtools.rpm_verify( vp_ts, pkg, flags) + vp_ts.closeDB() + del vp_ts + + if self.instance_status[inst]['installed'] == False: + self.logger.info(" Package %s %s not installed." % \ + (entry.get('name'), self.str_evra(inst))) + + qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst) + entry.set('current_exists', 'false') + else: + # Normal Packages that can be upgraded. + for inst in instances: + self.instance_status.setdefault(inst, {})['installed'] = False + self.instance_status[inst]['version_fail'] = False + + # Only installed packages with the same architecture are + # relevant. + if inst.get('arch', None) == None: + arch_match = self.installed[entry.get('name')] + else: + arch_match = [pkg for pkg in self.installed[entry.get('name')] \ + if pkg.get('arch', None) == inst.get('arch', None)] + + if len(arch_match) > 1: + self.logger.error("Multiple instances of package %s installed with the same achitecture." % \ + (entry.get('name'))) + elif len(arch_match) == 1: + # There is only one installed like there should be. + # Check that it is the right version. 
+ for pkg in arch_match: + if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) or \ + self.inst_evra_equal(inst, pkg): + self.logger.debug(" %s" % self.str_evra(inst)) + self.instance_status[inst]['installed'] = True + + if self.pkg_verify == 'true' and \ + inst.get('pkg_verify', 'true') == 'true': + flags = inst.get('verify_flags', '').split(',') + self.verify_flags + if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ + 'nosignature' not in flags: + flags += ['nosignature', 'nodigest'] + self.logger.info('WARNING: Package %s %s requires GPG Public key with ID %s'\ + % (pkg.get('name'), self.str_evra(pkg), \ + pkg.get('gpgkeyid', ''))) + self.logger.info(' Disabling signature check.') + + if self.setup.get('quick', False): + if rpmtools.prelink_exists: + flags += ['nomd5', 'nosize'] + else: + flags += ['nomd5'] + self.logger.debug(" verify_flags = %s" % flags) + + if inst.get('verify', 'true') == 'false': + self.instance_status[inst]['verify'] = None + else: + vp_ts = rpmtools.rpmtransactionset() + self.instance_status[inst]['verify'] = \ + rpmtools.rpm_verify( vp_ts, pkg, flags ) + vp_ts.closeDB() + del vp_ts + + else: + # Wrong version installed. + self.instance_status[inst]['version_fail'] = True + self.logger.info(" Wrong version installed. Want %s, but have %s"\ + % (self.str_evra(inst), self.str_evra(pkg))) + + qtext_versions = qtext_versions + 'U(%s -> %s) ' % \ + (self.str_evra(pkg), self.str_evra(inst)) + elif len(arch_match) == 0: + # This instance is not installed. + self.instance_status[inst]['installed'] = False + self.logger.info(" %s is not installed." % self.str_evra(inst)) + qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst) + + # Check the rpm verify results. + for inst in instances: + instance_fail = False + # Dump the rpm verify results. + #****Write something to format this nicely.***** + if self.setup['debug'] and self.instance_status[inst].get('verify', None): + self.logger.debug(self.instance_status[inst]['verify']) + + self.instance_status[inst]['verify_fail'] = False + if self.instance_status[inst].get('verify', None): + if len(self.instance_status[inst].get('verify')) > 1: + self.logger.info("WARNING: Verification of more than one package instance.") + + for result in self.instance_status[inst]['verify']: + + # Check header results + if result.get('hdr', None): + instance_fail = True + self.instance_status[inst]['verify_fail'] = True + + # Check dependency results + if result.get('deps', None): + instance_fail = True + self.instance_status[inst]['verify_fail'] = True + + # Check the rpm verify file results against the modlist + # and entry and per Instance Ignores. + ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \ + [ig.get('name') for ig in inst.findall('Ignore')] + \ + self.ignores + for file_result in result.get('files', []): + if file_result[-1] not in modlist + ignores: + instance_fail = True + self.instance_status[inst]['verify_fail'] = True + else: + self.logger.debug(" Modlist/Ignore match: %s" % \ + (file_result[-1])) + + if instance_fail == True: + self.logger.debug("*** Instance %s failed RPM verification ***" % \ + self.str_evra(inst)) + qtext_versions = qtext_versions + 'R(%s) ' % self.str_evra(inst) + self.modlists[entry] = modlist + + # Attach status structure for return to server for reporting. 
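# For reference, the status record serialized below has the shape documented
# in the VerifyPackage() docstring above; the values here are made up:
#
#     {'installed': True, 'version_fail': False, 'verify_fail': True,
#      'pkg': <Package element>, 'modlist': ['/etc/motd'],
#      'verify': [{'files': [...]}]}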
+ inst.set('verify_status', str(self.instance_status[inst])) + + if self.instance_status[inst]['installed'] == False or \ + self.instance_status[inst].get('version_fail', False)== True or \ + self.instance_status[inst].get('verify_fail', False) == True: + package_fail = True + self.instance_status[inst]['pkg'] = entry + self.modlists[entry] = modlist + + # Find Installed Instances that are not in the Config. + extra_installed = self.FindExtraInstances(entry, self.installed[entry.get('name')]) + if extra_installed != None: + package_fail = True + self.extra_instances.append(extra_installed) + for inst in extra_installed.findall('Instance'): + qtext_versions = qtext_versions + 'D(%s) ' % self.str_evra(inst) + self.logger.debug("Found Extra Instances %s" % qtext_versions) + + if package_fail == True: + self.logger.info(" Package %s failed verification." % \ + (entry.get('name'))) + qtext = 'Install/Upgrade/delete Package %s instance(s) - %s (y/N) ' % \ + (entry.get('name'), qtext_versions) + entry.set('qtext', qtext) + + bcfg2_versions = '' + for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']: + bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(bcfg2_inst) + if bcfg2_versions != '': + entry.set('version', bcfg2_versions) + installed_versions = '' + + for installed_inst in self.installed[entry.get('name')]: + installed_versions = installed_versions + '(%s) ' % \ + self.str_evra(installed_inst) + + entry.set('current_version', installed_versions) + return False + + else: + # There are no Instances of this package installed. + self.logger.debug("Package %s has no instances installed" % (entry.get('name'))) + entry.set('current_exists', 'false') + bcfg2_versions = '' + for inst in instances: + qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst) + self.instance_status.setdefault(inst, {})['installed'] = False + self.modlists[entry] = modlist + self.instance_status[inst]['pkg'] = entry + if inst.tag == 'Instance': + bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(inst) + if bcfg2_versions != '': + entry.set('version', bcfg2_versions) + entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % \ + (entry.get('name'), qtext_versions)) + + return False + return True + + def RemovePackages(self, packages): + """ + Remove specified entries. + + packages is a list of Package Entries with Instances generated + by FindExtraPackages(). 
+ + """ + self.logger.debug('Running RPMng.RemovePackages()') + + pkgspec_list = [] + for pkg in packages: + for inst in pkg: + if pkg.get('name') != 'gpg-pubkey': + pkgspec = { 'name':pkg.get('name'), + 'epoch':inst.get('epoch', None), + 'version':inst.get('version'), + 'release':inst.get('release'), + 'arch':inst.get('arch') } + pkgspec_list.append(pkgspec) + else: + pkgspec = { 'name':pkg.get('name'), + 'version':inst.get('version'), + 'release':inst.get('release')} + self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ + % (pkgspec.get('name'), self.str_evra(pkgspec))) + self.logger.info(" This package will be deleted in a future version of the RPMng driver.") + #pkgspec_list.append(pkg_spec) + + erase_results = rpmtools.rpm_erase(pkgspec_list, self.erase_flags) + if erase_results == []: + self.modified += packages + for pkg in pkgspec_list: + self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg))) + else: + self.logger.info("Bulk erase failed with errors:") + self.logger.debug("Erase results = %s" % erase_results) + self.logger.info("Attempting individual erase for each package.") + pkgspec_list = [] + for pkg in packages: + pkg_modified = False + for inst in pkg: + if pkg.get('name') != 'gpg-pubkey': + pkgspec = { 'name':pkg.get('name'), + 'epoch':inst.get('epoch', None), + 'version':inst.get('version'), + 'release':inst.get('release'), + 'arch':inst.get('arch') } + pkgspec_list.append(pkgspec) + else: + pkgspec = { 'name':pkg.get('name'), + 'version':inst.get('version'), + 'release':inst.get('release')} + self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ + % (pkgspec.get('name'), self.str_evra(pkgspec))) + self.logger.info(" This package will be deleted in a future version of the RPMng driver.") + continue # Don't delete the gpg-pubkey packages for now. + erase_results = rpmtools.rpm_erase([pkgspec], self.erase_flags) + if erase_results == []: + pkg_modified = True + self.logger.info("Deleted %s %s" % \ + (pkgspec.get('name'), self.str_evra(pkgspec))) + else: + self.logger.error("unable to delete %s %s" % \ + (pkgspec.get('name'), self.str_evra(pkgspec))) + self.logger.debug("Failure = %s" % erase_results) + if pkg_modified == True: + self.modified.append(pkg) + + self.RefreshPackages() + self.extra = self.FindExtraPackages() + + def FixInstance(self, instance, inst_status): + """" + Control if a reinstall of a package happens or not based on the + results from RPMng.VerifyPackage(). + + Return True to reinstall, False to not reintstall. + + """ + fix = False + + if inst_status.get('installed', False) == False: + if instance.get('installed_action', 'install') == "install" and \ + self.installed_action == "install": + fix = True + else: + self.logger.debug('Installed Action for %s %s is to not install' % \ + (inst_status.get('pkg').get('name'), + self.str_evra(instance))) + + elif inst_status.get('version_fail', False) == True: + if instance.get('version_fail_action', 'upgrade') == "upgrade" and \ + self.version_fail_action == "upgrade": + fix = True + else: + self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \ + (inst_status.get('pkg').get('name'), + self.str_evra(instance))) + + elif inst_status.get('verify_fail', False) == True and self.name == "RPMng": + # yum can't reinstall packages so only do this for rpm. 
+
+    def FixInstance(self, instance, inst_status):
+        """
+        Control whether a package instance gets reinstalled, based on the
+        results from RPMng.VerifyPackage().
+
+        Return True to reinstall, False to not reinstall.
+
+        """
+        fix = False
+
+        if inst_status.get('installed', False) == False:
+            if instance.get('installed_action', 'install') == "install" and \
+               self.installed_action == "install":
+                fix = True
+            else:
+                self.logger.debug('Installed Action for %s %s is to not install' % \
+                                  (inst_status.get('pkg').get('name'),
+                                   self.str_evra(instance)))
+
+        elif inst_status.get('version_fail', False) == True:
+            if instance.get('version_fail_action', 'upgrade') == "upgrade" and \
+               self.version_fail_action == "upgrade":
+                fix = True
+            else:
+                self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \
+                                  (inst_status.get('pkg').get('name'),
+                                   self.str_evra(instance)))
+
+        elif inst_status.get('verify_fail', False) == True and self.name == "RPMng":
+            # yum can't reinstall packages so only do this for rpm.
+            if instance.get('verify_fail_action', 'reinstall') == "reinstall" and \
+               self.verify_fail_action == "reinstall":
+                for inst in inst_status.get('verify'):
+                    # This needs to be a for loop rather than a straight get()
+                    # because the underlying routines handle multiple packages
+                    # and return a list of results.
+                    self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra'))
+
+                    if inst.get("hdr", False):
+                        fix = True
+
+                    elif inst.get('files', False):
+                        # Parse rpm verify file results
+                        for file_result in inst.get('files', []):
+                            self.logger.debug('reinstall_check: file: %s' % file_result)
+                            if file_result[-2] != 'c':
+                                fix = True
+                                break
+
+                    # Shouldn't really need this, but included for clarity.
+                    elif inst.get("deps", False):
+                        fix = False
+            else:
+                self.logger.debug('Verify Fail Action for %s %s is to not reinstall' % \
+                                  (inst_status.get('pkg').get('name'),
+                                   self.str_evra(instance)))
+
+        return fix
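# The decision FixInstance() just made can be read as a small policy table:
# an instance is repaired only when the per-instance action attribute and
# the driver-wide option agree. A simplified sketch (plain dicts stand in
# for the XML instance and the parsed driver options; the verify_fail
# branch above additionally inspects the rpm --verify results):
def fix_decision(status, inst_attrs, opts):
    if not status.get('installed', False):
        return (inst_attrs.get('installed_action', 'install') == 'install'
                and opts['installed_action'] == 'install')
    if status.get('version_fail', False):
        return (inst_attrs.get('version_fail_action', 'upgrade') == 'upgrade'
                and opts['version_fail_action'] == 'upgrade')
    if status.get('verify_fail', False):
        return (inst_attrs.get('verify_fail_action', 'reinstall') == 'reinstall'
                and opts['verify_fail_action'] == 'reinstall')
    return False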
+ self.logger.error("Single Pass for InstallOnlyPackages Failed") + installed_instances = [] + for inst in install_only_pkgs: + install_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ + inst.get('simplefile')) + self.logger.debug("rpm --install --quiet --oldpackage %s" % install_args) + cmdrc, output = self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs %s" % \ + install_args) + if cmdrc == 0: + installed_instances.append(inst) + else: + self.logger.debug("InstallOnlyPackage %s %s would not install." % \ + (self.instance_status[inst].get('pkg').get('name'), \ + self.str_evra(inst))) + + install_pkg_set = set([self.instance_status[inst].get('pkg') \ + for inst in install_only_pkgs]) + self.RefreshPackages() + + # Install GPG keys. + if len(gpg_keys) > 0: + for inst in gpg_keys: + self.logger.info("Installing GPG keys.") + key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ + inst.get('simplefile')) + cmdrc, output = self.cmd.run("rpm --import %s" % key_arg) + if cmdrc != 0: + self.logger.debug("Unable to install %s-%s" % \ + (self.instance_status[inst].get('pkg').get('name'), \ + self.str_evra(inst))) + else: + self.logger.debug("Installed %s-%s-%s" % \ + (self.instance_status[inst].get('pkg').get('name'), \ + inst.get('version'), inst.get('release'))) + self.RefreshPackages() + self.gpg_keyids = self.getinstalledgpg() + pkg = self.instance_status[gpg_keys[0]].get('pkg') + states[pkg] = self.VerifyPackage(pkg, []) + + # Fix upgradeable packages. + if len(upgrade_pkgs) > 0: + self.logger.info("Attempting to upgrade packages") + upgrade_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ + inst.get('simplefile')) \ + for inst in upgrade_pkgs]) + cmdrc, output = self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \ + upgrade_args) + if cmdrc == 0: + # The rpm command succeeded. All packages upgraded. + self.logger.info("Single Pass for Upgraded Packages Succeded") + upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \ + for inst in upgrade_pkgs]) + self.RefreshPackages() + else: + # The rpm command failed. No packages upgraded. + # Try upgrading instances individually. + self.logger.error("Single Pass for Upgrading Packages Failed") + upgraded_instances = [] + for inst in upgrade_pkgs: + upgrade_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ + inst.get('simplefile')) + #self.logger.debug("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \ + # upgrade_args) + cmdrc, output = self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % upgrade_args) + if cmdrc == 0: + upgraded_instances.append(inst) + else: + self.logger.debug("Package %s %s would not upgrade." 
+
+    def canInstall(self, entry):
+        """Test if entry has enough information to be installed."""
+        if not self.handlesEntry(entry):
+            return False
+
+        if 'failure' in entry.attrib:
+            self.logger.error("Cannot install entry %s:%s with bind failure" % \
+                              (entry.tag, entry.get('name')))
+            return False
+
+        instances = entry.findall('Instance')
+
+        # If the entry wasn't verifiable, then we really don't want to try
+        # and fix something that we don't know is broken.
+        if not self.canVerify(entry):
+            self.logger.debug("WARNING: Package %s was not verifiable, not passing to Install()" \
+                              % entry.get('name'))
+            return False
+
+        if not instances:
+            # Old non Instance format, unmodified.
+            if entry.get('name') == 'gpg-pubkey':
+                # gpg-pubkey packages aren't really packages, so we have to do
+                # something a little different.
+                # Check that the Package Level has what we need for verification.
+                if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+            else:
+                if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+        else:
+            if entry.get('name') == 'gpg-pubkey':
+                # gpg-pubkey packages aren't really packages, so we have to do
+                # something a little different.
+                # Check that the Package Level has what we need for verification.
+                if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+                # Check that the Instance Level has what we need for verification.
+                for inst in instances:
+                    if [attr for attr in self.__new_gpg_ireq__[inst.tag] \
+                        if attr not in inst.attrib]:
+                        self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+                                          % (inst.tag, entry.get('name')))
+                        return False
+            else:
+                # New format with Instances.
+                # Check that the Package Level has what we need for verification.
+                if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+                                      % (entry.tag, entry.get('name')))
+                    self.logger.error("             Required attributes that may not be present are %s" \
+                                      % (self.__new_ireq__[entry.tag]))
+                    return False
+                # Check that the Instance Level has what we need for verification.
+                for inst in instances:
+                    if inst.tag == 'Instance':
+                        if [attr for attr in self.__new_ireq__[inst.tag] \
+                            if attr not in inst.attrib]:
+                            self.logger.error("Incomplete information for %s of package %s; cannot install" \
+                                              % (inst.tag, entry.get('name')))
+                            self.logger.error("         Required attributes that may not be present are %s" \
+                                              % (self.__new_ireq__[inst.tag]))
+                            return False
+        return True
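# canInstall() above and canVerify() below repeat one idiom: collect the
# required attribute names that an entry (or instance) is missing, and
# refuse to proceed when any are absent. Factored out as a sketch:
def missing_attrs(required, attrib):
    """Return the required attribute names absent from an element's attrib."""
    return [a for a in required if a not in attrib]

# e.g. missing_attrs(['name', 'version'], entry.attrib) == ['version']
# when the entry carries only a name.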
+                for inst in instances:
+                    if inst.tag == 'Instance':
+                        if [attr for attr in self.__new_ireq__[inst.tag] \
+                            if attr not in inst.attrib]:
+                            self.logger.error("Incomplete information for %s of package %s; cannot install" \
+                                              % (inst.tag, entry.get('name')))
+                            self.logger.error("      Required attributes that may not be present are %s" \
+                                              % (self.__new_ireq__[inst.tag]))
+                            return False
+        return True
+
+    def canVerify(self, entry):
+        """
+        Test if entry has enough information to be verified.
+
+        Three types of entries are checked.
+           Old style Package
+           New style Package with Instances
+           gpg-pubkey packages
+
+        Also the old style entries get modified after the first
+        VerifyPackage() run, so there needs to be a second test.
+
+        """
+        if not self.handlesEntry(entry):
+            return False
+
+        if 'failure' in entry.attrib:
+            self.logger.error("Entry %s:%s reports bind failure: %s" % \
+                              (entry.tag, entry.get('name'), entry.get('failure')))
+            return False
+
+        # We don't want to do any checks so we don't care what the entry has in it.
+        if self.pkg_checks == 'false' or \
+           entry.get('pkg_checks', 'true').lower() == 'false':
+            return True
+
+        instances = entry.findall('Instance')
+
+        if not instances:
+            # Old non Instance format, unmodified.
+            if entry.get('name') == 'gpg-pubkey':
+                # gpg-pubkey packages aren't really packages, so we have to do
+                # something a little different.
+                # Check that the Package Level has what we need for verification.
+                if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+            elif entry.tag == 'Path' and entry.get('type') == 'ignore':
+                # ignored Paths are only relevant during failed package
+                # verification
+                pass
+            else:
+                if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+        else:
+            if entry.get('name') == 'gpg-pubkey':
+                # gpg-pubkey packages aren't really packages, so we have to do
+                # something a little different.
+                # Check that the Package Level has what we need for verification.
+                if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+                # Check that the Instance Level has what we need for verification.
+                for inst in instances:
+                    if [attr for attr in self.__new_gpg_req__[inst.tag] \
+                        if attr not in inst.attrib]:
+                        self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+                                          % (inst.tag, inst.get('name')))
+                        return False
+            else:
+                # New format with Instances, or old style modified.
+                # Check that the Package Level has what we need for verification.
+                if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]:
+                    self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+                                      % (entry.tag, entry.get('name')))
+                    return False
+                # Check that the Instance Level has what we need for verification.
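# The branching above hinges on the two Package shapes the server may send.
# A hedged sketch of both, with xml.etree.ElementTree standing in for
# Bcfg2.Client.XML (attribute values are made up):
import xml.etree.ElementTree as ET

# Old style: EVRA data lives directly on the Package element.
old = ET.Element('Package', name='foo', type='rpm',
                 version='1.0', release='1', arch='x86_64')

# New style: one Instance child per installed flavor of the package.
new = ET.Element('Package', name='foo', type='rpm')
ET.SubElement(new, 'Instance', version='1.0', release='1', arch='i386')
ET.SubElement(new, 'Instance', version='1.0', release='1', arch='x86_64')

assert old.findall('Instance') == []      # routed to the old-format branch
assert len(new.findall('Instance')) == 2  # routed to the Instance branch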
+ for inst in instances: + if inst.tag == 'Instance': + if [attr for attr in self.__new_req__[inst.tag] \ + if attr not in inst.attrib]: + self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ + % (inst.tag, inst.get('name'))) + return False + return True + + def FindExtraPackages(self): + """Find extra packages.""" + packages = [entry.get('name') for entry in self.getSupportedEntries()] + extras = [] + + for (name, instances) in list(self.installed.items()): + if name not in packages: + extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) + for installed_inst in instances: + if self.setup['extra']: + self.logger.info("Extra Package %s %s." % \ + (name, self.str_evra(installed_inst))) + tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \ + version = installed_inst.get('version'), \ + release = installed_inst.get('release')) + if installed_inst.get('epoch', None) != None: + tmp_entry.set('epoch', str(installed_inst.get('epoch'))) + if installed_inst.get('arch', None) != None: + tmp_entry.set('arch', installed_inst.get('arch')) + extras.append(extra_entry) + return extras + + + def FindExtraInstances(self, pkg_entry, installed_entry): + """ + Check for installed instances that are not in the config. + Return a Package Entry with Instances to remove, or None if there + are no Instances to remove. + + """ + name = pkg_entry.get('name') + extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) + instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package'] + if name in self.installOnlyPkgs: + for installed_inst in installed_entry: + not_found = True + for inst in instances: + if self.pkg_vr_equal(inst, installed_inst) or \ + self.inst_evra_equal(inst, installed_inst): + not_found = False + break + if not_found == True: + # Extra package. + self.logger.info("Extra InstallOnlyPackage %s %s." % \ + (name, self.str_evra(installed_inst))) + tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \ + version = installed_inst.get('version'), \ + release = installed_inst.get('release')) + if installed_inst.get('epoch', None) != None: + tmp_entry.set('epoch', str(installed_inst.get('epoch'))) + if installed_inst.get('arch', None) != None: + tmp_entry.set('arch', installed_inst.get('arch')) + else: + # Normal package, only check arch. 
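# The loops that follow are a linear-scan set difference: an installed
# instance is "extra" when it matches none of the configured instances.  A
# reduced sketch of that idea with plain dicts (hypothetical data; the real
# code compares via pkg_vr_equal()/inst_evra_equal(), or arch alone for
# normal packages as the comment above notes):
configured = [{'version': '1.0', 'release': '1', 'arch': 'x86_64'}]
installed = [{'version': '1.0', 'release': '1', 'arch': 'x86_64'},
             {'version': '0.9', 'release': '3', 'arch': 'x86_64'}]

extras = [i for i in installed
          if not any(i == c for c in configured)]
assert extras == [{'version': '0.9', 'release': '3', 'arch': 'x86_64'}]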
+ for installed_inst in installed_entry: + not_found = True + for inst in instances: + if installed_inst.get('arch', None) == inst.get('arch', None) or\ + inst.tag == 'Package': + not_found = False + break + if not_found: + self.logger.info("Extra Normal Package Instance %s %s" % \ + (name, self.str_evra(installed_inst))) + tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \ + version = installed_inst.get('version'), \ + release = installed_inst.get('release')) + if installed_inst.get('epoch', None) != None: + tmp_entry.set('epoch', str(installed_inst.get('epoch'))) + if installed_inst.get('arch', None) != None: + tmp_entry.set('arch', installed_inst.get('arch')) + + if len(extra_entry) == 0: + extra_entry = None + + return extra_entry + + def str_evra(self, instance): + """Convert evra dict entries to a string.""" + if instance.get('epoch', '*') in ['*', None]: + return '%s-%s.%s' % (instance.get('version', '*'), + instance.get('release', '*'), + instance.get('arch', '*')) + else: + return '%s:%s-%s.%s' % (instance.get('epoch', '*'), + instance.get('version', '*'), + instance.get('release', '*'), + instance.get('arch', '*')) + + def pkg_vr_equal(self, config_entry, installed_entry): + ''' + Compare old style entry to installed entry. Which means ignore + the epoch and arch. + ''' + if (config_entry.tag == 'Package' and \ + config_entry.get('version') == installed_entry.get('version') and \ + config_entry.get('release') == installed_entry.get('release')): + return True + else: + return False + + def inst_evra_equal(self, config_entry, installed_entry): + """Compare new style instance to installed entry.""" + + if config_entry.get('epoch', None) != None: + epoch = int(config_entry.get('epoch')) + else: + epoch = None + + if (config_entry.tag == 'Instance' and \ + (epoch == installed_entry.get('epoch', 0) or \ + (epoch == 0 and installed_entry.get('epoch', 0) == None) or \ + (epoch == None and installed_entry.get('epoch', 0) == 0)) and \ + config_entry.get('version') == installed_entry.get('version') and \ + config_entry.get('release') == installed_entry.get('release') and \ + config_entry.get('arch', None) == installed_entry.get('arch', None)): + return True + else: + return False + + def getinstalledgpg(self): + """ + Create a list of installed GPG key IDs. + + The pgp-pubkey package version is the least significant 4 bytes + (big-endian) of the key ID which is good enough for our purposes. + + """ + init_ts = rpmtools.rpmtransactionset() + init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES) + gpg_hdrs = rpmtools.getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'}) + keyids = [ header[rpm.RPMTAG_VERSION] for header in gpg_hdrs] + keyids.append('None') + init_ts.closeDB() + del init_ts + return keyids + + def VerifyPath(self, entry, _): + """ + We don't do anything here since all + Paths are processed in __init__ + """ + return True diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py new file mode 100644 index 000000000..1b9a29478 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py @@ -0,0 +1,97 @@ +"""This is rc-update support.""" + +import os +import Bcfg2.Client.Tools +import Bcfg2.Client.XML + + +class RcUpdate(Bcfg2.Client.Tools.SvcTool): + """RcUpdate support for Bcfg2.""" + name = 'RcUpdate' + __execs__ = ['/sbin/rc-update', '/bin/rc-status'] + __handles__ = [('Service', 'rc-update')] + __req__ = {'Service': ['name', 'status']} + + def VerifyService(self, entry, _): + """ + Verify Service status for entry. 
+ Assumes we run in the "default" runlevel. + + """ + # check if service is enabled + cmd = '/sbin/rc-update show default | grep %s' + rc = self.cmd.run(cmd % entry.get('name'))[0] + is_enabled = (rc == 0) + + if entry.get('mode', 'default') == 'supervised': + # check if init script exists + try: + os.stat('/etc/init.d/%s' % entry.get('name')) + except OSError: + self.logger.debug('Init script for service %s does not exist' % + entry.get('name')) + return False + + # check if service is enabled + cmd = '/etc/init.d/%s status | grep started' + rc = self.cmd.run(cmd % entry.attrib['name'])[0] + is_running = (rc == 0) + else: + # we don't care + is_running = is_enabled + + if entry.get('status') == 'on' and not (is_enabled and is_running): + entry.set('current_status', 'off') + return False + + elif entry.get('status') == 'off' and (is_enabled or is_running): + entry.set('current_status', 'on') + return False + + return True + + def InstallService(self, entry): + """ + Install Service entry + In supervised mode we also take care it's (not) running. + + """ + # don't take any actions for mode='manual' + if entry.get('mode', 'default') == 'manual': + self.logger.info("Service %s mode set to manual. Skipping " + "installation." % (entry.get('name'))) + return False + self.logger.info('Installing Service %s' % entry.get('name')) + if entry.get('status') == 'on': + # make sure it's running if in supervised mode + if entry.get('mode', 'default') == 'supervised' \ + and entry.get('current_status') == 'off': + self.start_service(entry) + # make sure it's enabled + cmd = '/sbin/rc-update add %s default' + rc = self.cmd.run(cmd % entry.get('name'))[0] + return (rc == 0) + + elif entry.get('status') == 'off': + # make sure it's not running if in supervised mode + if entry.get('mode', 'default') == 'supervised' \ + and entry.get('current_status') == 'on': + self.stop_service(entry) + # make sure it's disabled + cmd = '/sbin/rc-update del %s default' + rc = self.cmd.run(cmd % entry.get('name'))[0] + return (rc == 0) + + return False + + def FindExtra(self): + """Locate extra rc-update services.""" + cmd = '/bin/rc-status -s | grep started' + allsrv = [line.split()[0] for line in self.cmd.run(cmd)[1]] + self.logger.debug('Found active services:') + self.logger.debug(allsrv) + specified = [srv.get('name') for srv in self.getSupportedEntries()] + return [Bcfg2.Client.XML.Element('Service', + type='rc-update', + name=name) \ + for name in allsrv if name not in specified] diff --git a/src/lib/Bcfg2/Client/Tools/SMF.py b/src/lib/Bcfg2/Client/Tools/SMF.py new file mode 100644 index 000000000..f824410ad --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/SMF.py @@ -0,0 +1,137 @@ +"""SMF support for Bcfg2""" + +import glob +import os + +import Bcfg2.Client.Tools + + +class SMF(Bcfg2.Client.Tools.SvcTool): + """Support for Solaris SMF Services.""" + __handles__ = [('Service', 'smf')] + __execs__ = ['/usr/sbin/svcadm', '/usr/bin/svcs'] + name = 'SMF' + __req__ = {'Service': ['name', 'status']} + __ireq__ = {'Service': ['name', 'status', 'FMRI']} + + def get_svc_command(self, service, action): + if service.get('type') == 'lrc': + return Bcfg2.Client.Tools.SvcTool.get_svc_command(self, + service, action) + if action == 'stop': + return "/usr/sbin/svcadm disable %s" % (service.get('FMRI')) + elif action == 'restart': + return "/usr/sbin/svcadm restart %s" % (service.get('FMRI')) + elif action == 'start': + return "/usr/sbin/svcadm enable %s" % (service.get('FMRI')) + + def GetFMRI(self, entry): + """Perform FMRI resolution 
for service.""" + if not 'FMRI' in entry.attrib: + name = self.cmd.run("/usr/bin/svcs -H -o FMRI %s 2>/dev/null" % \ + entry.get('name'))[1] + if name: + entry.set('FMRI', name[0]) + return True + else: + self.logger.info('Failed to locate FMRI for service %s' % \ + entry.get('name')) + return False + return True + + def VerifyService(self, entry, _): + """Verify SMF Service entry.""" + if not self.GetFMRI(entry): + self.logger.error("smf service %s doesn't have FMRI set" % \ + entry.get('name')) + return False + if entry.get('FMRI').startswith('lrc'): + filename = entry.get('FMRI').split('/')[-1] + # this is a legacy service + gname = "/etc/rc*.d/%s" % filename + files = glob.glob(gname.replace('_', '.')) + if files: + self.logger.debug("Matched %s with %s" % \ + (entry.get("FMRI"), ":".join(files))) + return entry.get('status') == 'on' + else: + self.logger.debug("No service matching %s" % \ + (entry.get("FMRI"))) + return entry.get('status') == 'off' + try: + srvdata = self.cmd.run("/usr/bin/svcs -H -o STA %s" % \ + entry.get('FMRI'))[1][0].split() + except IndexError: + # Occurs when no lines are returned (service not installed) + return False + + entry.set('current_status', srvdata[0]) + if entry.get('status') == 'on': + return srvdata[0] == 'ON' + else: + return srvdata[0] in ['OFF', 'UN', 'MNT', 'DIS', 'DGD'] + + def InstallService(self, entry): + """Install SMF Service entry.""" + # don't take any actions for mode='manual' + if entry.get('mode', 'default') == 'manual': + self.logger.info("Service %s mode set to manual. Skipping " + "installation." % (entry.get('name'))) + return False + self.logger.info("Installing Service %s" % (entry.get('name'))) + if entry.get('status') == 'off': + if entry.get("FMRI").startswith('lrc'): + try: + loc = entry.get("FMRI")[4:].replace('_', '.') + self.logger.debug("Renaming file %s to %s" % \ + (loc, loc.replace('/S', '/DISABLED.S'))) + os.rename(loc, loc.replace('/S', '/DISABLED.S')) + return True + except OSError: + self.logger.error("Failed to rename init script %s" % \ + (loc)) + return False + else: + cmdrc = self.cmd.run("/usr/sbin/svcadm disable %s" % \ + (entry.get('FMRI')))[0] + else: + if entry.get('FMRI').startswith('lrc'): + loc = entry.get("FMRI")[4:].replace('_', '.') + try: + os.stat(loc.replace('/S', '/Disabled.')) + self.logger.debug("Renaming file %s to %s" % \ + (loc.replace('/S', '/DISABLED.S'), loc)) + os.rename(loc.replace('/S', '/DISABLED.S'), loc) + cmdrc = 0 + except OSError: + self.logger.debug("Failed to rename %s to %s" % \ + (loc.replace('/S', '/DISABLED.S'), loc)) + cmdrc = 1 + else: + srvdata = self.cmd.run("/usr/bin/svcs -H -o STA %s" % + entry.get('FMRI'))[1] [0].split() + if srvdata[0] == 'MNT': + cmdarg = 'clear' + else: + cmdarg = 'enable' + cmdrc = self.cmd.run("/usr/sbin/svcadm %s -r %s" % \ + (cmdarg, entry.get('FMRI')))[0] + return cmdrc == 0 + + def Remove(self, svcs): + """Remove Extra SMF entries.""" + # Extra service entry removal is nonsensical + # Extra service entries should be reflected in config, even if disabled + pass + + def FindExtra(self): + """Find Extra SMF Services.""" + allsrv = [name for name, version in \ + [srvc.split() for srvc in + self.cmd.run("/usr/bin/svcs -a -H -o FMRI,STATE")[1]] + if version != 'disabled'] + + [allsrv.remove(svc.get('FMRI')) for svc in self.getSupportedEntries() \ + if svc.get("FMRI") in allsrv] + return [Bcfg2.Client.XML.Element("Service", type='smf', name=name) \ + for name in allsrv] diff --git a/src/lib/Bcfg2/Client/Tools/SYSV.py 
b/src/lib/Bcfg2/Client/Tools/SYSV.py new file mode 100644 index 000000000..eb4a13dfb --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/SYSV.py @@ -0,0 +1,105 @@ +"""This provides bcfg2 support for Solaris SYSV packages.""" + +import tempfile + +import Bcfg2.Client.Tools +import Bcfg2.Client.XML + + +noask = ''' +mail= +instance=overwrite +partial=nocheck +runlevel=nocheck +idepend=nocheck +rdepend=nocheck +space=ask +setuid=nocheck +conflict=nocheck +action=nocheck +basedir=default +''' + + +class SYSV(Bcfg2.Client.Tools.PkgTool): + """Solaris SYSV package support.""" + __execs__ = ["/usr/sbin/pkgadd", "/usr/bin/pkginfo"] + __handles__ = [('Package', 'sysv')] + __req__ = {'Package': ['name', 'version']} + __ireq__ = {'Package': ['name', 'url', 'version']} + name = 'SYSV' + pkgtype = 'sysv' + pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name']))) + + def __init__(self, logger, setup, config): + Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + # noaskfile needs to live beyond __init__ otherwise file is removed + self.noaskfile = tempfile.NamedTemporaryFile() + self.noaskname = self.noaskfile.name + try: + self.noaskfile.write(noask) + # flush admin file contents to disk + self.noaskfile.flush() + self.pkgtool = (self.pkgtool[0] % ("-a %s" % (self.noaskname)), \ + self.pkgtool[1]) + except: + self.pkgtool = (self.pkgtool[0] % (""), self.pkgtool[1]) + + def RefreshPackages(self): + """Refresh memory hashes of packages.""" + self.installed = {} + # Build list of packages + lines = self.cmd.run("/usr/bin/pkginfo -x")[1] + while lines: + # Splitting on whitespace means that packages with spaces in + # their version numbers don't work right. Found this with + # IBM TSM software with package versions like + # "Version 6 Release 1 Level 0.0" + # Should probably be done with a regex but this works. 
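# A hedged sketch of the regex alternative the comment above alludes to.
# Real `pkginfo -x` output is two lines per package -- "PKGINST  name"
# followed by "         (arch) version" -- and the sample below is made up:
import re

sample = ["SUNWcsr          Core Solaris, (Root)",
          "                 (sparc) 11.10.0,REV=2005.01.21.15.53"]
ver_re = re.compile(r'^\s+\((?P<arch>[^)]+)\)\s+(?P<version>\S.*)$')

parsed = {}
for pkgline, verline in zip(sample[::2], sample[1::2]):
    match = ver_re.match(verline)
    if match:
        # Keep the whole version text, embedded spaces and all.
        parsed[pkgline.split()[0]] = match.group('version')
assert parsed == {'SUNWcsr': '11.10.0,REV=2005.01.21.15.53'}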
+            version = lines.pop().split(') ')[1]
+            pkg = lines.pop().split()[0]
+            self.installed[pkg] = version
+
+    def VerifyPackage(self, entry, modlist):
+        """Verify Package status for entry."""
+        if not entry.get('version'):
+            self.logger.info("Insufficient information for Package %s; cannot verify" % entry.get('name'))
+            return False
+
+        desiredVersion = entry.get('version')
+        if desiredVersion == 'any':
+            desiredVersion = self.installed.get(entry.get('name'), desiredVersion)
+
+        cmdrc = self.cmd.run("/usr/bin/pkginfo -q -v \"%s\" %s" % \
+                             (desiredVersion, entry.get('name')))[0]
+
+        if cmdrc != 0:
+            if entry.get('name') in self.installed:
+                self.logger.debug("Package %s version incorrect: have %s want %s" \
+                                  % (entry.get('name'), self.installed[entry.get('name')],
+                                     desiredVersion))
+            else:
+                self.logger.debug("Package %s not installed" % (entry.get("name")))
+        else:
+            if self.setup['quick'] or entry.attrib.get('verify', 'true') == 'false':
+                return True
+            (vstat, odata) = self.cmd.run("/usr/sbin/pkgchk -n %s" % (entry.get('name')))
+            if vstat == 0:
+                return True
+            else:
+                output = [line for line in odata if line[:5] == 'ERROR']
+                if len([name for name in output if name.split()[-1] not in modlist]):
+                    self.logger.debug("Package %s content verification failed" % \
+                                      (entry.get('name')))
+                else:
+                    return True
+        return False
+
+    def RemovePackages(self, packages):
+        """Remove specified Sysv packages."""
+        names = [pkg.get('name') for pkg in packages]
+        self.logger.info("Removing packages: %s" % (names))
+        self.cmd.run("/usr/sbin/pkgrm -a %s -n %s" % \
+                     (self.noaskname, " ".join(names)))
+        self.RefreshPackages()
+        self.extra = self.FindExtraPackages()
diff --git a/src/lib/Bcfg2/Client/Tools/Systemd.py b/src/lib/Bcfg2/Client/Tools/Systemd.py
new file mode 100644
index 000000000..e3f6a4169
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/Systemd.py
@@ -0,0 +1,59 @@
+# This is the bcfg2 support for systemd
+
+"""This is systemd support."""
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+class Systemd(Bcfg2.Client.Tools.SvcTool):
+    """Systemd support for Bcfg2."""
+    name = 'Systemd'
+    __execs__ = ['/bin/systemctl']
+    __handles__ = [('Service', 'systemd')]
+    __req__ = {'Service': ['name', 'status']}
+
+    def get_svc_command(self, service, action):
+        return "/bin/systemctl %s %s.service" % (action, service.get('name'))
+
+    def VerifyService(self, entry, _):
+        """Verify Service status for entry."""
+        cmd = "/bin/systemctl status %s.service " % (entry.get('name'))
+        raw = ''.join(self.cmd.run(cmd)[1])
+
+        if raw.find('Loaded: error') >= 0:
+            entry.set('current_status', 'off')
+            status = False
+
+        elif raw.find('Active: active') >= 0:
+            entry.set('current_status', 'on')
+            if entry.get('status') == 'off':
+                status = False
+            else:
+                status = True
+
+        else:
+            entry.set('current_status', 'off')
+            if entry.get('status') == 'on':
+                status = False
+            else:
+                status = True
+
+        return status
+
+    def InstallService(self, entry):
+        """Install Service entry."""
+        # don't take any actions for mode = 'manual'
+        if entry.get('mode', 'default') == 'manual':
+            self.logger.info("Service %s mode set to manual. Skipping "
+                             "installation."
% (entry.get('name'))) + return True + + if entry.get('status') == 'on': + pstatus = self.cmd.run(self.get_svc_command(entry, 'enable'))[0] + pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0] + + else: + pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0] + pstatus = self.cmd.run(self.get_svc_command(entry, 'disable'))[0] + + return not pstatus diff --git a/src/lib/Bcfg2/Client/Tools/Upstart.py b/src/lib/Bcfg2/Client/Tools/Upstart.py new file mode 100644 index 000000000..7afc8edd7 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/Upstart.py @@ -0,0 +1,93 @@ +"""Upstart support for Bcfg2.""" + +import glob +import re + +import Bcfg2.Client.Tools +import Bcfg2.Client.XML + + +class Upstart(Bcfg2.Client.Tools.SvcTool): + """Upstart service support for Bcfg2.""" + name = 'Upstart' + __execs__ = ['/lib/init/upstart-job', + '/sbin/initctl', + '/usr/sbin/service'] + __handles__ = [('Service', 'upstart')] + __req__ = {'Service': ['name', 'status']} + svcre = re.compile("/etc/init/(?P<name>.*).conf") + + def get_svc_command(self, service, action): + return "/usr/sbin/service %s %s" % (service.get('name'), action) + + def VerifyService(self, entry, _): + """Verify Service status for entry + + Verifying whether or not the service is enabled can be done + at the file level with upstart using the contents of + /etc/init/servicename.conf. All we need to do is make sure + the service is running when it should be. + """ + + if entry.get('status') == 'ignore': + return True + + if entry.get('parameters'): + params = entry.get('parameters') + else: + params = '' + + try: + output = self.cmd.run('/usr/sbin/service %s status %s' % \ + ( entry.get('name'), params ))[1][0] + except IndexError: + self.logger.error("Service %s not an Upstart service" % \ + entry.get('name')) + return False + + match = re.compile("%s( \(.*\))? (start|stop)/(running|waiting)" %entry.get('name') ).match( output ) + if match == None: + # service does not exist + entry.set('current_status', 'off') + status = False + elif match.group(3) == 'running': + # service is running + entry.set('current_status', 'on') + if entry.get('status') == 'off': + status = False + else: + status = True + else: + # service is not running + entry.set('current_status', 'off') + if entry.get('status') == 'on': + status = False + else: + status = True + + return status + + def InstallService(self, entry): + """Install Service for entry.""" + # don't take any actions for mode='manual' + if entry.get('mode', 'default') == 'manual': + self.logger.info("Service %s mode set to manual. Skipping " + "installation." 
% (entry.get('name'))) + return False + if entry.get('status') == 'on': + pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0] + elif entry.get('status') == 'off': + pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0] + # pstatus is true if command failed + return not pstatus + + def FindExtra(self): + """Locate extra Upstart services.""" + specified = [entry.get('name') for entry in self.getSupportedEntries()] + extra = [] + for name in [self.svcre.match(fname).group('name') for fname in + glob.glob("/etc/init/*.conf") \ + if self.svcre.match(fname).group('name') not in specified]: + extra.append(name) + return [Bcfg2.Client.XML.Element('Service', type='upstart', name=name) \ + for name in extra] diff --git a/src/lib/Bcfg2/Client/Tools/VCS.py b/src/lib/Bcfg2/Client/Tools/VCS.py new file mode 100644 index 000000000..e6081dc1c --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/VCS.py @@ -0,0 +1,150 @@ +"""VCS support.""" + +# TODO: +# * git_write_index +# * add svn support +# * integrate properly with reports +missing = [] + +import os +import shutil +import sys +# python-dulwich git imports +try: + import dulwich + import dulwich.index + from dulwich.errors import NotGitRepository +except: + missing.append('git') +# subversion import +try: + import pysvn +except: + missing.append('svn') + +import Bcfg2.Client.Tools + + +class VCS(Bcfg2.Client.Tools.Tool): + """VCS support.""" + name = 'VCS' + __handles__ = [('Path', 'vcs')] + __req__ = {'Path': ['name', + 'type', + 'vcstype', + 'sourceurl', + 'revision']} + + def __init__(self, logger, cfg, setup): + Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup) + self.cfg = cfg + + def git_write_index(self, entry): + """Write the git index""" + pass + + def Verifygit(self, entry, _): + """Verify git repositories""" + try: + repo = dulwich.repo.Repo(entry.get('name')) + except NotGitRepository: + self.logger.info("Repository %s does not exist" % + entry.get('name')) + return False + cur_rev = repo.head() + + if cur_rev != entry.get('revision'): + self.logger.info("At revision %s need to go to revision %s" % + (cur_rev, entry.get('revision'))) + return False + + return True + + def Installgit(self, entry): + """Checkout contents from a git repository""" + destname = entry.get('name') + if os.path.lexists(destname): + # remove incorrect contents + try: + if os.path.isdir(destname): + shutil.rmtree(destname) + else: + os.remove(destname) + except OSError: + self.logger.info('Failed to remove %s' % \ + destname) + return False + + destr = dulwich.repo.Repo.init(destname, mkdir=True) + cl, host_path = dulwich.client.get_transport_and_path(entry.get('sourceurl')) + remote_refs = cl.fetch(host_path, + destr, + determine_wants=destr.object_store.determine_wants_all, + progress=sys.stdout.write) + destr.refs['refs/heads/master'] = entry.get('revision') + dtree = destr[entry.get('revision')].tree + obj_store = destr.object_store + for fname, mode, sha in obj_store.iter_tree_contents(dtree): + fullpath = os.path.join(destname, fname) + try: + f = open(os.path.join(destname, fname), 'wb') + except IOError: + dir = os.path.split(fullpath)[0] + os.makedirs(dir) + f = open(os.path.join(destname, fname), 'wb') + f.write(destr[sha].data) + f.close() + os.chmod(os.path.join(destname, fname), mode) + return True + # FIXME: figure out how to write the git index properly + #iname = "%s/.git/index" % entry.get('name') + #f = open(iname, 'w+') + #entries = obj_store[sha].iteritems() + #try: + # dulwich.index.write_index(f, entries) + #finally: + # 
f.close()
+
+    def Verifysvn(self, entry, _):
+        """Verify svn repositories"""
+        client = pysvn.Client()
+        try:
+            cur_rev = str(client.info(entry.get('name')).revision.number)
+        except:
+            self.logger.info("Repository %s does not exist" % entry.get('name'))
+            return False
+
+        if cur_rev != entry.get('revision'):
+            self.logger.info("At revision %s need to go to revision %s" %
+                             (cur_rev, entry.get('revision')))
+            return False
+
+        return True
+
+    def Installsvn(self, entry):
+        """Checkout contents from a svn repository"""
+        try:
+            client = pysvn.Client()
+            client.update(entry.get('name'), recurse=True)
+        except:
+            self.logger.error("Failed to update repository", exc_info=1)
+            return False
+
+        return True
+
+    def VerifyPath(self, entry, _):
+        vcs = entry.get('vcstype')
+        if vcs in missing:
+            self.logger.error("Missing %s python libraries. Cannot verify" %
+                              vcs)
+            return False
+        ret = getattr(self, 'Verify%s' % vcs)
+        return ret(entry, _)
+
+    def InstallPath(self, entry):
+        vcs = entry.get('vcstype')
+        if vcs in missing:
+            self.logger.error("Missing %s python libraries. "
+                              "Unable to install" % vcs)
+            return False
+        ret = getattr(self, 'Install%s' % vcs)
+        return ret(entry)
diff --git a/src/lib/Bcfg2/Client/Tools/YUM24.py b/src/lib/Bcfg2/Client/Tools/YUM24.py
new file mode 100644
index 000000000..4e488b9da
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/YUM24.py
@@ -0,0 +1,422 @@
+"""This provides bcfg2 support for yum."""
+
+import copy
+import os.path
+import sys
+import yum
+import Bcfg2.Client.XML
+import Bcfg2.Client.Tools.RPMng
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+YAD = True
+CP = ConfigParser.ConfigParser()
+try:
+    if '-C' in sys.argv:
+        CP.read([sys.argv[sys.argv.index('-C') + 1]])
+    else:
+        CP.read(['/etc/bcfg2.conf'])
+    if CP.get('YUMng', 'autodep').lower() == 'false':
+        YAD = False
+except:
+    pass
+
+if not hasattr(Bcfg2.Client.Tools.RPMng, 'RPMng'):
+    raise ImportError
+
+
+def build_yname(pkgname, inst):
+    """Build yum appropriate package name."""
+    ypname = pkgname
+    if inst.get('version') != 'any':
+        ypname += '-'
+    if inst.get('epoch', False):
+        ypname += "%s:" % inst.get('epoch')
+    if inst.get('version', False) and inst.get('version') != 'any':
+        ypname += "%s" % (inst.get('version'))
+    if inst.get('release', False) and inst.get('release') != 'any':
+        ypname += "-%s" % (inst.get('release'))
+    if inst.get('arch', False) and inst.get('arch') != 'any':
+        ypname += ".%s" % (inst.get('arch'))
+    return ypname
+
+
+class YUM24(Bcfg2.Client.Tools.RPMng.RPMng):
+    """Support for Yum packages."""
+    pkgtype = 'yum'
+
+    name = 'YUM24'
+    __execs__ = ['/usr/bin/yum', '/var/lib/rpm']
+    __handles__ = [('Package', 'yum'),
+                   ('Package', 'rpm'),
+                   ('Path', 'ignore')]
+
+    __req__ = {'Package': ['name', 'version']}
+    __ireq__ = {'Package': ['name']}
+    #__ireq__ = {'Package': ['name', 'version']}
+
+    __new_req__ = {'Package': ['name'],
+                   'Instance': ['version', 'release', 'arch']}
+    __new_ireq__ = {'Package': ['name'], \
+                    'Instance': []}
+    #__new_ireq__ = {'Package': ['name', 'uri'], \
+    #                'Instance': ['simplefile', 'version', 'release', 'arch']}
+
+    __gpg_req__ = {'Package': ['name', 'version']}
+    __gpg_ireq__ = {'Package': ['name', 'version']}
+
+    __new_gpg_req__ = {'Package': ['name'],
+                       'Instance': ['version', 'release']}
+    __new_gpg_ireq__ = {'Package': ['name'],
+                        'Instance': ['version', 'release']}
+
+    def __init__(self, logger, setup, config):
+        Bcfg2.Client.Tools.RPMng.RPMng.__init__(self, logger, setup, config)
+        self.__important__ = self.__important__ + \
+                             [entry.get('name') for struct in config \
+                              for entry in struct \
+                              if entry.tag in ['Path', 'ConfigFile'] and \
+                              (entry.get('name').startswith('/etc/yum.d') \
+                               or entry.get('name').startswith('/etc/yum.repos.d')) \
+                              or entry.get('name') == '/etc/yum.conf']
+        self.yum_avail = dict()
+        self.yum_installed = dict()
+        self.yb = yum.YumBase()
+        self.yb.doConfigSetup()
+        self.yb.doTsSetup()
+        self.yb.doRpmDBSetup()
+        yup = self.yb.doPackageLists(pkgnarrow='updates')
+        if hasattr(self.yb.rpmdb, 'pkglist'):
+            yinst = self.yb.rpmdb.pkglist
+        else:
+            yinst = self.yb.rpmdb.getPkgList()
+        for dest, source in [(self.yum_avail, yup.updates),
+                             (self.yum_installed, yinst)]:
+            for pkg in source:
+                if dest is self.yum_avail:
+                    pname = pkg.name
+                    data = {pkg.arch: (pkg.epoch, pkg.version, pkg.release)}
+                else:
+                    pname = pkg[0]
+                    if pkg[1] is None:
+                        a = 'noarch'
+                    else:
+                        a = pkg[1]
+                    if pkg[2] is None:
+                        e = '0'
+                    else:
+                        e = pkg[2]
+                    data = {a: (e, pkg[3], pkg[4])}
+                if pname in dest:
+                    dest[pname].update(data)
+                else:
+                    dest[pname] = dict(data)
+
+    def VerifyPackage(self, entry, modlist):
+        pinned_version = None
+        if entry.get('version', False) == 'auto':
+            # old style entry; synthesize Instances from current installed
+            if entry.get('name') not in self.yum_installed and \
+               entry.get('name') not in self.yum_avail:
+                # new entry; fall back to default
+                entry.set('version', 'any')
+            else:
+                data = copy.copy(self.yum_installed[entry.get('name')])
+                if entry.get('name') in self.yum_avail:
+                    # installed but out of date
+                    data.update(self.yum_avail[entry.get('name')])
+                for (arch, (epoch, vers, rel)) in list(data.items()):
+                    x = Bcfg2.Client.XML.SubElement(entry, "Instance",
+                                                    name=entry.get('name'),
+                                                    version=vers, arch=arch,
+                                                    release=rel, epoch=epoch)
+                    if 'verify_flags' in entry.attrib:
+                        x.set('verify_flags', entry.get('verify_flags'))
+                    if 'verify' in entry.attrib:
+                        x.set('verify', entry.get('verify'))
+
+        if entry.get('type', False) == 'yum':
+            # Check for virtual provides or packages.  If we don't have
+            # this package use Yum to resolve it to a real package name
+            knownPkgs = list(self.yum_installed.keys()) + list(self.yum_avail.keys())
+            if entry.get('name') not in knownPkgs:
+                # If the package name matches something installed
+                # or available then that's the correct package.
+                try:
+                    pkgDict = dict([(i.name, i) for i in \
+                                    self.yb.returnPackagesByDep(entry.get('name'))])
+                except yum.Errors.YumBaseError:
+                    e = sys.exc_info()[1]
+                    self.logger.error('Yum Error Depsolving for %s: %s' % \
+                                      (entry.get('name'), str(e)))
+                    pkgDict = {}
+
+                if len(pkgDict) > 1:
+                    # What do we do with multiple packages?
+                    s = "YUMng: returnPackagesByDep(%s) returned many packages"
+                    self.logger.info(s % entry.get('name'))
+                    s = "YUMng: matching packages: %s"
+                    self.logger.info(s % str(list(pkgDict.keys())))
+                    pkgs = set(pkgDict.keys()) & set(self.yum_installed.keys())
+                    if len(pkgs) > 0:
+                        # Virtual package matches an installed real package
+                        pkg = pkgDict[pkgs.pop()]
+                        s = "YUMng: choosing: %s" % pkg.name
+                        self.logger.info(s)
+                    else:
+                        # What's the right package?
This will fail verify + # and Yum should Do The Right Thing on package install + pkg = None + elif len(pkgDict) == 1: + pkg = list(pkgDict.values())[0] + else: # len(pkgDict) == 0 + s = "YUMng: returnPackagesByDep(%s) returned no results" + self.logger.info(s % entry.get('name')) + pkg = None + + if pkg is not None: + s = "YUMng: remapping virtual package %s to %s" + self.logger.info(s % (entry.get('name'), pkg.name)) + entry.set('name', pkg.name) + + return Bcfg2.Client.Tools.RPMng.RPMng.VerifyPackage(self, entry, + modlist) + + def Install(self, packages, states): + """ + Try and fix everything that RPMng.VerifyPackages() found wrong for + each Package Entry. This can result in individual RPMs being + installed (for the first time), deleted, downgraded + or upgraded. + + NOTE: YUM can not reinstall a package that it thinks is already + installed. + + packages is a list of Package Elements that has + states[<Package Element>] == False + + The following effects occur: + - states{} is conditionally updated for each package. + - self.installed{} is rebuilt, possibly multiple times. + - self.instance_status{} is conditionally updated for each instance + of a package. + - Each package will be added to self.modified[] if its states{} + entry is set to True. + + """ + self.logger.info('Running YUMng.Install()') + + install_pkgs = [] + gpg_keys = [] + upgrade_pkgs = [] + + # Remove extra instances. + # Can not reverify because we don't have a package entry. + if len(self.extra_instances) > 0: + if (self.setup.get('remove') == 'all' or \ + self.setup.get('remove') == 'packages'): + self.RemovePackages(self.extra_instances) + else: + self.logger.info("The following extra package instances will be removed by the '-r' option:") + for pkg in self.extra_instances: + for inst in pkg: + self.logger.info(" %s %s" % \ + ((pkg.get('name'), self.str_evra(inst)))) + + # Figure out which instances of the packages actually need something + # doing to them and place in the appropriate work 'queue'. + for pkg in packages: + insts = [pinst for pinst in pkg \ + if pinst.tag in ['Instance', 'Package']] + if insts: + for inst in insts: + if self.FixInstance(inst, self.instance_status[inst]): + if self.instance_status[inst].get('installed', False) \ + == False: + if pkg.get('name') == 'gpg-pubkey': + gpg_keys.append(inst) + else: + install_pkgs.append(inst) + elif self.instance_status[inst].get('version_fail', \ + False) == True: + upgrade_pkgs.append(inst) + else: + install_pkgs.append(pkg) + + # Install GPG keys. + # Alternatively specify the required keys using 'gpgkey' in the + # repository definition in yum.conf. YUM will install the keys + # automatically. + if len(gpg_keys) > 0: + for inst in gpg_keys: + self.logger.info("Installing GPG keys.") + if inst.get('simplefile') is None: + self.logger.error("GPG key has no simplefile attribute") + continue + key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ + inst.get('simplefile')) + cmdrc, output = self.cmd.run("rpm --import %s" % key_arg) + if cmdrc != 0: + self.logger.debug("Unable to install %s-%s" % \ + (self.instance_status[inst].get('pkg').get('name'), \ + self.str_evra(inst))) + else: + self.logger.debug("Installed %s-%s-%s" % \ + (self.instance_status[inst].get('pkg').get('name'), \ + inst.get('version'), inst.get('release'))) + self.RefreshPackages() + self.gpg_keyids = self.getinstalledgpg() + pkg = self.instance_status[gpg_keys[0]].get('pkg') + states[pkg] = self.VerifyPackage(pkg, []) + + # Install packages. 
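# What the install pass below feeds to yum: build_yname() (defined near the
# top of this file) flattens an Instance into a single
# name[-][epoch:][version][-release][.arch] argument.  A sketch with a plain
# dict standing in for the element, since dict.get matches inst.get here
# (sample values are hypothetical):
def yname(d):
    out = d['name']
    if d.get('version') != 'any':
        out += '-'
    if d.get('epoch'):
        out += '%s:' % d['epoch']
    if d.get('version') and d.get('version') != 'any':
        out += d['version']
    if d.get('release') and d.get('release') != 'any':
        out += '-%s' % d['release']
    if d.get('arch') and d.get('arch') != 'any':
        out += '.%s' % d['arch']
    return out

sample = {'name': 'kernel', 'epoch': '1', 'version': '2.6.18',
          'release': '8.el5', 'arch': 'x86_64'}
assert yname(sample) == 'kernel-1:2.6.18-8.el5.x86_64'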
+ if len(install_pkgs) > 0: + self.logger.info("Attempting to install packages") + + if YAD: + pkgtool = "/usr/bin/yum -d0 -y install %s" + else: + pkgtool = "/usr/bin/yum -d0 install %s" + + install_args = [] + for inst in install_pkgs: + pkg_arg = self.instance_status[inst].get('pkg').get('name') + install_args.append(build_yname(pkg_arg, inst)) + + cmdrc, output = self.cmd.run(pkgtool % " ".join(install_args)) + if cmdrc == 0: + # The yum command succeeded. All packages installed. + self.logger.info("Single Pass for Install Succeeded") + self.RefreshPackages() + else: + # The yum command failed. No packages installed. + # Try installing instances individually. + self.logger.error("Single Pass Install of Packages Failed") + installed_instances = [] + for inst in install_pkgs: + pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst) + + cmdrc, output = self.cmd.run(pkgtool % pkg_arg) + if cmdrc == 0: + installed_instances.append(inst) + else: + self.logger.debug("%s %s would not install." % \ + (self.instance_status[inst].get('pkg').get('name'), \ + self.str_evra(inst))) + self.RefreshPackages() + + # Fix upgradeable packages. + if len(upgrade_pkgs) > 0: + self.logger.info("Attempting to upgrade packages") + + if YAD: + pkgtool = "/usr/bin/yum -d0 -y update %s" + else: + pkgtool = "/usr/bin/yum -d0 update %s" + + upgrade_args = [] + for inst in upgrade_pkgs: + pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst) + upgrade_args.append(pkg_arg) + + cmdrc, output = self.cmd.run(pkgtool % " ".join(upgrade_args)) + if cmdrc == 0: + # The yum command succeeded. All packages installed. + self.logger.info("Single Pass for Install Succeeded") + self.RefreshPackages() + else: + # The yum command failed. No packages installed. + # Try installing instances individually. + self.logger.error("Single Pass Install of Packages Failed") + installed_instances = [] + for inst in upgrade_pkgs: + pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst) + cmdrc, output = self.cmd.run(pkgtool % pkg_arg) + if cmdrc == 0: + installed_instances.append(inst) + else: + self.logger.debug("%s %s would not install." % \ + (self.instance_status[inst].get('pkg').get('name'), \ + self.str_evra(inst))) + + self.RefreshPackages() + + if not self.setup['kevlar']: + for pkg_entry in [p for p in packages if self.canVerify(p)]: + self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name'))) + states[pkg_entry] = self.VerifyPackage(pkg_entry, \ + self.modlists.get(pkg_entry, [])) + + for entry in [ent for ent in packages if states[ent]]: + self.modified.append(entry) + + def RemovePackages(self, packages): + """ + Remove specified entries. + + packages is a list of Package Entries with Instances generated + by FindExtraPackages(). + """ + self.logger.debug('Running YUMng.RemovePackages()') + + if YAD: + pkgtool = "/usr/bin/yum -d0 -y erase %s" + else: + pkgtool = "/usr/bin/yum -d0 erase %s" + + erase_args = [] + for pkg in packages: + for inst in pkg: + if pkg.get('name') != 'gpg-pubkey': + pkg_arg = pkg.get('name') + '-' + if inst.get('epoch', False): + pkg_arg = pkg_arg + inst.get('epoch') + ':' + pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release') + if inst.get('arch', False): + pkg_arg = pkg_arg + '.' 
+ inst.get('arch') + erase_args.append(pkg_arg) + else: + pkgspec = {'name': pkg.get('name'), + 'version': inst.get('version'), + 'release': inst.get('release')} + self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ + % (pkgspec.get('name'), self.str_evra(pkgspec))) + self.logger.info(" This package will be deleted in a future version of the RPMng driver.") + + cmdrc, output = self.cmd.run(pkgtool % " ".join(erase_args)) + if cmdrc == 0: + self.modified += packages + for pkg in erase_args: + self.logger.info("Deleted %s" % (pkg)) + else: + self.logger.info("Bulk erase failed with errors:") + self.logger.debug("Erase results = %s" % output) + self.logger.info("Attempting individual erase for each package.") + for pkg in packages: + pkg_modified = False + for inst in pkg: + if pkg.get('name') != 'gpg-pubkey': + pkg_arg = pkg.get('name') + '-' + if 'epoch' in inst.attrib: + pkg_arg = pkg_arg + inst.get('epoch') + ':' + pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release') + if 'arch' in inst.attrib: + pkg_arg = pkg_arg + '.' + inst.get('arch') + else: + self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ + % (pkg.get('name'), self.str_evra(pkg))) + self.logger.info(" This package will be deleted in a future version of the RPMng driver.") + continue + + cmdrc, output = self.cmd.run(self.pkgtool % pkg_arg) + if cmdrc == 0: + pkg_modified = True + self.logger.info("Deleted %s" % pkg_arg) + else: + self.logger.error("unable to delete %s" % pkg_arg) + self.logger.debug("Failure = %s" % output) + if pkg_modified == True: + self.modified.append(pkg) + + self.RefreshPackages() + self.extra = self.FindExtraPackages() diff --git a/src/lib/Bcfg2/Client/Tools/YUMng.py b/src/lib/Bcfg2/Client/Tools/YUMng.py new file mode 100644 index 000000000..244b66cf4 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/YUMng.py @@ -0,0 +1,937 @@ +"""This provides bcfg2 support for yum.""" + +import copy +import os.path +import sys +import yum +import yum.packages +import yum.rpmtrans +import yum.callbacks +import yum.Errors +import yum.misc +import rpmUtils.arch +import Bcfg2.Client.XML +import Bcfg2.Client.Tools +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + + +def build_yname(pkgname, inst): + """Build yum appropriate package name.""" + d = {} + if isinstance(inst, yum.packages.PackageObject): + for i in ['name', 'epoch', 'version', 'release', 'arch']: + d[i] = getattr(inst, i) + else: + d['name'] = pkgname + if inst.get('version') != 'any': + d['version'] = inst.get('version') + if inst.get('epoch', False): + d['epoch'] = inst.get('epoch') + if inst.get('release', False) and inst.get('release') != 'any': + d['release'] = inst.get('release') + if inst.get('arch', False) and inst.get('arch') != 'any': + d['arch'] = inst.get('arch') + return d + + +def short_yname(nevra): + d = nevra.copy() + if 'version' in d: + d['ver'] = d['version'] + del d['version'] + if 'release' in d: + d['rel'] = d['release'] + del d['release'] + return d + + +def nevraString(p): + if isinstance(p, yum.packages.PackageObject): + return str(p) + else: + ret = "" + for i, j in [('epoch', '%s:'), ('name', '%s'), ('version', '-%s'), + ('release', '-%s'), ('arch', '.%s')]: + if i in p: + ret = "%s%s" % (ret, j % p[i]) + return ret + + +class Parser(ConfigParser.ConfigParser): + + def get(self, section, option, default): + """ + Override ConfigParser.get: If the request option is not in the + config file then return the value of default rather than raise + an exception. 
We still raise exceptions on missing sections. + """ + try: + return ConfigParser.ConfigParser.get(self, section, option) + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + return default + + +class RPMDisplay(yum.rpmtrans.RPMBaseCallback): + """We subclass the default RPM transaction callback so that we + can control Yum's verbosity and pipe it through the right logger.""" + + def __init__(self, logger): + yum.rpmtrans.RPMBaseCallback.__init__(self) + self.logger = logger + self.state = None + self.package = None + + def event(self, package, action, te_current, te_total, + ts_current, ts_total): + """ + @param package: A yum package object or simple string of a package name + @param action: A yum.constant transaction set state or in the obscure + rpm repackage case it could be the string 'repackaging' + @param te_current: Current number of bytes processed in the transaction + element being processed + @param te_total: Total number of bytes in the transaction element being + processed + @param ts_current: number of processes completed in whole transaction + @param ts_total: total number of processes in the transaction. + """ + + if self.package != str(package) or action != self.state: + msg = "%s: %s" % (self.action[action], package) + self.logger.info(msg) + self.state = action + self.package = str(package) + + def scriptout(self, package, msgs): + """Handle output from package scripts.""" + + if msgs: + msg = "%s: %s" % (package, msgs) + self.logger.debug(msg) + + def errorlog(self, msg): + """Deal with error reporting.""" + self.logger.error(msg) + + +class YumDisplay(yum.callbacks.ProcessTransBaseCallback): + """Class to handle display of what step we are in the Yum transaction + such as downloading packages, etc.""" + + def __init__(self, logger): + self.logger = logger + + +class YUMng(Bcfg2.Client.Tools.PkgTool): + """Support for Yum packages.""" + pkgtype = 'yum' + + name = 'YUMng' + __execs__ = [] + __handles__ = [('Package', 'yum'), + ('Package', 'rpm'), + ('Path', 'ignore')] + + __req__ = {'Package': ['name'], + 'Path': ['type']} + __ireq__ = {'Package': ['name']} + + conflicts = ['YUM24', 'RPMng'] + + def __init__(self, logger, setup, config): + self._loadYumBase(setup=setup, logger=logger) + Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + self.ignores = [entry.get('name') for struct in config \ + for entry in struct \ + if entry.tag == 'Path' and \ + entry.get('type') == 'ignore'] + self.instance_status = {} + self.extra_instances = [] + self.modlists = {} + self._loadConfig() + self.__important__ = self.__important__ + \ + [entry.get('name') for struct in config \ + for entry in struct \ + if entry.tag == 'Path' and \ + (entry.get('name').startswith('/etc/yum.d') \ + or entry.get('name').startswith('/etc/yum.repos.d')) \ + or entry.get('name') == '/etc/yum.conf'] + self.yum_avail = dict() + self.yum_installed = dict() + + yup = self.yb.doPackageLists(pkgnarrow='updates') + if hasattr(self.yb.rpmdb, 'pkglist'): + yinst = self.yb.rpmdb.pkglist + else: + yinst = self.yb.rpmdb.getPkgList() + for dest, source in [(self.yum_avail, yup.updates), + (self.yum_installed, yinst)]: + for pkg in source: + if dest is self.yum_avail: + pname = pkg.name + data = [(pkg.arch, (pkg.epoch, pkg.version, pkg.release))] + else: + pname = pkg[0] + data = [(pkg[1], (pkg[2], pkg[3], pkg[4]))] + if pname in dest: + dest[pname].update(data) + else: + dest[pname] = dict(data) + + def _loadYumBase(self, setup=None, logger=None): + ''' this may be called before 
PkgTool.__init__() is called on + this object (when the YUMng object is first instantiated; + PkgTool.__init__() calls RefreshPackages(), which requires a + YumBase object already exist), or after __init__() has + completed, when we reload the yum config before installing + packages. Consequently, we support both methods by allowing + setup and logger, the only object properties we use in this + function, to be passed as keyword arguments or to be omitted + and drawn from the object itself.''' + self.yb = yum.YumBase() + + if setup is None: + setup = self.setup + if logger is None: + logger = self.logger + + if setup['debug']: + debuglevel = 3 + elif setup['verbose']: + debuglevel = 2 + else: + debuglevel = 0 + + try: + self.yb.preconf.debuglevel = debuglevel + self.yb._getConfig() + except AttributeError: + self.yb._getConfig(self.yb.conf.config_file_path, + debuglevel=debuglevel) + + try: + self.yb.doConfigSetup() + self.yb.doTsSetup() + self.yb.doRpmDBSetup() + except yum.Errors.RepoError: + err = sys.exc_info()[1] + logger.error("YUMng Repository error: %s" % err) + raise Bcfg2.Client.Tools.toolInstantiationError + except Exception: + err = sys.exc_info()[1] + logger.error("YUMng error: %s" % err) + raise Bcfg2.Client.Tools.toolInstantiationError + + def _loadConfig(self): + # Process the YUMng section from the config file. + CP = Parser() + CP.read(self.setup.get('setup')) + truth = ['true', 'yes', '1'] + + # These are all boolean flags, either we do stuff or we don't + self.pkg_checks = CP.get(self.name, "pkg_checks", "true").lower() \ + in truth + self.pkg_verify = CP.get(self.name, "pkg_verify", "true").lower() \ + in truth + self.doInstall = CP.get(self.name, "installed_action", + "install").lower() == "install" + self.doUpgrade = CP.get(self.name, + "version_fail_action", "upgrade").lower() == "upgrade" + self.doReinst = CP.get(self.name, "verify_fail_action", + "reinstall").lower() == "reinstall" + self.verifyFlags = CP.get(self.name, "verify_flags", + "").lower().replace(' ', ',') + + self.installOnlyPkgs = self.yb.conf.installonlypkgs + if 'gpg-pubkey' not in self.installOnlyPkgs: + self.installOnlyPkgs.append('gpg-pubkey') + + self.logger.debug("YUMng: Install missing: %s" \ + % self.doInstall) + self.logger.debug("YUMng: pkg_checks: %s" % self.pkg_checks) + self.logger.debug("YUMng: pkg_verify: %s" % self.pkg_verify) + self.logger.debug("YUMng: Upgrade on version fail: %s" \ + % self.doUpgrade) + self.logger.debug("YUMng: Reinstall on verify fail: %s" \ + % self.doReinst) + self.logger.debug("YUMng: installOnlyPkgs: %s" \ + % str(self.installOnlyPkgs)) + self.logger.debug("YUMng: verify_flags: %s" % self.verifyFlags) + + def _fixAutoVersion(self, entry): + # old style entry; synthesize Instances from current installed + if entry.get('name') not in self.yum_installed and \ + entry.get('name') not in self.yum_avail: + # new entry; fall back to default + entry.set('version', 'any') + else: + data = copy.copy(self.yum_installed[entry.get('name')]) + if entry.get('name') in self.yum_avail: + # installed but out of date + data.update(self.yum_avail[entry.get('name')]) + for (arch, (epoch, vers, rel)) in list(data.items()): + x = Bcfg2.Client.XML.SubElement(entry, "Instance", + name=entry.get('name'), + version=vers, arch=arch, + release=rel, epoch=epoch) + if 'verify_flags' in entry.attrib: + x.set('verify_flags', entry.get('verify_flags')) + if 'verify' in entry.attrib: + x.set('verify', entry.get('verify')) + + def _buildInstances(self, entry): + instances = [inst for inst in 
entry \
+                     if inst.tag == 'Instance' or inst.tag == 'Package']
+
+        # XXX: Uniquify instances.  Cases where duplicates are returned.
+        # However, the elements aren't comparable.
+
+        if instances == []:
+            # We have an old style no Instance entry. Convert it to new style.
+            instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
+            for attrib in list(entry.attrib.keys()):
+                instance.attrib[attrib] = entry.attrib[attrib]
+            instances = [instance]
+
+        return instances
+
+    def _getGPGKeysAsPackages(self):
+        """Return a list of the GPG RPM signing keys installed on the
+           system as a list of Package Objects."""
+
+        # XXX GPG keys existing in the RPMDB have numbered days
+        # and newer Yum versions will not return information about them
+        if hasattr(self.yb.rpmdb, 'returnGPGPubkeyPackages'):
+            return self.yb.rpmdb.returnGPGPubkeyPackages()
+        return self.yb.rpmdb.searchNevra(name='gpg-pubkey')
+
+    def _verifyHelper(self, po):
+        # This code primarily deals with a yum bug where the PO.verify()
+        # method does not properly take into account multilib sharing of files.
+        # Neither does RPM proper, really....it just ignores the problem.
+        def verify(p):
+            # disabling file checksums is a new feature in yum 3.2.17-ish
+            try:
+                vResult = p.verify(fast=self.setup.get('quick', False))
+            except TypeError:
+                # Older Yum API
+                vResult = p.verify()
+            return vResult
+
+        key = (po.name, po.epoch, po.version, po.release, po.arch)
+        if key in self.verifyCache:
+            results = self.verifyCache[key]
+        else:
+            results = verify(po)
+            self.verifyCache[key] = results
+        if not rpmUtils.arch.isMultiLibArch():
+            return results
+
+        # Okay, deal with a buggy yum multilib and verify
+        packages = self.yb.rpmdb.searchNevra(name=po.name, epoch=po.epoch,
+                                             ver=po.version, rel=po.release)  # find all arches of pkg
+        if len(packages) == 1:
+            return results  # No matching multilib packages
+
+        files = set(po.returnFileEntries())  # Will be the list of common fns
+        common = {}
+        for p in packages:
+            if p != po:
+                files = files & set(p.returnFileEntries())
+        for p in packages:
+            k = (p.name, p.epoch, p.version, p.release, p.arch)
+            self.logger.debug("Multilib Verify: comparing %s to %s" \
+                              % (po, p))
+            if k in self.verifyCache:
+                v = self.verifyCache[k]
+            else:
+                v = verify(p)
+                self.verifyCache[k] = v
+
+            for fn, probs in list(v.items()):
+                # file problems must exist in ALL multilib packages to be real
+                if fn in files:
+                    common[fn] = common.get(fn, 0) + 1
+
+        flag = len(packages) - 1
+        for fn, i in list(common.items()):
+            if i == flag:
+                # this fn had verify problems in all but one of the multilib
+                # packages.  That means it's correct in the package that's
+                # "on top."  Therefore, this is a fake verify problem.
+                if fn in results:
+                    del results[fn]
+
+        return results
+
+    def RefreshPackages(self):
+        """
+        Creates self.installed{} which is a dict of installed packages.
+
+        The dict items are lists of nevra dicts.  This loosely matches the
+        config from the server and what rpmtools uses to specify packages.
+
+        e.g.
+ + self.installed['foo'] = [ {'name':'foo', 'epoch':None, + 'version':'1', 'release':2, + 'arch':'i386'}, + {'name':'foo', 'epoch':None, + 'version':'1', 'release':2, + 'arch':'x86_64'} ] + """ + + self.installed = {} + packages = self._getGPGKeysAsPackages() + \ + self.yb.rpmdb.returnPackages() + for po in packages: + d = {} + for i in ['name', 'epoch', 'version', 'release', 'arch']: + if i == 'arch' and getattr(po, i) is None: + d[i] = 'noarch' + elif i == 'epoch' and getattr(po, i) is None: + d[i] = '0' + else: + d[i] = getattr(po, i) + self.installed.setdefault(po.name, []).append(d) + + def VerifyPackage(self, entry, modlist, pinned_version=None): + """ + Verify Package status for entry. + Performs the following: + - Checks for the presence of required Package Instances. + - Compares the evra 'version' info against self.installed{}. + - RPM level package verify (rpm --verify). + - Checks for the presence of unrequired package instances. + + Produces the following dict and list for YUMng.Install() to use: + For installs/upgrades/fixes of required instances: + instance_status = { <Instance Element Object>: + { 'installed': True|False, + 'version_fail': True|False, + 'verify_fail': True|False, + 'pkg': <Package Element Object>, + 'modlist': [ <filename>, ... ], + 'verify' : [ <rpm --verify results> ] + }, ...... + } + + For deletions of unrequired instances: + extra_instances = [ <Package Element Object>, ..... ] + + Constructs the text prompts for interactive mode. + """ + + if entry.get('version', False) == 'auto': + self._fixAutoVersion(entry) + + self.logger.debug("Verifying package instances for %s" \ + % entry.get('name')) + + self.verifyCache = {} # Used for checking multilib packages + self.modlists[entry] = modlist + instances = self._buildInstances(entry) + packageCache = [] + package_fail = False + qtext_versions = [] + virtPkg = False + pkg_checks = self.pkg_checks and \ + entry.get('pkg_checks', 'true').lower() == 'true' + pkg_verify = self.pkg_verify and \ + entry.get('pkg_verify', 'true').lower() == 'true' + + if entry.get('name') == 'gpg-pubkey': + POs = self._getGPGKeysAsPackages() + pkg_verify = False # No files here to verify + else: + POs = self.yb.rpmdb.searchNevra(name=entry.get('name')) + if len(POs) == 0: + # Some sort of virtual capability? 
Try to resolve it
+                POs = self.yb.rpmdb.searchProvides(entry.get('name'))
+                if len(POs) > 0:
+                    virtPkg = True
+                    self.logger.info("%s appears to be provided by:" \
+                                     % entry.get('name'))
+                    for p in POs:
+                        self.logger.info("  %s" % p)
+
+        for inst in instances:
+            nevra = build_yname(entry.get('name'), inst)
+            snevra = short_yname(nevra)
+            if nevra in packageCache:
+                continue  # Ignore duplicate instances
+            else:
+                packageCache.append(nevra)
+
+            self.logger.debug("Verifying: %s" % nevraString(nevra))
+
+            # Set some defaults here
+            stat = self.instance_status.setdefault(inst, {})
+            stat['installed'] = True
+            stat['version_fail'] = False
+            stat['verify'] = {}
+            stat['verify_fail'] = False
+            stat['pkg'] = entry
+            stat['modlist'] = modlist
+            verify_flags = inst.get('verify_flags', self.verifyFlags)
+            verify_flags = verify_flags.lower().replace(' ', ',').split(',')
+
+            if 'arch' in nevra:
+                # If arch is specified use it to select the package
+                _POs = [ p for p in POs if p.arch == nevra['arch'] ]
+            else:
+                _POs = POs
+            if len(_POs) == 0:
+                # Package (name, arch) not installed
+                self.logger.debug("  %s is not installed" % nevraString(nevra))
+                stat['installed'] = False
+                package_fail = True
+                qtext_versions.append("I(%s)" % nevra)
+                continue
+
+            if not pkg_checks:
+                continue
+
+            # Check EVR
+            if virtPkg:
+                self.logger.debug("  Not checking version for virtual package")
+                _POs = [po for po in POs]  # Make a copy
+            elif entry.get('name') == 'gpg-pubkey':
+                if 'version' not in nevra:
+                    m = "Skipping verify: gpg-pubkey without an RPM version."
+                    self.logger.warning(m)
+                    continue
+                if 'release' not in nevra:
+                    m = "Skipping verify: gpg-pubkey without an RPM release."
+                    self.logger.warning(m)
+                    continue
+                _POs = [p for p in POs if p.version == nevra['version'] \
+                        and p.release == nevra['release']]
+            else:
+                _POs = self.yb.rpmdb.searchNevra(**snevra)
+            if len(_POs) == 0:
+                package_fail = True
+                stat['version_fail'] = True
+                # Just choose the first pkg for the error message
+                self.logger.info("  %s: Wrong version installed.  "
+                                 "Want %s, but have %s" % (entry.get("name"),
+                                                           nevraString(nevra),
+                                                           nevraString(POs[0])))
+                qtext_versions.append("U(%s)" % str(POs[0]))
+                continue
+
+            if self.setup.get('quick', False):
+                # Passed -q on the command line
+                continue
+            if not (pkg_verify and \
+                    inst.get('pkg_verify', 'true').lower() == 'true'):
+                continue
+
+            # XXX: We ignore GPG sig checking the package as it
+            # has nothing to do with the individual file hash/size/etc.
+            # GPG checking the package only examines some header/rpmdb
+            # wacky-ness, and will not properly detect a compromised rpmdb.
+            # Yum's verify routine does not support it for that reason.
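# The filter a few lines below suppresses a verify failure when its type is
# disabled via verify_flags ("no" + type), mirroring rpm's --verify flags.
# Reduced to a sketch with (type, message) tuples standing in for yum's
# problem objects (hypothetical data):
verify_flags = ['nomd5', 'nomtime']
problems = [('md5', 'md5 digest differs'),
            ('size', 'file size differs'),
            ('ghost', 'ghost file')]

kept = [(ptype, msg) for ptype, msg in problems
        if 'no' + ptype not in verify_flags
        and ptype not in ['missingok', 'ghost']]
assert kept == [('size', 'file size differs')]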
+
+            if len(_POs) > 1:
+                self.logger.debug("  Verify Instance found many packages:")
+                for po in _POs:
+                    self.logger.debug("    %s" % str(po))
+
+            try:
+                vResult = self._verifyHelper(_POs[0])
+            except Exception:
+                e = sys.exc_info()[1]
+                # Unknown Yum exception
+                self.logger.warning("  Verify Exception: %s" % str(e))
+                package_fail = True
+                continue
+
+            # Now take out the Yum-specific objects, modlist entries,
+            # and ignored problems
+            ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
+                      [ig.get('name') for ig in inst.findall('Ignore')] + \
+                      self.ignores
+            for fn, probs in list(vResult.items()):
+                if fn in modlist:
+                    self.logger.debug("  %s in modlist, skipping" % fn)
+                    continue
+                if fn in ignores:
+                    self.logger.debug("  %s in ignore list, skipping" % fn)
+                    continue
+                tmp = []
+                for p in probs:
+                    if p.type == 'missing' and os.path.islink(fn):
+                        continue
+                    elif 'no' + p.type in verify_flags:
+                        continue
+                    if p.type not in ['missingok', 'ghost']:
+                        tmp.append((p.type, p.message))
+                if tmp != []:
+                    stat['verify'][fn] = tmp
+
+            if stat['verify'] != {}:
+                stat['verify_fail'] = True
+                package_fail = True
+                self.logger.debug("It is suggested that you either manage "
+                                  "these files, revert the changes, or ignore "
+                                  "false failures:")
+                self.logger.debug("  Verify Problems:")
+                for fn, probs in list(stat['verify'].items()):
+                    self.logger.debug("  %s" % fn)
+                    for p in probs:
+                        self.logger.debug("    %s: %s" % p)
+
+        if len(POs) > 0:
+            # Is this an install-only package?  We just look at the first one.
+            provides = set([p[0] for p in POs[0].provides] + [POs[0].name])
+            install_only = len(set(self.installOnlyPkgs) & provides) > 0
+        else:
+            install_only = False
+
+        if virtPkg or (install_only and not self.setup['kevlar']):
+            # XXX: virtual capability supplied, we are probably dealing
+            # with multiple packages of different names.  This check
+            # doesn't make a lot of sense in this case.
+            # XXX: install_only: Yum may clean some of these up itself.
+            # Otherwise having multiple instances of install-only packages
+            # is considered correct.
+            self.extra_instances = None
+        else:
+            self.extra_instances = self.FindExtraInstances(entry, POs)
+        if self.extra_instances is not None:
+            package_fail = True
+
+        return not package_fail
+
+    def FindExtraInstances(self, entry, POs):
+        """
+        Check for installed instances that are not in the config.
+        Return a Package Entry with Instances to remove, or None if there
+        are no Instances to remove.
+ + """ + if len(POs) == 0: + return None + name = entry.get('name') + extra_entry = Bcfg2.Client.XML.Element('Package', name=name, + type=self.pkgtype) + instances = self._buildInstances(entry) + _POs = [p for p in POs] # Shallow copy + + # Algorythm is sensitive to duplicates, check for them + checked = [] + for inst in instances: + nevra = build_yname(name, inst) + snevra = short_yname(nevra) + pkgs = self.yb.rpmdb.searchNevra(**snevra) + flag = True + if len(pkgs) > 0: + if pkgs[0] in checked: + continue # We've already taken care of this Instance + else: + checked.append(pkgs[0]) + _POs.remove(pkgs[0]) + + for p in _POs: + self.logger.debug(" Extra Instance Found: %s" % str(p)) + Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', + epoch=p.epoch, name=p.name, version=p.version, + release=p.release, arch=p.arch) + + if _POs == []: + return None + else: + return extra_entry + + def FindExtraPackages(self): + """Find extra packages.""" + packages = [e.get('name') for e in self.getSupportedEntries()] + extras = [] + + for p in list(self.installed.keys()): + if p not in packages: + entry = Bcfg2.Client.XML.Element('Package', name=p, + type=self.pkgtype) + for i in self.installed[p]: + inst = Bcfg2.Client.XML.SubElement(entry, + 'Instance', + epoch=i['epoch'], + version=i['version'], + release=i['release'], + arch=i['arch']) + + extras.append(entry) + + return extras + + def _installGPGKey(self, inst, key_file): + """Examine the GPG keys carefully before installation. Avoid + installing duplicate keys. Returns True on successful install.""" + + # RPM Transaction Set + ts = self.yb.rpmdb.readOnlyTS() + + if not os.path.exists(key_file): + self.logger.debug("GPG Key file %s not installed" % key_file) + return False + + rawkey = open(key_file).read() + gpg = yum.misc.getgpgkeyinfo(rawkey) + + ver = yum.misc.keyIdToRPMVer(gpg['keyid']) + rel = yum.misc.keyIdToRPMVer(gpg['timestamp']) + if not (ver == inst.get('version') and rel == inst.get('release')): + self.logger.info("GPG key file %s does not match gpg-pubkey-%s-%s"\ + % (key_file, inst.get('version'), + inst.get('release'))) + return False + + if not yum.misc.keyInstalled(ts, gpg['keyid'], + gpg['timestamp']) == 0: + result = ts.pgpImportPubkey(yum.misc.procgpgkey(rawkey)) + else: + self.logger.debug("gpg-pubkey-%s-%s already installed"\ + % (inst.get('version'), + inst.get('release'))) + return True + + if result != 0: + self.logger.debug("Unable to install %s-%s" % \ + (self.instance_status[inst].get('pkg').get('name'), + self.str_evra(inst))) + return False + else: + self.logger.debug("Installed %s-%s-%s" % \ + (self.instance_status[inst].get('pkg').get('name'), + inst.get('version'), inst.get('release'))) + return True + + def _runYumTransaction(self): + def cleanup(): + self.yb.closeRpmDB() + self.RefreshPackages() + + rDisplay = RPMDisplay(self.logger) + yDisplay = YumDisplay(self.logger) + # Run the Yum Transaction + try: + rescode, restring = self.yb.buildTransaction() + except yum.Errors.YumBaseError: + e = sys.exc_info()[1] + self.logger.error("Yum transaction error: %s" % str(e)) + cleanup() + return + + self.logger.debug("Initial Yum buildTransaction() run said:") + self.logger.debug(" resultcode: %s, msgs: %s" \ + % (rescode, restring)) + + if rescode != 1: + # Transaction built successfully, run it + try: + self.yb.processTransaction(callback=yDisplay, + rpmDisplay=rDisplay) + self.logger.info("Single Pass for Install Succeeded") + except yum.Errors.YumBaseError: + e = sys.exc_info()[1] + self.logger.error("Yum transaction 
error: %s" % str(e)) + cleanup() + return + else: + # The yum command failed. No packages installed. + # Try installing instances individually. + self.logger.error("Single Pass Install of Packages Failed") + skipBroken = self.yb.conf.skip_broken + self.yb.conf.skip_broken = True + try: + rescode, restring = self.yb.buildTransaction() + if rescode != 1: + self.yb.processTransaction(callback=yDisplay, + rpmDisplay=rDisplay) + self.logger.debug( + "Second pass install did not install all packages") + else: + self.logger.error("Second pass yum install failed.") + self.logger.debug(" %s" % restring) + except yum.Errors.YumBaseError: + e = sys.exc_info()[1] + self.logger.error("Yum transaction error: %s" % str(e)) + + self.yb.conf.skip_broken = skipBroken + + cleanup() + + def Install(self, packages, states): + """ + Try and fix everything that YUMng.VerifyPackages() found wrong for + each Package Entry. This can result in individual RPMs being + installed (for the first time), deleted, downgraded + or upgraded. + + packages is a list of Package Elements that has + states[<Package Element>] == False + + The following effects occur: + - states{} is conditionally updated for each package. + - self.installed{} is rebuilt, possibly multiple times. + - self.instance_status{} is conditionally updated for each instance + of a package. + - Each package will be added to self.modified[] if its states{} + entry is set to True. + + """ + self.logger.debug('Running YUMng.Install()') + + install_pkgs = [] + gpg_keys = [] + upgrade_pkgs = [] + reinstall_pkgs = [] + + def queuePkg(pkg, inst, queue): + if pkg.get('name') == 'gpg-pubkey': + gpg_keys.append(inst) + else: + queue.append(inst) + + # Remove extra instances. + # Can not reverify because we don't have a package entry. + if self.extra_instances is not None and len(self.extra_instances) > 0: + if (self.setup.get('remove') == 'all' or \ + self.setup.get('remove') == 'packages'): + self.RemovePackages(self.extra_instances) + else: + self.logger.info("The following extra package instances will be removed by the '-r' option:") + for pkg in self.extra_instances: + for inst in pkg: + self.logger.info(" %s %s" % \ + ((pkg.get('name'), self.str_evra(inst)))) + + # Figure out which instances of the packages actually need something + # doing to them and place in the appropriate work 'queue'. + for pkg in packages: + insts = [pinst for pinst in pkg \ + if pinst.tag in ['Instance', 'Package']] + if insts: + for inst in insts: + if inst not in self.instance_status: + m = " Asked to install/update package never verified" + p = nevraString(build_yname(pkg.get('name'), inst)) + self.logger.warning("%s: %s" % (m, p)) + continue + status = self.instance_status[inst] + if not status.get('installed', False) and self.doInstall: + queuePkg(pkg, inst, install_pkgs) + elif status.get('version_fail', False) and self.doUpgrade: + queuePkg(pkg, inst, upgrade_pkgs) + elif status.get('verify_fail', False) and self.doReinst: + queuePkg(pkg, inst, reinstall_pkgs) + else: + # Either there was no Install/Version/Verify + # task to be done or the user disabled the actions + # in the configuration. XXX Logging for the latter? + pass + else: + msg = "YUMng: Package tag found where Instance expected: %s" + self.logger.warning(msg % pkg.get('name')) + queuePkg(pkg, pkg, install_pkgs) + + # Install GPG keys. + # Alternatively specify the required keys using 'gpgkey' in the + # repository definition in yum.conf. YUM will install the keys + # automatically. 
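+        # As a sketch, such a repository definition might look like this
+        # (hypothetical repo id, URL and key path):
+        #
+        #   [example-updates]
+        #   name=Example Updates
+        #   baseurl=http://repo.example.com/updates/$basearch/
+        #   gpgcheck=1
+        #   gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-example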
+ if len(gpg_keys) > 0: + self.logger.info("Installing GPG keys.") + for inst in gpg_keys: + if inst.get('simplefile') is None: + self.logger.error("GPG key has no simplefile attribute") + continue + key_file = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ + inst.get('simplefile')) + self._installGPGKey(inst, key_file) + + self.RefreshPackages() + pkg = self.instance_status[gpg_keys[0]].get('pkg') + states[pkg] = self.VerifyPackage(pkg, []) + + # We want to reload all Yum configuration in case we've + # deployed new .repo files we should consider + self._loadYumBase() + + # Install packages. + if len(install_pkgs) > 0: + self.logger.info("Attempting to install packages") + + for inst in install_pkgs: + pkg_arg = self.instance_status[inst].get('pkg').get('name') + self.logger.debug("Installing %s" % pkg_arg) + try: + self.yb.install(**build_yname(pkg_arg, inst)) + except yum.Errors.YumBaseError: + yume = sys.exc_info()[1] + self.logger.error("Error installing package %s: %s" % + (pkg_arg, yume)) + + if len(upgrade_pkgs) > 0: + self.logger.info("Attempting to upgrade packages") + + for inst in upgrade_pkgs: + pkg_arg = self.instance_status[inst].get('pkg').get('name') + self.logger.debug("Upgrading %s" % pkg_arg) + try: + self.yb.update(**build_yname(pkg_arg, inst)) + except yum.Errors.YumBaseError: + yume = sys.exc_info()[1] + self.logger.error("Error upgrading package %s: %s" % + (pkg_arg, yume)) + + if len(reinstall_pkgs) > 0: + self.logger.info("Attempting to reinstall packages") + for inst in reinstall_pkgs: + pkg_arg = self.instance_status[inst].get('pkg').get('name') + self.logger.debug("Reinstalling %s" % pkg_arg) + try: + self.yb.reinstall(**build_yname(pkg_arg, inst)) + except yum.Errors.YumBaseError: + yume = sys.exc_info()[1] + self.logger.error("Error reinstalling package %s: %s" % + (pkg_arg, yume)) + + self._runYumTransaction() + + if not self.setup['kevlar']: + for pkg_entry in [p for p in packages if self.canVerify(p)]: + self.logger.debug("Reverifying Failed Package %s" \ + % (pkg_entry.get('name'))) + states[pkg_entry] = self.VerifyPackage(pkg_entry, + self.modlists.get(pkg_entry, [])) + + for entry in [ent for ent in packages if states[ent]]: + self.modified.append(entry) + + def RemovePackages(self, packages): + """ + Remove specified entries. + + packages is a list of Package Entries with Instances generated + by FindExtraPackages(). 
+ """ + self.logger.debug('Running YUMng.RemovePackages()') + + erase_args = [] + for pkg in packages: + for inst in pkg: + nevra = build_yname(pkg.get('name'), inst) + if pkg.get('name') != 'gpg-pubkey': + self.yb.remove(**nevra) + self.modified.append(pkg) + else: + self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s-%s"\ + % (nevra['name'], nevra['version'], nevra['release'])) + self.logger.info(" This package will be deleted in a future version of the YUMng driver.") + + self._runYumTransaction() + self.extra = self.FindExtraPackages() + + def VerifyPath(self, entry, _): + """Do nothing here since we only verify Path type=ignore""" + return True diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py new file mode 100644 index 000000000..c6cb6e239 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/__init__.py @@ -0,0 +1,353 @@ +"""This contains all Bcfg2 Tool modules""" +import os +import stat +import sys +from subprocess import Popen, PIPE +import time + +import Bcfg2.Client.XML + +__all__ = [tool.split('.')[0] \ + for tool in os.listdir(os.path.dirname(__file__)) \ + if tool.endswith(".py") and tool != "__init__.py"] + +drivers = [item for item in __all__ if item not in ['rpmtools']] +default = [item for item in drivers if item not in ['RPM', 'Yum']] + + +class toolInstantiationError(Exception): + """This error is called if the toolset cannot be instantiated.""" + pass + + +class executor: + """This class runs stuff for us""" + + def __init__(self, logger): + self.logger = logger + + def run(self, command): + """Run a command in a pipe dealing with stdout buffer overloads.""" + p = Popen(command, shell=True, bufsize=16384, + stdin=PIPE, stdout=PIPE, close_fds=True) + output = p.communicate()[0] + for line in output.splitlines(): + self.logger.debug('< %s' % line) + return (p.returncode, output.splitlines()) + + +class Tool: + """ + All tools subclass this. It defines all interfaces that need to be defined. 
+ """ + name = 'Tool' + __execs__ = [] + __handles__ = [] + __req__ = {} + __important__ = [] + + def __init__(self, logger, setup, config): + self.__important__ = [entry.get('name') \ + for struct in config for entry in struct \ + if entry.tag == 'Path' and \ + entry.get('important') in ['true', 'True']] + self.setup = setup + self.logger = logger + if not hasattr(self, '__ireq__'): + self.__ireq__ = self.__req__ + self.config = config + self.cmd = executor(logger) + self.modified = [] + self.extra = [] + self.handled = [entry for struct in self.config for entry in struct \ + if self.handlesEntry(entry)] + for filename in self.__execs__: + try: + mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE]) + if mode & stat.S_IEXEC != stat.S_IEXEC: + self.logger.debug("%s: %s not executable" % \ + (self.name, filename)) + raise toolInstantiationError + except OSError: + raise toolInstantiationError + except: + self.logger.debug("%s failed" % filename, exc_info=1) + raise toolInstantiationError + + def BundleUpdated(self, _, states): + """This callback is used when bundle updates occur.""" + return + + def BundleNotUpdated(self, _, states): + """This callback is used when a bundle is not updated.""" + return + + def Inventory(self, states, structures=[]): + """Dispatch verify calls to underlying methods.""" + if not structures: + structures = self.config.getchildren() + mods = self.buildModlist() + for (struct, entry) in [(struct, entry) for struct in structures \ + for entry in struct.getchildren() \ + if self.canVerify(entry)]: + try: + func = getattr(self, "Verify%s" % (entry.tag)) + states[entry] = func(entry, mods) + except: + self.logger.error( + "Unexpected failure of verification method for entry type %s" \ + % (entry.tag), exc_info=1) + self.extra = self.FindExtra() + + def Install(self, entries, states): + """Install all entries in sublist.""" + for entry in entries: + try: + func = getattr(self, "Install%s" % (entry.tag)) + states[entry] = func(entry) + if states[entry]: + self.modified.append(entry) + except: + self.logger.error("Unexpected failure of install method for entry type %s" \ + % (entry.tag), exc_info=1) + + def Remove(self, entries): + """Remove specified extra entries""" + pass + + def getSupportedEntries(self): + """Return a list of supported entries.""" + return [entry for struct in \ + self.config.getchildren() for entry in \ + struct.getchildren() \ + if self.handlesEntry(entry)] + + def handlesEntry(self, entry): + """Return if entry is handled by this tool.""" + return (entry.tag, entry.get('type')) in self.__handles__ + + def buildModlist(self): + '''Build a list of potentially modified POSIX paths for this entry''' + return [entry.get('name') for struct in self.config.getchildren() \ + for entry in struct.getchildren() \ + if entry.tag in ['Ignore', 'Path']] + + def gatherCurrentData(self, entry): + """Default implementation of the information gathering routines.""" + pass + + def canVerify(self, entry): + """Test if entry has enough information to be verified.""" + if not self.handlesEntry(entry): + return False + + if 'failure' in entry.attrib: + self.logger.error("Entry %s:%s reports bind failure: %s" % \ + (entry.tag, + entry.get('name'), + entry.get('failure'))) + return False + + missing = [attr for attr in self.__req__[entry.tag] \ + if attr not in entry.attrib] + if missing: + self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ + % (entry.tag, entry.get('name'))) + self.logger.error("\t... 
due to absence of %s attribute(s)" % \ + (":".join(missing))) + try: + self.gatherCurrentData(entry) + except: + self.logger.error("Unexpected error in gatherCurrentData", + exc_info=1) + return False + return True + + def FindExtra(self): + """Return a list of extra entries.""" + return [] + + def canInstall(self, entry): + """Test if entry has enough information to be installed.""" + if not self.handlesEntry(entry): + return False + + if 'failure' in entry.attrib: + self.logger.error("Cannot install entry %s:%s with bind failure" % \ + (entry.tag, entry.get('name'))) + return False + + missing = [attr for attr in self.__ireq__[entry.tag] \ + if attr not in entry.attrib or not entry.attrib[attr]] + if missing: + self.logger.error("Incomplete information for entry %s:%s; cannot install" \ + % (entry.tag, entry.get('name'))) + self.logger.error("\t... due to absence of %s attribute" % \ + (":".join(missing))) + return False + return True + + +class PkgTool(Tool): + """ + PkgTool provides a one-pass install with + fallback for use with packaging systems + """ + pkgtool = ('echo %s', ('%s', ['name'])) + pkgtype = 'echo' + name = 'PkgTool' + + def __init__(self, logger, setup, config): + Tool.__init__(self, logger, setup, config) + self.installed = {} + self.Remove = self.RemovePackages + self.FindExtra = self.FindExtraPackages + self.RefreshPackages() + + def VerifyPackage(self, dummy, _): + """Dummy verification method""" + return False + + def Install(self, packages, states): + """ + Run a one-pass install, followed by + single pkg installs in case of failure. + """ + self.logger.info("Trying single pass package install for pkgtype %s" % \ + self.pkgtype) + + data = [tuple([pkg.get(field) for field in self.pkgtool[1][1]]) + for pkg in packages] + pkgargs = " ".join([self.pkgtool[1][0] % datum for datum in data]) + + self.logger.debug("Installing packages: :%s:" % pkgargs) + self.logger.debug("Running command ::%s::" % (self.pkgtool[0] % pkgargs)) + + cmdrc = self.cmd.run(self.pkgtool[0] % pkgargs)[0] + if cmdrc == 0: + self.logger.info("Single Pass Succeded") + # set all package states to true and flush workqueues + pkgnames = [pkg.get('name') for pkg in packages] + for entry in [entry for entry in list(states.keys()) + if entry.tag == 'Package' + and entry.get('type') == self.pkgtype + and entry.get('name') in pkgnames]: + self.logger.debug('Setting state to true for pkg %s' % \ + (entry.get('name'))) + states[entry] = True + self.RefreshPackages() + else: + self.logger.error("Single Pass Failed") + # do single pass installs + self.RefreshPackages() + for pkg in packages: + # handle state tracking updates + if self.VerifyPackage(pkg, []): + self.logger.info("Forcing state to true for pkg %s" % \ + (pkg.get('name'))) + states[pkg] = True + else: + self.logger.info("Installing pkg %s version %s" % + (pkg.get('name'), pkg.get('version'))) + cmdrc = self.cmd.run(self.pkgtool[0] % + (self.pkgtool[1][0] % + tuple([pkg.get(field) for field in self.pkgtool[1][1]]))) + if cmdrc[0] == 0: + states[pkg] = True + else: + self.logger.error("Failed to install package %s" % \ + (pkg.get('name'))) + self.RefreshPackages() + for entry in [ent for ent in packages if states[ent]]: + self.modified.append(entry) + + def RefreshPackages(self): + """Dummy state refresh method.""" + pass + + def RemovePackages(self, packages): + """Dummy implementation of package removal method.""" + pass + + def FindExtraPackages(self): + """Find extra packages.""" + packages = [entry.get('name') for entry in 
self.getSupportedEntries()] + extras = [data for data in list(self.installed.items()) \ + if data[0] not in packages] + return [Bcfg2.Client.XML.Element('Package', name=name, \ + type=self.pkgtype, version=version) \ + for (name, version) in extras] + + +class SvcTool(Tool): + """This class defines basic Service behavior""" + name = 'SvcTool' + + def __init__(self, logger, setup, config): + Tool.__init__(self, logger, setup, config) + self.restarted = [] + + def get_svc_command(self, service, action): + """Return the basename of the command used to start/stop services.""" + return '/etc/init.d/%s %s' % (service.get('name'), action) + + def start_service(self, service): + self.logger.debug('Starting service %s' % service.get('name')) + return self.cmd.run(self.get_svc_command(service, 'start'))[0] + + def stop_service(self, service): + self.logger.debug('Stopping service %s' % service.get('name')) + return self.cmd.run(self.get_svc_command(service, 'stop'))[0] + + def restart_service(self, service): + self.logger.debug('Restarting service %s' % service.get('name')) + restart_target = service.get('target', 'restart') + return self.cmd.run(self.get_svc_command(service, restart_target))[0] + + def check_service(self, service): + # not supported for this driver + return 0 + + def Remove(self, services): + """ Dummy implementation of service removal method """ + if self.setup['servicemode'] != 'disabled': + for entry in services: + entry.set("status", "off") + self.InstallService(entry) + + def BundleUpdated(self, bundle, states): + """The Bundle has been updated.""" + if self.setup['servicemode'] == 'disabled': + return + + for entry in [ent for ent in bundle if self.handlesEntry(ent)]: + mode = entry.get('mode', 'default') + if (mode == 'manual' or + (mode == 'interactive_only' and + not self.setup['interactive'])): + continue + # need to handle servicemode = (build|default) + # need to handle mode = (default|supervised) + rc = None + if entry.get('status') == 'on': + if self.setup['servicemode'] == 'build': + rc = self.stop_service(entry) + elif entry.get('name') not in self.restarted: + if self.setup['interactive']: + prompt = ('Restart service %s?: (y/N): ' % + entry.get('name')) + # py3k compatibility + try: + ans = raw_input(prompt) + except NameError: + ans = input(prompt) + if ans not in ['y', 'Y']: + continue + rc = self.restart_service(entry) + if not rc: + self.restarted.append(entry.get('name')) + else: + rc = self.stop_service(entry) + if rc: + self.logger.error("Failed to manipulate service %s" % + (entry.get('name'))) diff --git a/src/lib/Bcfg2/Client/Tools/launchd.py b/src/lib/Bcfg2/Client/Tools/launchd.py new file mode 100644 index 000000000..c022d32ae --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/launchd.py @@ -0,0 +1,150 @@ +"""launchd support for Bcfg2.""" + +import os + +import Bcfg2.Client.Tools + + +class launchd(Bcfg2.Client.Tools.Tool): + """Support for Mac OS X launchd services.""" + __handles__ = [('Service', 'launchd')] + __execs__ = ['/bin/launchctl', '/usr/bin/defaults'] + name = 'launchd' + __req__ = {'Service': ['name', 'status']} + + ''' + Currently requires the path to the plist to load/unload, + and Name is acually a reverse-fqdn (or the label). + ''' + + def __init__(self, logger, setup, config): + Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + + '''Locate plist file that provides given reverse-fqdn name + /Library/LaunchAgents Per-user agents provided by the administrator. 
+ /Library/LaunchDaemons System wide daemons provided by the administrator. + /System/Library/LaunchAgents Mac OS X Per-user agents. + /System/Library/LaunchDaemons Mac OS X System wide daemons.''' + plistLocations = ["/Library/LaunchDaemons", + "/System/Library/LaunchDaemons"] + self.plistMapping = {} + for directory in plistLocations: + for daemon in os.listdir(directory): + try: + if daemon.endswith(".plist"): + d = daemon[:-6] + else: + d = daemon + label = self.cmd.run('defaults read %s/%s Label' % + (directory, d))[1][0] + self.plistMapping[label] = "%s/%s" % (directory, daemon) + except KeyError: + self.logger.warning("Could not get label from %s/%s" % + (directory, daemon)) + + def FindPlist(self, entry): + return self.plistMapping.get(entry.get('name'), None) + + def os_version(self): + version = "" + try: + vers = self.cmd.run('sw_vers')[1] + except: + return version + + for line in vers: + if line.startswith("ProductVersion"): + version = line.split()[-1] + return version + + def VerifyService(self, entry, _): + """Verify launchd service entry.""" + try: + services = self.cmd.run("/bin/launchctl list")[1] + except IndexError: + # happens when no services are running (should be never) + services = [] + # launchctl output changed in 10.5 + # It is now three columns, with the last + # column being the name of the # service + version = self.os_version() + if version.startswith('10.5') or version.startswith('10.6'): + services = [s.split()[-1] for s in services] + if entry.get('name') in services: + # doesn't check if non-spawning services are Started + return entry.get('status') == 'on' + else: + self.logger.debug("Launchd: Didn't find service Loaded " + "(launchd running under same user as bcfg)") + return entry.get('status') == 'off' + + try: + # Perhaps add the "-w" flag to load and + # unload to modify the file itself! + self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry)) + except IndexError: + return 'on' + return False + + def InstallService(self, entry): + """Enable or disable launchd item.""" + # don't take any actions for mode='manual' + if entry.get('mode', 'default') == 'manual': + self.logger.info("Service %s mode set to manual. Skipping " + "installation." 
% (entry.get('name'))) + return False + name = entry.get('name') + if entry.get('status') == 'on': + self.logger.error("Installing service %s" % name) + cmdrc = self.cmd.run("/bin/launchctl load -w %s" % + self.FindPlist(entry)) + cmdrc = self.cmd.run("/bin/launchctl start %s" % name) + else: + self.logger.error("Uninstalling service %s" % name) + cmdrc = self.cmd.run("/bin/launchctl stop %s" % name) + cmdrc = self.cmd.run("/bin/launchctl unload -w %s" % + self.FindPlist(entry)) + return cmdrc[0] == 0 + + def Remove(self, svcs): + """Remove Extra launchd entries.""" + pass + + def FindExtra(self): + """Find Extra launchd services.""" + try: + allsrv = self.cmd.run("/bin/launchctl list")[1] + except IndexError: + allsrv = [] + + [allsrv.remove(svc) for svc in [entry.get("name") for entry + in self.getSupportedEntries()] if svc in allsrv] + return [Bcfg2.Client.XML.Element("Service", + type='launchd', + name=name, + status='on') for name in allsrv] + + def BundleUpdated(self, bundle, states): + """Reload launchd plist.""" + for entry in [entry for entry in bundle if self.handlesEntry(entry)]: + if not self.canInstall(entry): + self.logger.error("Insufficient information to restart service %s" % + (entry.get('name'))) + else: + name = entry.get('name') + if entry.get('status') == 'on' and self.FindPlist(entry): + self.logger.info("Reloading launchd service %s" % name) + # stop? + self.cmd.run("/bin/launchctl stop %s" % name) + # what if it disappeared? how do we stop services + # that are currently running but the plist disappeared?! + self.cmd.run("/bin/launchctl unload -w %s" % + (self.FindPlist(entry))) + self.cmd.run("/bin/launchctl load -w %s" % + (self.FindPlist(entry))) + self.cmd.run("/bin/launchctl start %s" % name) + else: + # only if necessary.... + self.cmd.run("/bin/launchctl stop %s" % name) + self.cmd.run("/bin/launchctl unload -w %s" % + (self.FindPlist(entry))) diff --git a/src/lib/Bcfg2/Client/Tools/rpmtools.py b/src/lib/Bcfg2/Client/Tools/rpmtools.py new file mode 100755 index 000000000..7441b2c06 --- /dev/null +++ b/src/lib/Bcfg2/Client/Tools/rpmtools.py @@ -0,0 +1,1114 @@ +#!/usr/bin/env python +""" + Module that uses rpm-python to implement the following rpm + functionality for the bcfg2 RPM and YUM client drivers: + + rpm -qa + rpm --verify + rpm --erase + + The code closely follows the rpm C code. + + The code was written to be used in the bcfg2 RPM/YUM drivers. + + Some command line options have been provided to assist with + testing and development, but the output isn't pretty and looks + nothing like rpm output. + + Run 'rpmtools' -h for the options. + +""" + +import grp +import optparse +import os +import pwd +import rpm +import stat +import sys +if sys.version_info >= (2, 5): + import hashlib + py24compat = False +else: + # FIXME: Remove when client python dep is 2.5 or greater + py24compat = True + import md5 + +# Determine what prelink tools we have available. +# The isprelink module is a python extension that examines the ELF headers +# to see if the file has been prelinked. If it is not present a lot of files +# are unnecessarily run through the prelink command. +try: + from isprelink import * + isprelink_imported = True +except ImportError: + isprelink_imported = False + #print '*********************** isprelink not loaded ***********************' + +# If the prelink command is installed on the system then we need to do +# prelink -y on files. 
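+# As a rough sketch, entries in /etc/prelink.conf (parsed below) look like
+# this (hypothetical paths):
+#
+#   -l /usr/bin
+#   -l /usr/lib
+#   -b *.la
+#   -b /usr/lib/locale
+#
+# '-l' marks whitelisted (prelinkable) entries and '-b' blacklisted ones.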
+if os.access('/usr/sbin/prelink', os.X_OK): + prelink_exists = True +else: + prelink_exists = False + +# If we don't have isprelink then we will use the prelink configuration file to +# filter what we have to put through prelink -y. +import re +blacklist = [] +whitelist = [] +try: + f = open('/etc/prelink.conf', mode='r') + for line in f: + if line.startswith('#'): + continue + option, pattern = line.split() + if pattern.startswith('*.'): + pattern = pattern.replace('*.', '\.') + pattern += '$' + elif pattern.startswith('/'): + pattern = '^' + pattern + if option == '-b': + blacklist.append(pattern) + elif option == '-l': + whitelist.append(pattern) + f.close() +except IOError: + pass + +blacklist_re = re.compile('|'.join(blacklist)) +whitelist_re = re.compile('|'.join(whitelist)) + +# Flags that are not defined in rpm-python. +# They are defined in lib/rpmcli.h +# Bit(s) for verifyFile() attributes. +# +RPMVERIFY_NONE = 0 # /*!< */ +RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */ +RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */ +RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */ +RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */ +RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */ +RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */ +RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */ +RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */ +RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */ +RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */ +RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */ +RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */ +RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */ + +RPMVERIFY_FAILURES = \ + (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \ + RPMVERIFY_LGETFILECONFAIL) + +# Bit(s) to control rpm_verify() operation. +# +VERIFY_DEFAULT = 0, # /*!< */ +VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */ +VERIFY_SIZE = 1 << 1 # /*!< from --nosize */ +VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */ +VERIFY_USER = 1 << 3 # /*!< from --nouser */ +VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */ +VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */ +VERIFY_MODE = 1 << 6 # /*!< from --nomode */ +VERIFY_RDEV = 1 << 7 # /*!< from --nodev */ +# /* bits 8-14 unused, reserved for rpmVerifyAttrs */ +VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */ +VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */ +VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */ +VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */ +VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */ +VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */ +VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */ +VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */ +VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */ +VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */ +VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */ +VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */ +VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */ +# /* bits 28-31 used in rpmVerifyAttrs */ + +# Comes from C cource. 
lib/rpmcli.h
+VERIFY_ATTRS = \
+    (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
+     VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
+
+VERIFY_ALL = \
+    (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
+     VERIFY_SIGNATURE | VERIFY_HDRCHK)
+
+
+# Some masks for what checks to NOT do on these file types.
+# The C code actually resets these for every file.
+DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+              RPMVERIFY_LINKTO)
+
+# These file types all have the same mask, but hopefully this will make the
+# code more readable.
+FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
+
+LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+               RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
+
+REG_FLAGS = ~(RPMVERIFY_LINKTO)
+
+
+def s_isdev(mode):
+    """
+    Check to see if a file is a device.
+
+    """
+    return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
+
+def rpmpackagelist(rts):
+    """
+    Equivalent of rpm -qa.  Intended for RefreshPackages() in the RPM Driver.
+    Requires rpmtransactionset() to be run first to get a ts.
+    Returns a list of pkgspec dicts.
+
+    e.g. [{'name': 'foo', 'epoch': '20', 'version': '1.2', 'release': '5', 'arch': 'x86_64'},
+          {'name': 'bar', 'epoch': '10', 'version': '5.2', 'release': '2', 'arch': 'x86_64'}]
+
+    """
+    return [{'name': header[rpm.RPMTAG_NAME],
+             'epoch': header[rpm.RPMTAG_EPOCH],
+             'version': header[rpm.RPMTAG_VERSION],
+             'release': header[rpm.RPMTAG_RELEASE],
+             'arch': header[rpm.RPMTAG_ARCH],
+             'gpgkeyid': header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
+            for header in rts.dbMatch()]
+
+def getindexbykeyword(index_ts, **kwargs):
+    """
+    Return a list of indexes from the rpmdb matching keywords
+    ex: getindexbykeyword(name='foo', version='1', release='1')
+
+    Can be passed any structure that can be indexed by the pkgspec
+    keywords, as other keys are filtered out.
+
+    """
+    lst = []
+    name = kwargs.get('name')
+    if name:
+        index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
+    else:
+        index_mi = index_ts.dbMatch()
+
+    if 'epoch' in kwargs:
+        if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+            kwargs['epoch'] = int(kwargs['epoch'])
+        else:
+            del(kwargs['epoch'])
+
+    keywords = [key for key in list(kwargs.keys()) \
+                if key in ('name', 'epoch', 'version', 'release', 'arch')]
+    keywords_len = len(keywords)
+    for hdr in index_mi:
+        match = 0
+        for keyword in keywords:
+            if hdr[keyword] == kwargs[keyword]:
+                match += 1
+        if match == keywords_len:
+            lst.append(index_mi.instance())
+    del index_mi
+    return lst
+
+def getheadersbykeyword(header_ts, **kwargs):
+    """
+    Borrowed parts of this from Yum.  Need to fix it though.
+    Epoch is not handled right.
+
+    Return a list of headers from the rpmdb matching keywords
+    ex: getheadersbykeyword(name='foo', version='1', release='1')
+
+    Can be passed any structure that can be indexed by the pkgspec
+    keywords, as other keys are filtered out.
+ + """ + lst = [] + name = kwargs.get('name') + if name: + header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name) + else: + header_mi = header_ts.dbMatch() + + if 'epoch' in kwargs: + if kwargs['epoch'] != None and kwargs['epoch'] != 'None': + kwargs['epoch'] = int(kwargs['epoch']) + else: + del(kwargs['epoch']) + + keywords = [key for key in list(kwargs.keys()) \ + if key in ('name', 'epoch', 'version', 'release', 'arch')] + keywords_len = len(keywords) + for hdr in header_mi: + match = 0 + for keyword in keywords: + if hdr[keyword] == kwargs[keyword]: + match += 1 + if match == keywords_len: + lst.append(hdr) + del header_mi + return lst + +def prelink_md5_check(filename): + """ + Checks if a file is prelinked. If it is run it through prelink -y + to get the unprelinked md5 and file size. + + Return 0 if the file was not prelinked, otherwise return the file size. + Always return the md5. + + """ + prelink = False + try: + plf = open(filename, "rb") + except IOError: + return False, 0 + + if prelink_exists: + if isprelink_imported: + plfd = plf.fileno() + if isprelink(plfd): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + prelink = True + elif whitelist_re.search(filename) and not blacklist_re.search(filename): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + prelink = True + + fsize = 0 + if py24compat: + chksum = md5.new() + else: + chksum = hashlib.md5() + while 1: + data = plf.read() + if not data: + break + fsize += len(data) + chksum.update(data) + plf.close() + file_md5 = chksum.hexdigest() + if prelink: + return file_md5, fsize + else: + return file_md5, 0 + +def prelink_size_check(filename): + """ + This check is only done if the prelink_md5_check() is not done first. + + Checks if a file is prelinked. If it is run it through prelink -y + to get the unprelinked file size. + + Return 0 if the file was not prelinked, otherwise return the file size. + + """ + fsize = 0 + try: + plf = open(filename, "rb") + except IOError: + return False + + if prelink_exists: + if isprelink_imported: + plfd = plf.fileno() + if isprelink(plfd): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + + while 1: + data = plf.read() + if not data: + break + fsize += len(data) + + elif whitelist_re.search(filename) and not blacklist_re.search(filename): + # print "***** Warning isprelink extension failed to import ******" + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + + while 1: + data = plf.read() + if not data: + break + fsize += len(data) + + plf.close() + + return fsize + +def debug_verify_flags(vflags): + """ + Decodes the verify flags bits. 
+ """ + if vflags & RPMVERIFY_MD5: + print('RPMVERIFY_MD5') + if vflags & RPMVERIFY_FILESIZE: + print('RPMVERIFY_FILESIZE') + if vflags & RPMVERIFY_LINKTO: + print('RPMVERIFY_LINKTO') + if vflags & RPMVERIFY_USER: + print('RPMVERIFY_USER') + if vflags & RPMVERIFY_GROUP: + print('RPMVERIFY_GROUP') + if vflags & RPMVERIFY_MTIME: + print('RPMVERIFY_MTIME') + if vflags & RPMVERIFY_MODE: + print('RPMVERIFY_MODE') + if vflags & RPMVERIFY_RDEV: + print('RPMVERIFY_RDEV') + if vflags & RPMVERIFY_CONTEXTS: + print('RPMVERIFY_CONTEXTS') + if vflags & RPMVERIFY_READLINKFAIL: + print('RPMVERIFY_READLINKFAIL') + if vflags & RPMVERIFY_READFAIL: + print('RPMVERIFY_READFAIL') + if vflags & RPMVERIFY_LSTATFAIL: + print('RPMVERIFY_LSTATFAIL') + if vflags & RPMVERIFY_LGETFILECONFAIL: + print('RPMVERIFY_LGETFILECONFAIL') + +def debug_file_flags(fflags): + """ + Decodes the file flags bits. + """ + if fflags & rpm.RPMFILE_CONFIG: + print('rpm.RPMFILE_CONFIG') + + if fflags & rpm.RPMFILE_DOC: + print('rpm.RPMFILE_DOC') + + if fflags & rpm.RPMFILE_ICON: + print('rpm.RPMFILE_ICON') + + if fflags & rpm.RPMFILE_MISSINGOK: + print('rpm.RPMFILE_MISSINGOK') + + if fflags & rpm.RPMFILE_NOREPLACE: + print('rpm.RPMFILE_NOREPLACE') + + if fflags & rpm.RPMFILE_GHOST: + print('rpm.RPMFILE_GHOST') + + if fflags & rpm.RPMFILE_LICENSE: + print('rpm.RPMFILE_LICENSE') + + if fflags & rpm.RPMFILE_README: + print('rpm.RPMFILE_README') + + if fflags & rpm.RPMFILE_EXCLUDE: + print('rpm.RPMFILE_EXLUDE') + + if fflags & rpm.RPMFILE_UNPATCHED: + print('rpm.RPMFILE_UNPATCHED') + + if fflags & rpm.RPMFILE_PUBKEY: + print('rpm.RPMFILE_PUBKEY') + +def rpm_verify_file(fileinfo, rpmlinktos, omitmask): + """ + Verify all the files in a package. + + Returns a list of error flags, the file type and file name. The list + entries are strings that are the same as the labels for the bitwise + flags used in the C code. + + """ + (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \ + vflags, fuser, fgroup, fmd5) = fileinfo + + # 1. rpmtsRootDir stuff. What does it do and where to I get it from? + + file_results = [] + flags = vflags + + # Check to see if the file was installed - if not pretend all is ok. + # This is what the rpm C code does! + if fstate != rpm.RPMFILE_STATE_NORMAL: + return file_results + + # Get the installed files stats + try: + lstat = os.lstat(fname) + except OSError: + if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)): + file_results.append('RPMVERIFY_LSTATFAIL') + #file_results.append(fname) + return file_results + + # 5. Contexts? SELinux stuff? + + # Setup what checks to do. This is straight out of the C code. + if stat.S_ISDIR(lstat.st_mode): + flags &= DIR_FLAGS + elif stat.S_ISLNK(lstat.st_mode): + flags &= LINK_FLAGS + elif stat.S_ISFIFO(lstat.st_mode): + flags &= FIFO_FLAGS + elif stat.S_ISCHR(lstat.st_mode): + flags &= CHR_FLAGS + elif stat.S_ISBLK(lstat.st_mode): + flags &= BLK_FLAGS + else: + flags &= REG_FLAGS + + if (fflags & rpm.RPMFILE_GHOST): + flags &= GHOST_FLAGS + + flags &= ~(omitmask | RPMVERIFY_FAILURES) + + # 8. SELinux stuff. 
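+    # Worked example (hypothetical): for a regular file, with omitmask built
+    # from the 'nomtime' option in rpm_verify_package() below, the line above
+    # reduces to:
+    #
+    #   flags = vflags & REG_FLAGS & ~(VERIFY_MTIME | RPMVERIFY_FAILURES)
+    #
+    # RPMVERIFY_MTIME (1 << 5) is cleared, so the mtime comparison below is
+    # skipped while the md5/size/mode/owner checks still run.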
+ + prelink_size = 0 + if flags & RPMVERIFY_MD5: + prelink_md5, prelink_size = prelink_md5_check(fname) + if prelink_md5 == False: + file_results.append('RPMVERIFY_MD5') + file_results.append('RPMVERIFY_READFAIL') + elif prelink_md5 != fmd5: + file_results.append('RPMVERIFY_MD5') + + if flags & RPMVERIFY_LINKTO: + linkto = os.readlink(fname) + if not linkto: + file_results.append('RPMVERIFY_READLINKFAIL') + file_results.append('RPMVERIFY_LINKTO') + else: + if len(rpmlinktos) == 0 or linkto != rpmlinktos: + file_results.append('RPMVERIFY_LINKTO') + + if flags & RPMVERIFY_FILESIZE: + if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done. + prelink_size = prelink_size_check(fname) + if (prelink_size != 0): # This is a prelinked file. + if (prelink_size != fsize): + file_results.append('RPMVERIFY_FILESIZE') + elif lstat.st_size != fsize: # It wasn't a prelinked file. + file_results.append('RPMVERIFY_FILESIZE') + + if flags & RPMVERIFY_MODE: + metamode = fmode + filemode = lstat.st_mode + + # Comparing the type of %ghost files is meaningless, but perms are ok. + if fflags & rpm.RPMFILE_GHOST: + metamode &= ~0xf000 + filemode &= ~0xf000 + + if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \ + (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)): + file_results.append('RPMVERIFY_MODE') + + if flags & RPMVERIFY_RDEV: + if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or + stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)): + file_results.append('RPMVERIFY_RDEV') + elif (s_isdev(fmode) & s_isdev(lstat.st_mode)): + st_rdev = lstat.st_rdev + if frdev != st_rdev: + file_results.append('RPMVERIFY_RDEV') + + if flags & RPMVERIFY_MTIME: + if lstat.st_mtime != fmtime: + file_results.append('RPMVERIFY_MTIME') + + if flags & RPMVERIFY_USER: + try: + user = pwd.getpwuid(lstat.st_uid)[0] + except KeyError: + user = None + if not user or not fuser or (user != fuser): + file_results.append('RPMVERIFY_USER') + + if flags & RPMVERIFY_GROUP: + try: + group = grp.getgrgid(lstat.st_gid)[0] + except KeyError: + group = None + if not group or not fgroup or (group != fgroup): + file_results.append('RPMVERIFY_GROUP') + + return file_results + +def rpm_verify_dependencies(header): + """ + Check package dependencies. Header is an rpm.hdr. + + Don't like opening another ts to do this, but + it was the only way I could find of clearing the ts + out. + + Have asked on the rpm-maint list on how to do + this the right way (28 Feb 2007). + + ts.check() returns: + + ((name, version, release), (reqname, reqversion), \ + flags, suggest, sense) + + """ + _ts1 = rpmtransactionset() + _ts1.addInstall(header, 'Dep Check', 'i') + dep_errors = _ts1.check() + _ts1.closeDB() + return dep_errors + +def rpm_verify_package(vp_ts, header, verify_options): + """ + Verify a single package specified by header. Header is an rpm.hdr. + + If errors are found it returns a dictionary of errors. + + """ + # Set some transaction level flags. + vsflags = 0 + if 'nodigest' in verify_options: + vsflags |= rpm._RPMVSF_NODIGESTS + if 'nosignature' in verify_options: + vsflags |= rpm._RPMVSF_NOSIGNATURES + ovsflags = vp_ts.setVSFlags(vsflags) + + # Map from the Python options to the rpm bitwise flags. 
+ omitmask = 0 + + if 'nolinkto' in verify_options: + omitmask |= VERIFY_LINKTO + if 'nomd5' in verify_options: + omitmask |= VERIFY_MD5 + if 'nosize' in verify_options: + omitmask |= VERIFY_SIZE + if 'nouser' in verify_options: + omitmask |= VERIFY_USER + if 'nogroup' in verify_options: + omitmask |= VERIFY_GROUP + if 'nomtime' in verify_options: + omitmask |= VERIFY_MTIME + if 'nomode' in verify_options: + omitmask |= VERIFY_MODE + if 'nordev' in verify_options: + omitmask |= VERIFY_RDEV + + omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS) + #print 'omitmask =', omitmask + + package_results = {} + + # Check Signatures and Digests. + # No idea what this might return. Need to break something to see. + # Setting the vsflags above determines what gets checked in the header. + hdr_stat = vp_ts.hdrCheck(header.unload()) + if hdr_stat: + package_results['hdr'] = hdr_stat + + # Check Package Depencies. + if 'nodeps' not in verify_options: + dep_stat = rpm_verify_dependencies(header) + if dep_stat: + package_results['deps'] = dep_stat + + # Check all the package files. + if 'nofiles' not in verify_options: + vp_fi = header.fiFromHeader() + for fileinfo in vp_fi: + # Do not bother doing anything with ghost files. + # This is what RPM does. + if fileinfo[4] & rpm.RPMFILE_GHOST: + continue + + # This is only needed because of an inconsistency in the + # rpm.fi interface. + linktos = vp_fi.FLink() + + file_stat = rpm_verify_file(fileinfo, linktos, omitmask) + + #if len(file_stat) > 0 or options.verbose: + if len(file_stat) > 0: + fflags = fileinfo[4] + if fflags & rpm.RPMFILE_CONFIG: + file_stat.append('c') + elif fflags & rpm.RPMFILE_DOC: + file_stat.append('d') + elif fflags & rpm.RPMFILE_GHOST: + file_stat.append('g') + elif fflags & rpm.RPMFILE_LICENSE: + file_stat.append('l') + elif fflags & rpm.RPMFILE_PUBKEY: + file_stat.append('P') + elif fflags & rpm.RPMFILE_README: + file_stat.append('r') + else: + file_stat.append(' ') + + file_stat.append(fileinfo[0]) # The filename. + package_results.setdefault('files', []).append(file_stat) + + # Run the verify script if there is one. + # Do we want this? + #if 'noscripts' not in verify_options: + # script_stat = rpmVerifyscript() + # if script_stat: + # package_results['script'] = script_stat + + # If there have been any errors, add the package nevra to the result. + if len(package_results) > 0: + package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \ + header[rpm.RPMTAG_EPOCH], \ + header[rpm.RPMTAG_VERSION], \ + header[rpm.RPMTAG_RELEASE], \ + header[rpm.RPMTAG_ARCH])) + else: + package_results = None + + # Put things back the way we found them. + vsflags = vp_ts.setVSFlags(ovsflags) + + return package_results + +def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]): + """ + Requires rpmtransactionset() to be run first to get a ts. + + pkgspec is a dict specifying the package + e.g.: + For a single package + { name='foo', epoch='20', version='1', release='1', arch='x86_64'} + + For all packages + {} + + Or any combination of keywords to select one or more packages to verify. + + options is a list of 'rpm --verify' options. Default is to check everything. + e.g.: + [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature', + 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime', + 'nomode', 'nordev' ] + + Returns a list. One list entry per package. Each list entry is a + dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'. + Entries only get added for the failures. If nothing failed, None is + returned. 
+
+    It's all a bit messy and probably needs reviewing.
+
+    [ { 'hdr': [???],
+        'deps': [((name, version, release), (reqname, reqversion),
+                  flags, suggest, sense), .... ],
+        'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+                   ['filename2', 'RPMVERIFY_LSTATFAIL']],
+        'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] },
+      { 'hdr': [???],
+        'deps': [((name, version, release), (reqname, reqversion),
+                  flags, suggest, sense), .... ],
+        'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+                   ['filename2', 'RPMVERIFY_LSTATFAIL']],
+        'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
+
+    """
+    verify_results = []
+    headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
+    for header in headers:
+        result = rpm_verify_package(verify_ts, header, verify_options)
+        if result:
+            verify_results.append(result)
+
+    return verify_results
+
+def rpmtransactionset():
+    """
+    A simple wrapper for rpm.TransactionSet() to keep everything together.
+    Might use it to set some ts level flags later.
+
+    """
+    ts = rpm.TransactionSet()
+    return ts
+
+class Rpmtscallback(object):
+    """
+    Callback for ts.run().  Used for adding, upgrading and removing packages.
+    Starting with all possible reason codes, but bcfg2 will probably only
+    make use of a few of them.
+
+    Mostly just printing stuff at the moment to understand how the callback
+    is used.
+
+    """
+    def __init__(self):
+        self.fdnos = {}
+
+    def callback(self, reason, amount, total, key, client_data):
+        """
+        Generic rpmts callback.
+        """
+        if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+            pass
+            #print 'rpm.RPMCALLBACK_INST_OPEN_FILE'
+        elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+            pass
+            #print 'rpm.RPMCALLBACK_INST_CLOSE_FILE'
+        elif reason == rpm.RPMCALLBACK_INST_START:
+            pass
+            #print 'rpm.RPMCALLBACK_INST_START'
+        elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
+             reason == rpm.RPMCALLBACK_INST_PROGRESS:
+            pass
+            #print 'rpm.RPMCALLBACK_TRANS_PROGRESS or \
+            #       rpm.RPMCALLBACK_INST_PROGRESS'
+        elif reason == rpm.RPMCALLBACK_TRANS_START:
+            pass
+            #print 'rpm.RPMCALLBACK_TRANS_START'
+        elif reason == rpm.RPMCALLBACK_TRANS_STOP:
+            pass
+            #print 'rpm.RPMCALLBACK_TRANS_STOP'
+        elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
+            pass
+            #print 'rpm.RPMCALLBACK_REPACKAGE_START'
+        elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+            pass
+            #print 'rpm.RPMCALLBACK_REPACKAGE_PROGRESS'
+        elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
+            pass
+            #print 'rpm.RPMCALLBACK_REPACKAGE_STOP'
+        elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
+            pass
+            #print 'rpm.RPMCALLBACK_UNINST_PROGRESS'
+        elif reason == rpm.RPMCALLBACK_UNINST_START:
+            pass
+            #print 'rpm.RPMCALLBACK_UNINST_START'
+        elif reason == rpm.RPMCALLBACK_UNINST_STOP:
+            pass
+            #print 'rpm.RPMCALLBACK_UNINST_STOP'
+            #print '***Package ', key, ' deleted ***'
+            # How do we get at this?
+ # RPM.modified += key + elif reason == rpm.RPMCALLBACK_UNPACK_ERROR: + pass + #print 'rpm.RPMCALLBACK_UNPACK_ERROR' + elif reason == rpm.RPMCALLBACK_CPIO_ERROR: + pass + #print 'rpm.RPMCALLBACK_CPIO_ERROR' + elif reason == rpm.RPMCALLBACK_UNKNOWN: + pass + #print 'rpm.RPMCALLBACK_UNKNOWN' + else: + print('ERROR - Fell through callBack') + + #print reason, amount, total, key, client_data + +def rpm_erase(erase_pkgspecs, erase_flags): + """ + pkgspecs is a list of pkgspec dicts specifying packages + e.g.: + For a single package + { name='foo', epoch='20', version='1', release='1', arch='x86_64'} + + """ + erase_ts_flags = 0 + if 'noscripts' in erase_flags: + erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS + if 'notriggers' in erase_flags: + erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS + if 'repackage' in erase_flags: + erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE + + erase_ts = rpmtransactionset() + erase_ts.setFlags(erase_ts_flags) + + for pkgspec in erase_pkgspecs: + idx_list = getindexbykeyword(erase_ts, **pkgspec) + if len(idx_list) > 1 and not 'allmatches' in erase_flags: + #pass + print('ERROR - Multiple package match for erase', pkgspec) + else: + for idx in idx_list: + erase_ts.addErase(idx) + + #for te in erase_ts: + # print "%s %s:%s-%s.%s" % (te.N(), te.E(), te.V(), te.R(), te.A()) + + erase_problems = [] + if 'nodeps' not in erase_flags: + erase_problems = erase_ts.check() + + if erase_problems == []: + erase_ts.order() + erase_callback = Rpmtscallback() + erase_ts.run(erase_callback.callback, 'Erase') + #else: + # print 'ERROR - Dependency failures on package erase' + # print erase_problems + + erase_ts.closeDB() + del erase_ts + return erase_problems + +def display_verify_file(file_results): + ''' + Display file results similar to rpm --verify. + ''' + filename = file_results[-1] + filetype = file_results[-2] + + result_string = '' + + if 'RPMVERIFY_LSTATFAIL' in file_results: + result_string = 'missing ' + else: + if 'RPMVERIFY_FILESIZE' in file_results: + result_string = result_string + 'S' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_MODE' in file_results: + result_string = result_string + 'M' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_MD5' in file_results: + if 'RPMVERIFY_READFAIL' in file_results: + result_string = result_string + '?' + else: + result_string = result_string + '5' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_RDEV' in file_results: + result_string = result_string + 'D' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_LINKTO' in file_results: + if 'RPMVERIFY_READLINKFAIL' in file_results: + result_string = result_string + '?' + else: + result_string = result_string + 'L' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_USER' in file_results: + result_string = result_string + 'U' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_GROUP' in file_results: + result_string = result_string + 'G' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_MTIME' in file_results: + result_string = result_string + 'T' + else: + result_string = result_string + '.' + + print(result_string + ' ' + filetype + ' ' + filename) + sys.stdout.flush() + +#=============================================================================== +# Some options and output to assist with development and testing. +# These are not intended for normal use. 
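+# A few example invocations of the development/test CLI defined below
+# (hypothetical package names; the output is deliberately unpolished):
+#
+#   python rpmtools.py --list
+#   python rpmtools.py --verify --name foo --nomtime --nosize
+#   python rpmtools.py --erase --name foo --version 1.2 --release 5
+#
+# Omitting --name makes the pkgspec match all installed packages.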
+if __name__ == "__main__":
+
+    p = optparse.OptionParser()
+
+    p.add_option('--name', action='store', \
+                 default=None, \
+                 help='''Package name to verify.
+
+                 ******************************************
+                 NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
+                 ******************************************
+
+                 The specified operation will be carried out on all
+                 instances of packages that match the package specification
+                 (name, epoch, version, release, arch).''')
+
+    p.add_option('--epoch', action='store', \
+                 default=None, \
+                 help='''Package epoch.''')
+
+    p.add_option('--version', action='store', \
+                 default=None, \
+                 help='''Package version.''')
+
+    p.add_option('--release', action='store', \
+                 default=None, \
+                 help='''Package release.''')
+
+    p.add_option('--arch', action='store', \
+                 default=None, \
+                 help='''Package arch.''')
+
+    p.add_option('--erase', '-e', action='store_true', \
+                 default=None, \
+                 help='''****************************************************
+                 REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
+                 PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
+                 GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
+                 INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
+                 DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
+                 ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
+                 ****************************************************''')
+
+    p.add_option('--list', '-l', action='store_true', \
+                 help='''List package identity info. rpm -qa-ish equivalent
+                 intended for use in RefreshPackages().''')
+
+    p.add_option('--verify', action='store_true', \
+                 help='''Verify Package(s). Output is only produced after all
+                 packages have been verified. Be patient.''')
+
+    p.add_option('--verbose', '-v', action='store_true', \
+                 help='''Verbose output for --verify option. Output is the
+                 same as rpm -v --verify.''')
+
+    p.add_option('--nodeps', action='store_true', \
+                 default=False, \
+                 help='Do not do dependency testing.')
+
+    p.add_option('--nodigest', action='store_true', \
+                 help='Do not check package digests.')
+
+    p.add_option('--nofiles', action='store_true', \
+                 help='Do not do file checks.')
+
+    p.add_option('--noscripts', action='store_true', \
+                 help='Do not run verification scripts.')
+
+    p.add_option('--nosignature', action='store_true', \
+                 help='Do not do package signature verification.')
+
+    p.add_option('--nolinkto', action='store_true', \
+                 help='Do not do symlink tests.')
+
+    p.add_option('--nomd5', action='store_true', \
+                 help='''Do not do MD5 checksums on files. Note that this does
+                 not work for prelink files yet.''')
+
+    p.add_option('--nosize', action='store_true', \
+                 help='''Do not do file size tests. Note that this does not work
+                 for prelink files yet.''')
+
+    p.add_option('--nouser', action='store_true', \
+                 help='Do not check file user ownership.')
+
+    p.add_option('--nogroup', action='store_true', \
+                 help='Do not check file group ownership.')
+
+    p.add_option('--nomtime', action='store_true', \
+                 help='Do not check file modification times.')
+
+    p.add_option('--nomode', action='store_true', \
+                 help='Do not check file modes (permissions).')
+
+    p.add_option('--nordev', action='store_true', \
+                 help='Do not check device node.')
+
+    p.add_option('--notriggers', action='store_true', \
+                 help='Do not generate triggers on erase.')
+
+    p.add_option('--repackage', action='store_true', \
+                 help='''Do repackage on erase. Packages are put
+                 in /var/spool/repackage.''')
+
+    p.add_option('--allmatches', action='store_true', \
+                 help='''Remove all package instances that match the
+                 pkgspec.
+
+                 ***************************************************
+                 NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
+                 THAT MEANS ALL PACKAGES!!!!
+                 ***************************************************''')
+
+    options, arguments = p.parse_args()
+
+    pkgspec = {}
+    rpm_options = []
+
+    if options.nodeps:
+        rpm_options.append('nodeps')
+
+    if options.nodigest:
+        rpm_options.append('nodigest')
+
+    if options.nofiles:
+        rpm_options.append('nofiles')
+
+    if options.noscripts:
+        rpm_options.append('noscripts')
+
+    if options.nosignature:
+        rpm_options.append('nosignature')
+
+    if options.nolinkto:
+        rpm_options.append('nolinkto')
+
+    if options.nomd5:
+        rpm_options.append('nomd5')
+
+    if options.nosize:
+        rpm_options.append('nosize')
+
+    if options.nouser:
+        rpm_options.append('nouser')
+
+    if options.nogroup:
+        rpm_options.append('nogroup')
+
+    if options.nomtime:
+        rpm_options.append('nomtime')
+
+    if options.nomode:
+        rpm_options.append('nomode')
+
+    if options.nordev:
+        rpm_options.append('nordev')
+
+    if options.repackage:
+        rpm_options.append('repackage')
+
+    if options.allmatches:
+        rpm_options.append('allmatches')
+
+    main_ts = rpmtransactionset()
+
+    cmdline_pkgspec = {}
+    if options.name != 'all':
+        if options.name:
+            cmdline_pkgspec['name'] = str(options.name)
+        if options.epoch:
+            cmdline_pkgspec['epoch'] = str(options.epoch)
+        if options.version:
+            cmdline_pkgspec['version'] = str(options.version)
+        if options.release:
+            cmdline_pkgspec['release'] = str(options.release)
+        if options.arch:
+            cmdline_pkgspec['arch'] = str(options.arch)
+
+    if options.verify:
+        results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
+        for r in results:
+            files = r.get('files', '')
+            for f in files:
+                display_verify_file(f)
+
+    elif options.list:
+        for p in rpmpackagelist(main_ts):
+            print(p)
+
+    elif options.erase:
+        if options.name:
+            rpm_erase([cmdline_pkgspec], rpm_options)
+        else:
+            print('You must specify the "--name" option')
diff --git a/src/lib/Bcfg2/Client/XML.py b/src/lib/Bcfg2/Client/XML.py
new file mode 100644
index 000000000..858479611
--- /dev/null
+++ b/src/lib/Bcfg2/Client/XML.py
@@ -0,0 +1,36 @@
+'''XML lib compatibility layer for the Bcfg2 client'''
+
+# library will use lxml, then builtin xml.etree, then ElementTree
+
+try:
+    from lxml.etree import Element, SubElement, XML, tostring
+    from lxml.etree import XMLSyntaxError as ParseError
+    driver = 'lxml'
+except ImportError:
+    # lxml not available
+    from xml.parsers.expat import ExpatError as ParseError
+    try:
+        import xml.etree.ElementTree
+        Element = xml.etree.ElementTree.Element
+        SubElement = xml.etree.ElementTree.SubElement
+        XML = 
xml.etree.ElementTree.XML + def tostring(e, encoding=None, xml_declaration=None): + return xml.etree.ElementTree.tostring(e, encoding=encoding) + driver = 'etree-py' + except ImportError: + try: + from elementtree.ElementTree import Element, SubElement, XML, tostring + driver = 'etree' + import elementtree.ElementTree + Element = elementtree.ElementTree.Element + SubElement = elementtree.ElementTree.SubElement + XML = elementtree.ElementTree.XML + def tostring(e, encoding=None, xml_declaration=None): + return elementtree.ElementTree.tostring(e) + + except ImportError: + print("Failed to load lxml, xml.etree and elementtree.ElementTree") + print("Cannot continue") + raise SystemExit(1) + +len([Element, SubElement, XML, tostring, ParseError]) diff --git a/src/lib/Bcfg2/Client/__init__.py b/src/lib/Bcfg2/Client/__init__.py new file mode 100644 index 000000000..6ed37b257 --- /dev/null +++ b/src/lib/Bcfg2/Client/__init__.py @@ -0,0 +1,3 @@ +"""This contains all Bcfg2 Client modules""" + +__all__ = ["Frame", "Tools", "XML"] diff --git a/src/lib/Bcfg2/Component.py b/src/lib/Bcfg2/Component.py new file mode 100644 index 000000000..eb9ea166a --- /dev/null +++ b/src/lib/Bcfg2/Component.py @@ -0,0 +1,284 @@ +"""Cobalt component base.""" + +__all__ = ["Component", "exposed", "automatic", "run_component"] + +import inspect +import logging +import os +import pydoc +import sys +import time +import threading + +import Bcfg2.Logger +from Bcfg2.Statistics import Statistics +from Bcfg2.SSLServer import XMLRPCServer +# Compatibility import +from Bcfg2.Bcfg2Py3k import xmlrpclib, urlparse + +logger = logging.getLogger() + +class NoExposedMethod (Exception): + """There is no method exposed with the given name.""" + +def run_component(component_cls, listen_all, location, daemon, pidfile_name, + to_file, cfile, argv=None, register=True, + state_name=False, cls_kwargs={}, extra_getopt='', time_out=10, + protocol='xmlrpc/ssl', certfile=None, keyfile=None, ca=None): + + # default settings + level = logging.INFO + + logging.getLogger().setLevel(level) + Bcfg2.Logger.setup_logging(component_cls.implementation, + to_console=True, + to_syslog=True, + to_file=to_file, + level=level) + + if daemon: + child_pid = os.fork() + if child_pid != 0: + return + + os.setsid() + + child_pid = os.fork() + if child_pid != 0: + os._exit(0) + + redirect_file = open("/dev/null", "w+") + os.dup2(redirect_file.fileno(), sys.__stdin__.fileno()) + os.dup2(redirect_file.fileno(), sys.__stdout__.fileno()) + os.dup2(redirect_file.fileno(), sys.__stderr__.fileno()) + + os.chdir(os.sep) + + pidfile = open(pidfile_name or "/dev/null", "w") + pidfile.write("%s\n" % os.getpid()) + pidfile.close() + + component = component_cls(cfile=cfile, **cls_kwargs) + up = urlparse(location) + port = tuple(up[1].split(':')) + port = (port[0], int(port[1])) + try: + server = XMLRPCServer(listen_all, + port, + keyfile=keyfile, + certfile=certfile, + register=register, + timeout=time_out, + ca=ca, + protocol=protocol) + except: + logger.error("Server startup failed") + os._exit(1) + server.register_instance(component) + + try: + server.serve_forever() + finally: + server.server_close() + component.shutdown() + +def exposed(func): + """Mark a method to be exposed publically. 
+ + Examples: + class MyComponent (Component): + @expose + def my_method (self, param1, param2): + do_stuff() + + class MyComponent (Component): + def my_method (self, param1, param2): + do_stuff() + my_method = expose(my_method) + + """ + func.exposed = True + return func + +def automatic(func, period=10): + """Mark a method to be run periodically.""" + func.automatic = True + func.automatic_period = period + func.automatic_ts = -1 + return func + +def locking(func): + """Mark a function as being internally thread safe""" + func.locking = True + return func + +def readonly(func): + """Mark a function as read-only -- no data effects in component inst""" + func.readonly = True + return func + +class Component (object): + """Base component. + + Intended to be served as an instance by Cobalt.Component.XMLRPCServer + >>> server = Cobalt.Component.XMLRPCServer(location, keyfile) + >>> component = Cobalt.Component.Component() + >>> server.serve_instance(component) + + Class attributes: + name -- logical component name (e.g., "queue-manager", "process-manager") + implementation -- implementation identifier (e.g., "BlueGene/L", "BlueGene/P") + + Methods: + save -- pickle the component to a file + do_tasks -- perform automatic tasks for the component + + """ + + name = "component" + implementation = "generic" + + def __init__(self, **kwargs): + """Initialize a new component. + + Keyword arguments: + statefile -- file in which to save state automatically + + """ + self.statefile = kwargs.get("statefile", None) + self.logger = logging.getLogger("%s %s" % (self.implementation, self.name)) + self.lock = threading.Lock() + self.instance_statistics = Statistics() + + def do_tasks(self): + """Perform automatic tasks for the component. + + Automatic tasks are member callables with an attribute + automatic == True. + + """ + for name, func in inspect.getmembers(self): + if name == '__call__': + if getattr(func, "automatic", False): + need_to_lock = not getattr(func, 'locking', False) + if (time.time() - func.automatic_ts) > \ + func.automatic_period: + if need_to_lock: + t1 = time.time() + self.lock.acquire() + t2 = time.time() + self.instance_statistics.add_value('component_lock', t2-t1) + try: + mt1 = time.time() + try: + func() + except: + self.logger.error("Automatic method %s failed" \ + % (name), exc_info=1) + finally: + mt2 = time.time() + + if need_to_lock: + self.lock.release() + self.instance_statistics.add_value(name, mt2-mt1) + func.__dict__['automatic_ts'] = time.time() + + def _resolve_exposed_method(self, method_name): + """Resolve an exposed method. + + Arguments: + method_name -- name of the method to resolve + + """ + try: + func = getattr(self, method_name) + except AttributeError: + raise NoExposedMethod(method_name) + if not getattr(func, "exposed", False): + raise NoExposedMethod(method_name) + return func + + def _dispatch(self, method, args, dispatch_dict): + """Custom XML-RPC dispatcher for components. 
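+
+        The method is looked up in dispatch_dict, falling back to
+        _resolve_exposed_method(); unless the target is marked with the
+        locking decorator, the component lock is held around the call,
+        and lock and method timings are recorded in instance_statistics.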
+
+        method -- XML-RPC method name
+        args -- tuple of parameters to method
+
+        """
+        need_to_lock = True
+        if method in dispatch_dict:
+            method_func = dispatch_dict[method]
+        else:
+            try:
+                method_func = self._resolve_exposed_method(method)
+            except NoExposedMethod:
+                self.logger.error("Unknown method %s" % (method))
+                raise xmlrpclib.Fault(7, "Unknown method %s" % method)
+            except Exception:
+                e = sys.exc_info()[1]
+                if getattr(e, "log", True):
+                    self.logger.error(e, exc_info=True)
+                raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
+
+        if getattr(method_func, 'locking', False):
+            need_to_lock = False
+        if need_to_lock:
+            lock_start = time.time()
+            self.lock.acquire()
+            lock_done = time.time()
+        try:
+            method_start = time.time()
+            try:
+                result = method_func(*args)
+            finally:
+                method_done = time.time()
+                if need_to_lock:
+                    self.lock.release()
+                    self.instance_statistics.add_value('component_lock',
+                                                       lock_done - lock_start)
+                self.instance_statistics.add_value(method, method_done - method_start)
+        except xmlrpclib.Fault:
+            raise
+        except Exception:
+            e = sys.exc_info()[1]
+            if getattr(e, "log", True):
+                self.logger.error(e, exc_info=True)
+            raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
+        return result
+
+    def listMethods(self):
+        """Custom XML-RPC introspective method list."""
+        return [
+            name for name, func in inspect.getmembers(self, callable)
+            if getattr(func, "exposed", False)
+        ]
+    listMethods = exposed(listMethods)
+
+    def methodHelp(self, method_name):
+        """Custom XML-RPC introspective method help.
+
+        Arguments:
+        method_name -- name of method to get help on
+
+        """
+        try:
+            func = self._resolve_exposed_method(method_name)
+        except NoExposedMethod:
+            return ""
+        return pydoc.getdoc(func)
+    methodHelp = exposed(methodHelp)
+
+    def get_name(self):
+        """The name of the component."""
+        return self.name
+    get_name = exposed(get_name)
+
+    def get_implementation(self):
+        """The implementation of the component."""
+        return self.implementation
+    get_implementation = exposed(get_implementation)
+
+    def get_statistics(self, _):
+        """Get current statistics about component execution"""
+        return self.instance_statistics.display()
+    get_statistics = exposed(get_statistics)
diff --git a/src/lib/Bcfg2/Logger.py b/src/lib/Bcfg2/Logger.py
new file mode 100644
index 000000000..06aae615e
--- /dev/null
+++ b/src/lib/Bcfg2/Logger.py
@@ -0,0 +1,180 @@
+"""Bcfg2 logging support"""
+
+import copy
+import fcntl
+import logging
+import logging.handlers
+import math
+import socket
+import struct
+import sys
+import termios
+
+logging.raiseExceptions = 0
+
+
+class TermiosFormatter(logging.Formatter):
+    """The termios formatter displays output
+    in a terminal-sensitive fashion.
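+    Wide output is folded to the terminal width (discovered with the
+    TIOCGWINSZ ioctl, falling back to 80 columns), and list messages
+    are laid out in columns; pipes get an effectively unlimited width.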
+ """ + + def __init__(self, fmt=None, datefmt=None): + logging.Formatter.__init__(self, fmt, datefmt) + if sys.stdout.isatty(): + # now get termios info + try: + self.width = struct.unpack('hhhh', + fcntl.ioctl(0, + termios.TIOCGWINSZ, + "\000" * 8))[1] + if self.width == 0: + self.width = 80 + except: + self.width = 80 + else: + # output to a pipe + self.width = 32768 + + def format(self, record): + '''format a record for display''' + returns = [] + line_len = self.width + if isinstance(record.msg, str): + for line in record.msg.split('\n'): + if len(line) <= line_len: + returns.append(line) + else: + inner_lines = int(math.floor(float(len(line)) / line_len)) + 1 + for i in range(inner_lines): + returns.append("%s" % (line[i * line_len:(i + 1) * line_len])) + elif isinstance(record.msg, list): + if not record.msg: + return '' + record.msg.sort() + msgwidth = self.width + columnWidth = max([len(item) for item in record.msg]) + columns = int(math.floor(float(msgwidth) / (columnWidth + 2))) + lines = int(math.ceil(float(len(record.msg)) / columns)) + for lineNumber in range(lines): + indices = [idx for idx in [(colNum * lines) + lineNumber + for colNum in range(columns)] if idx < len(record.msg)] + format = (len(indices) * (" %%-%ds " % columnWidth)) + returns.append(format % tuple([record.msg[idx] for idx in indices])) + else: + returns.append(str(record.msg)) + if record.exc_info: + returns.append(self.formatException(record.exc_info)) + return '\n'.join(returns) + + +class FragmentingSysLogHandler(logging.handlers.SysLogHandler): + """ + This handler fragments messages into + chunks smaller than 250 characters + """ + + def __init__(self, procname, path, facility): + self.procname = procname + self.unixsocket = False + logging.handlers.SysLogHandler.__init__(self, path, facility) + + def emit(self, record): + """Chunk and deliver records.""" + record.name = self.procname + if isinstance(record.msg, str): + msgs = [] + error = record.exc_info + record.exc_info = None + msgdata = record.msg + while msgdata: + newrec = copy.copy(record) + newrec.msg = msgdata[:250] + msgs.append(newrec) + msgdata = msgdata[250:] + msgs[0].exc_info = error + else: + msgs = [record] + for newrec in msgs: + msg = self.log_format_string % (self.encodePriority(self.facility, + newrec.levelname.lower()), + self.format(newrec)) + try: + self.socket.send(msg.encode('ascii')) + except socket.error: + for i in range(10): + try: + if isinstance(self.address, tuple): + self.socket = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM) + self.socket.connect(self.address) + else: + self._connect_unixsocket(self.address) + break + except socket.error: + continue + try: + self.socket.send("Reconnected to syslog") + self.socket.send(msg) + except: + """ + If we still fail then drop it. Running bcfg2-server as non-root can + trigger permission denied exceptions. 
+ """ + pass + + +def add_console_handler(level): + """Add a logging handler that logs at a level to sys.stdout.""" + console = logging.StreamHandler(sys.stdout) + if level is True: + console.setLevel(logging.DEBUG) + else: + console.setLevel(level) + # tell the handler to use this format + console.setFormatter(TermiosFormatter()) + logging.root.addHandler(console) + + +def add_syslog_handler(procname, syslog_facility): + """Add a logging handler that logs as procname to syslog_facility.""" + try: + try: + syslog = FragmentingSysLogHandler(procname, + '/dev/log', + syslog_facility) + except socket.error: + syslog = FragmentingSysLogHandler(procname, + ('localhost', 514), + syslog_facility) + syslog.setLevel(logging.DEBUG) + syslog.setFormatter(logging.Formatter('%(name)s[%(process)d]: %(message)s')) + logging.root.addHandler(syslog) + except socket.error: + logging.root.error("failed to activate syslogging") + except: + print("Failed to activate syslogging") + + +def add_file_handler(to_file): + """Add a logging handler that logs to to_file.""" + filelog = logging.FileHandler(to_file) + filelog.setLevel(logging.DEBUG) + filelog.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s')) + logging.root.addHandler(filelog) + + +def setup_logging(procname, to_console=True, to_syslog=True, + syslog_facility='daemon', level=0, to_file=None): + """Setup logging for Bcfg2 software.""" + if hasattr(logging, 'already_setup'): + return + + if to_console: + add_console_handler(to_console) + if to_syslog: + add_syslog_handler(procname, syslog_facility) + if to_file is not None: + add_file_handler(to_file) + + logging.root.setLevel(level) + logging.already_setup = True diff --git a/src/lib/Bcfg2/Options.py b/src/lib/Bcfg2/Options.py new file mode 100644 index 000000000..dc14eaf00 --- /dev/null +++ b/src/lib/Bcfg2/Options.py @@ -0,0 +1,399 @@ +"""Option parsing library for utilities.""" + +import getopt +import re +import os +import sys +import shlex +import Bcfg2.Client.Tools +# Compatibility imports +from Bcfg2.Bcfg2Py3k import ConfigParser + +def bool_cook(x): + if x: + return True + else: + return False + +class OptionFailure(Exception): + pass + +DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf' #/etc/bcfg2.conf +DEFAULT_INSTALL_PREFIX = '/usr' #/usr + +class Option(object): + cfpath = DEFAULT_CONFIG_LOCATION + __cfp = False + + def getCFP(self): + if not self.__cfp: + self.__cfp = ConfigParser.ConfigParser() + self.__cfp.readfp(open(self.cfpath)) + return self.__cfp + cfp = property(getCFP) + + def get_cooked_value(self, value): + if self.boolean: + return True + if self.cook: + return self.cook(value) + else: + return value + + def __init__(self, desc, default, cmd=False, odesc=False, + env=False, cf=False, cook=False, long_arg=False): + self.desc = desc + self.default = default + self.cmd = cmd + self.long = long_arg + if not self.long: + if cmd and (cmd[0] != '-' or len(cmd) != 2): + raise OptionFailure("Poorly formed command %s" % cmd) + else: + if cmd and (not cmd.startswith('--')): + raise OptionFailure("Poorly formed command %s" % cmd) + self.odesc = odesc + self.env = env + self.cf = cf + self.boolean = False + if not odesc and not cook: + self.boolean = True + self.cook = cook + + def buildHelpMessage(self): + msg = '' + if self.cmd: + if not self.long: + msg = self.cmd.ljust(3) + else: + msg = self.cmd + if self.odesc: + if self.long: + msg = "%-28s" % ("%s=%s" % (self.cmd, self.odesc)) + else: + msg += '%-25s' % (self.odesc) + else: + msg += '%-25s' % ('') + msg += 
"%s\n" % self.desc + return msg + + def buildGetopt(self): + gstr = '' + if self.long: + return gstr + if self.cmd: + gstr = self.cmd[1] + if self.odesc: + gstr += ':' + return gstr + + def buildLongGetopt(self): + if self.odesc: + return self.cmd[2:]+'=' + else: + return self.cmd[2:] + + def parse(self, opts, rawopts): + if self.cmd and opts: + # Processing getopted data + optinfo = [opt[1] for opt in opts if opt[0] == self.cmd] + if optinfo: + if optinfo[0]: + self.value = self.get_cooked_value(optinfo[0]) + else: + self.value = True + return + if self.cmd and self.cmd in rawopts: + data = rawopts[rawopts.index(self.cmd) + 1] + self.value = self.get_cooked_value(data) + return + # No command line option found + if self.env and self.env in os.environ: + self.value = self.get_cooked_value(os.environ[self.env]) + return + if self.cf: + # FIXME: This is potentially masking a lot of errors + try: + self.value = self.get_cooked_value(self.cfp.get(*self.cf)) + return + except: + pass + # Default value not cooked + self.value = self.default + +class OptionSet(dict): + def __init__(self, *args): + dict.__init__(self, *args) + self.hm = self.buildHelpMessage() + + def buildGetopt(self): + return ''.join([opt.buildGetopt() for opt in list(self.values())]) + + def buildLongGetopt(self): + return [opt.buildLongGetopt() for opt in list(self.values()) if opt.long] + + def buildHelpMessage(self): + if hasattr(self, 'hm'): + return self.hm + hlist = [] # list of _non-empty_ help messages + for opt in list(self.values()): + hm = opt.buildHelpMessage() + if hm != '': + hlist.append(hm) + return ' '.join(hlist) + + def helpExit(self, msg='', code=1): + if msg: + print(msg) + print("Usage:\n %s" % self.buildHelpMessage()) + raise SystemExit(code) + + def parse(self, argv, do_getopt=True): + '''Parse options from command line.''' + if do_getopt: + try: + opts, args = getopt.getopt(argv, self.buildGetopt(), + self.buildLongGetopt()) + except getopt.GetoptError: + err = sys.exc_info()[1] + self.helpExit(err) + if '-h' in argv: + self.helpExit('', 0) + self['args'] = args + for key in list(self.keys()): + if key == 'args': + continue + option = self[key] + if do_getopt: + option.parse(opts, []) + else: + option.parse([], argv) + if hasattr(option, 'value'): + val = option.value + self[key] = val + +def list_split(c_string): + if c_string: + return re.split("\s*,\s*", c_string) + return [] + +def colon_split(c_string): + if c_string: + return c_string.split(':') + return [] + +def get_bool(s): + # these values copied from ConfigParser.RawConfigParser.getboolean + # with the addition of True and False + truelist = ["1", "yes", "True", "true", "on"] + falselist = ["0", "no", "False", "false", "off"] + if s in truelist: + return True + elif s in falselist: + return False + else: + raise ValueError + +# General options +CFILE = Option('Specify configuration file', DEFAULT_CONFIG_LOCATION, cmd='-C', + odesc='<conffile>') +LOCKFILE = Option('Specify lockfile', + "/var/lock/bcfg2.run", + cf=('components', 'lockfile'), + odesc='<Path to lockfile>') +HELP = Option('Print this usage message', False, cmd='-h') +DEBUG = Option("Enable debugging output", False, cmd='-d') +VERBOSE = Option("Enable verbose output", False, cmd='-v') +DAEMON = Option("Daemonize process, storing pid", False, + cmd='-D', odesc="<pidfile>") +INSTALL_PREFIX = Option('Installation location', cf=('server', 'prefix'), + default=DEFAULT_INSTALL_PREFIX, odesc='</path>') +SENDMAIL_PATH = Option('Path to sendmail', cf=('reports', 'sendmailpath'), + 
default='/usr/lib/sendmail') +INTERACTIVE = Option('Run interactively, prompting the user for each change', + default=False, + cmd='-I', ) +ENCODING = Option('Encoding of cfg files', + default='UTF-8', + cmd='-E', + odesc='<encoding>', + cf=('components', 'encoding')) +PARANOID_PATH = Option('Specify path for paranoid file backups', + default='/var/cache/bcfg2', cf=('paranoid', 'path'), + odesc='<paranoid backup path>') +PARANOID_MAX_COPIES = Option('Specify the number of paranoid copies you want', + default=1, cf=('paranoid', 'max_copies'), + odesc='<max paranoid copies>') +OMIT_LOCK_CHECK = Option('Omit lock check', default=False, cmd='-O') +CORE_PROFILE = Option('profile', + default=False, cmd='-p', ) +FILES_ON_STDIN = Option('Operate on a list of files supplied on stdin', + cmd='--stdin', default=False, long_arg=True) +SCHEMA_PATH = Option('Path to XML Schema files', cmd='--schema', + odesc='<schema path>', + default="%s/share/bcfg2/schemas" % DEFAULT_INSTALL_PREFIX, + long_arg=True) +REQUIRE_SCHEMA = Option("Require property files to have matching schema files", + cmd="--require-schema", default=False, long_arg=True) + +# Metadata options +MDATA_OWNER = Option('Default Path owner', + default='root', cf=('mdata', 'owner'), + odesc='owner permissions') +MDATA_GROUP = Option('Default Path group', + default='root', cf=('mdata', 'group'), + odesc='group permissions') +MDATA_IMPORTANT = Option('Default Path priority (importance)', + default='False', cf=('mdata', 'important'), + odesc='Important entries are installed first') +MDATA_PERMS = Option('Default Path permissions', + '644', cf=('mdata', 'perms'), + odesc='octal permissions') +MDATA_PARANOID = Option('Default Path paranoid setting', + 'false', cf=('mdata', 'paranoid'), + odesc='Path paranoid setting') +MDATA_SENSITIVE = Option('Default Path sensitive setting', + 'false', cf=('mdata', 'sensitive'), + odesc='Path sensitive setting') + +# Server options +SERVER_REPOSITORY = Option('Server repository path', '/var/lib/bcfg2', + cf=('server', 'repository'), cmd='-Q', + odesc='<repository path>') +SERVER_PLUGINS = Option('Server plugin list', cf=('server', 'plugins'), + # default server plugins + default=[ + 'Bundler', + 'Cfg', + 'Metadata', + 'Pkgmgr', + 'Rules', + 'SSHbase', + ], + cook=list_split) +SERVER_MCONNECT = Option('Server Metadata Connector list', cook=list_split, + cf=('server', 'connectors'), default=['Probes'], ) +SERVER_FILEMONITOR = Option('Server file monitor', cf=('server', 'filemonitor'), + default='default', odesc='File monitoring driver') +SERVER_LISTEN_ALL = Option('Listen on all interfaces', + cf=('server', 'listen_all'), + cmd='--listen-all', + default=False, + long_arg=True, + cook=get_bool, + odesc='True|False') +SERVER_LOCATION = Option('Server Location', cf=('components', 'bcfg2'), + default='https://localhost:6789', cmd='-S', + odesc='https://server:port') +SERVER_STATIC = Option('Server runs on static port', cf=('components', 'bcfg2'), + default=False, cook=bool_cook) +SERVER_KEY = Option('Path to SSL key', cf=('communication', 'key'), + default=False, cmd='--ssl-key', odesc='<ssl key>', + long_arg=True) +SERVER_CERT = Option('Path to SSL certificate', default='/etc/bcfg2.key', + cf=('communication', 'certificate'), odesc='<ssl cert>') +SERVER_CA = Option('Path to SSL CA Cert', default=None, + cf=('communication', 'ca'), odesc='<ca cert>') +SERVER_PASSWORD = Option('Communication Password', cmd='-x', odesc='<password>', + cf=('communication', 'password'), default=False) +SERVER_PROTOCOL = Option('Server 
Protocol', cf=('communication', 'protocol'),
+                         default='xmlrpc/ssl')
+# Client options
+CLIENT_KEY = Option('Path to SSL key', cf=('communication', 'key'),
+                    default=None, cmd="--ssl-key", odesc='<ssl key>',
+                    long_arg=True)
+CLIENT_CERT = Option('Path to SSL certificate', default=None, cmd="--ssl-cert",
+                     cf=('communication', 'certificate'), odesc='<ssl cert>',
+                     long_arg=True)
+CLIENT_CA = Option('Path to SSL CA Cert', default=None, cmd="--ca-cert",
+                   cf=('communication', 'ca'), odesc='<ca cert>',
+                   long_arg=True)
+CLIENT_SCNS = Option('List of server commonNames', default=None, cmd="--ssl-cns",
+                     cf=('communication', 'serverCommonNames'),
+                     odesc='<commonName1:commonName2>', cook=list_split,
+                     long_arg=True)
+CLIENT_PROFILE = Option('Assert the given profile for the host',
+                        default=False, cmd='-p', odesc="<profile>")
+CLIENT_RETRIES = Option('The number of times to retry network communication',
+                        default='3', cmd='-R', cf=('communication', 'retries'),
+                        odesc="<retry count>")
+CLIENT_DRYRUN = Option('Do not actually change the system',
+                       default=False, cmd='-n', )
+CLIENT_EXTRA_DISPLAY = Option('enable extra entry output',
+                              default=False, cmd='-e', )
+CLIENT_PARANOID = Option('Make automatic backups of config files',
+                         default=False,
+                         cmd='-P',
+                         cook=get_bool,
+                         cf=('client', 'paranoid'))
+CLIENT_DRIVERS = Option('Specify tool driver set', cmd='-D',
+                        cf=('client', 'drivers'),
+                        odesc="<driver1,driver2>", cook=list_split,
+                        default=Bcfg2.Client.Tools.default)
+CLIENT_CACHE = Option('Store the configuration in a file',
+                      default=False, cmd='-c', odesc="<cache path>")
+CLIENT_REMOVE = Option('Force removal of additional configuration items',
+                       default=False, cmd='-r', odesc="<entry type|all>")
+CLIENT_BUNDLE = Option('Only configure the given bundle(s)', default=[],
+                       cmd='-b', odesc='<bundle:bundle>', cook=colon_split)
+CLIENT_BUNDLEQUICK = Option('Only verify/configure the given bundle(s)', default=False,
+                            cmd='-Q')
+CLIENT_INDEP = Option('Only configure independent entries, ignore bundles', default=False,
+                      cmd='-z')
+CLIENT_KEVLAR = Option('Run in kevlar (bulletproof) mode', default=False,
+                       cmd='-k', )
+CLIENT_DLIST = Option('Run client in server decision list mode', default='none',
+                      cf=('client', 'decision'),
+                      cmd='-l', odesc='<whitelist|blacklist|none>')
+CLIENT_FILE = Option('Configure from a file rather than querying the server',
+                     default=False, cmd='-f', odesc='<specification path>')
+CLIENT_QUICK = Option('Disable some checksum verification', default=False,
+                      cmd='-q', )
+CLIENT_USER = Option('The user to provide for authentication', default='root',
+                     cmd='-u', cf=('communication', 'user'), odesc='<user>')
+CLIENT_SERVICE_MODE = Option('Set client service mode', default='default',
+                             cmd='-s', odesc='<default|disabled|build>')
+CLIENT_TIMEOUT = Option('Set the client XML-RPC timeout', default=90,
+                        cmd='-t', cf=('communication', 'timeout'),
+                        odesc='<timeout>')
+
+# bcfg2-test options
+TEST_NOSEOPTS = Option('Options to pass to nosetests', default=[],
+                       cmd='--nose-options', cf=('bcfg2_test', 'nose_options'),
+                       odesc='<opts>', long_arg=True, cook=shlex.split)
+TEST_IGNORE = Option('Ignore these entries if they fail to build.', default=[],
+                     cmd='--ignore',
+                     cf=('bcfg2_test', 'ignore_entries'), long_arg=True,
+                     odesc='<Type>:<name>,<Type>:<name>', cook=list_split)
+
+# APT client tool options
+CLIENT_APT_TOOLS_INSTALL_PATH = Option('Apt tools install path',
+                                       cf=('APT', 'install_path'),
+                                       default='/usr')
+CLIENT_APT_TOOLS_VAR_PATH = Option('Apt tools var path',
+                                   
cf=('APT', 'var_path'), default='/var') +CLIENT_SYSTEM_ETC_PATH = Option('System etc path', cf=('APT', 'etc_path'), + default='/etc') + +# Logging options +LOGGING_FILE_PATH = Option('Set path of file log', default=None, + cmd='-o', odesc='<path>', cf=('logging', 'path')) + +class OptionParser(OptionSet): + """ + OptionParser bootstraps option parsing, + getting the value of the config file + """ + def __init__(self, args): + self.Bootstrap = OptionSet([('configfile', CFILE)]) + self.Bootstrap.parse(sys.argv[1:], do_getopt=False) + if self.Bootstrap['configfile'] != Option.cfpath: + Option.cfpath = self.Bootstrap['configfile'] + Option.__cfp = False + OptionSet.__init__(self, args) + try: + f = open(Option.cfpath, 'r') + f.close() + except IOError: + e = sys.exc_info()[1] + print("Warning! Unable to read specified configuration file: %s" % e) diff --git a/src/lib/Bcfg2/Proxy.py b/src/lib/Bcfg2/Proxy.py new file mode 100644 index 000000000..422d642db --- /dev/null +++ b/src/lib/Bcfg2/Proxy.py @@ -0,0 +1,368 @@ +"""RPC client access to cobalt components. + +Classes: +ComponentProxy -- an RPC client proxy to Cobalt components + +Functions: +load_config -- read configuration files + +""" + +import logging +import re +import socket + +# The ssl module is provided by either Python 2.6 or a separate ssl +# package that works on older versions of Python (see +# http://pypi.python.org/pypi/ssl). If neither can be found, look for +# M2Crypto instead. +try: + import ssl + SSL_LIB = 'py26_ssl' + SSL_ERROR = ssl.SSLError +except ImportError: + from M2Crypto import SSL + import M2Crypto.SSL.Checker + SSL_LIB = 'm2crypto' + SSL_ERROR = SSL.SSLError + + +import sys +import time + +# Compatibility imports +from Bcfg2.Bcfg2Py3k import httplib, xmlrpclib, urlparse + +version = sys.version_info[:2] +has_py23 = version >= (2, 3) +has_py26 = version >= (2, 6) + +__all__ = ["ComponentProxy", + "RetryMethod", + "SSLHTTPConnection", + "XMLRPCTransport"] + + +class ProxyError(Exception): + """ ProxyError provides a consistent reporting interface to + the various xmlrpclib errors that might arise (mainly + ProtocolError and Fault) """ + def __init__(self, err): + msg = None + if isinstance(err, xmlrpclib.ProtocolError): + # cut out the password in the URL + url = re.sub(r'([^:]+):(.*?)@([^@]+:\d+/)', r'\1:******@\3', + err.url) + msg = "XML-RPC Protocol Error for %s: %s (%s)" % (url, + err.errmsg, + err.errcode) + elif isinstance(err, xmlrpclib.Fault): + msg = "XML-RPC Fault: %s (%s)" % (err.faultString, + err.faultCode) + else: + msg = str(err) + Exception.__init__(self, msg) + +class CertificateError(Exception): + def __init__(self, commonName): + self.commonName = commonName + def __str__(self): + return ("Got unallowed commonName %s from server" + % self.commonName) + + +class RetryMethod(xmlrpclib._Method): + """Method with error handling and retries built in.""" + log = logging.getLogger('xmlrpc') + max_retries = 4 + + def __call__(self, *args): + for retry in range(self.max_retries): + try: + return xmlrpclib._Method.__call__(self, *args) + except xmlrpclib.ProtocolError: + err = sys.exc_info()[1] + self.log.error("Server failure: Protocol Error: %s %s" % \ + (err.errcode, err.errmsg)) + raise xmlrpclib.Fault(20, "Server Failure") + except xmlrpclib.Fault: + raise + except socket.error: + err = sys.exc_info()[1] + if hasattr(err, 'errno') and err.errno == 336265218: + self.log.error("SSL Key error") + break + if hasattr(err, 'errno') and err.errno == 185090050: + self.log.error("SSL CA error") + break + if 
retry == 3: + self.log.error("Server failure: %s" % err) + raise xmlrpclib.Fault(20, err) + except CertificateError: + ce = sys.exc_info()[1] + self.log.error("Got unallowed commonName %s from server" \ + % ce.commonName) + break + except KeyError: + self.log.error("Server disallowed connection") + break + except: + self.log.error("Unknown failure", exc_info=1) + break + time.sleep(0.5) + raise xmlrpclib.Fault(20, "Server Failure") + +# sorry jon +_Method = RetryMethod + + +class SSLHTTPConnection(httplib.HTTPConnection): + """Extension of HTTPConnection that + implements SSL and related behaviors. + """ + + logger = logging.getLogger('Bcfg2.Proxy.SSLHTTPConnection') + + def __init__(self, host, port=None, strict=None, timeout=90, key=None, + cert=None, ca=None, scns=None, protocol='xmlrpc/ssl'): + """Initializes the `httplib.HTTPConnection` object and stores security + parameters + + Parameters + ---------- + host : string + Name of host to contact + port : int, optional + Port on which to contact the host. If none is specified, + the default port of 80 will be used unless the `host` + string has a port embedded in the form host:port. + strict : Boolean, optional + Passed to the `httplib.HTTPConnection` constructor and if + True, causes the `BadStatusLine` exception to be raised if + the status line cannot be parsed as a valid HTTP 1.0 or + 1.1 status. + timeout : int, optional + Causes blocking operations to timeout after `timeout` + seconds. + key : string, optional + The file system path to the local endpoint's SSL key. May + specify the same file as `cert` if using a file that + contains both. See + http://docs.python.org/library/ssl.html#ssl-certificates + for details. Required if using xmlrpc/ssl with client + certificate authentication. + cert : string, optional + The file system path to the local endpoint's SSL + certificate. May specify the same file as `cert` if using + a file that contains both. See + http://docs.python.org/library/ssl.html#ssl-certificates + for details. Required if using xmlrpc/ssl with client + certificate authentication. + ca : string, optional + The file system path to a set of concatenated certificate + authority certs, which are used to validate certificates + passed from the other end of the connection. + scns : array-like, optional + List of acceptable server commonNames. The peer cert's + common name must appear in this list, otherwise the + connect() call will throw a `CertificateError`. + protocol : {'xmlrpc/ssl', 'xmlrpc/tlsv1'}, optional + Communication protocol to use. 
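+        Examples
+        --------
+        A connection sketch; the host, port, certificate paths and
+        commonName below are placeholders, not defaults:
+
+        >>> conn = SSLHTTPConnection('bcfg2.example.com', 6789,
+        ...                          key='/etc/pki/bcfg2.key',
+        ...                          cert='/etc/pki/bcfg2.crt',
+        ...                          ca='/etc/pki/ca.crt',
+        ...                          scns=['bcfg2.example.com'])
+        >>> conn.connect()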
+
+        """
+        if not has_py26:
+            httplib.HTTPConnection.__init__(self, host, port, strict)
+        else:
+            httplib.HTTPConnection.__init__(self, host, port, strict, timeout)
+        self.key = key
+        self.cert = cert
+        self.ca = ca
+        self.scns = scns
+        self.protocol = protocol
+        self.timeout = timeout
+
+    def connect(self):
+        """Initiates a connection using previously set attributes."""
+        if SSL_LIB == 'py26_ssl':
+            self._connect_py26ssl()
+        elif SSL_LIB == 'm2crypto':
+            self._connect_m2crypto()
+        else:
+            raise Exception("No SSL module support")
+
+    def _connect_py26ssl(self):
+        """Initiates a connection using the ssl module."""
+        rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        if self.protocol == 'xmlrpc/ssl':
+            ssl_protocol_ver = ssl.PROTOCOL_SSLv23
+        elif self.protocol == 'xmlrpc/tlsv1':
+            ssl_protocol_ver = ssl.PROTOCOL_TLSv1
+        else:
+            self.logger.error("Unknown protocol %s" % (self.protocol))
+            raise Exception("unknown protocol %s" % self.protocol)
+        if self.ca:
+            other_side_required = ssl.CERT_REQUIRED
+        else:
+            other_side_required = ssl.CERT_NONE
+            self.logger.warning("No ca is specified. Cannot authenticate the server with SSL.")
+        if self.cert and not self.key:
+            self.logger.warning("SSL cert specified, but no key. Cannot authenticate this client with SSL.")
+            self.cert = None
+        if self.key and not self.cert:
+            self.logger.warning("SSL key specified, but no cert. Cannot authenticate this client with SSL.")
+            self.key = None
+
+        if has_py23:
+            rawsock.settimeout(self.timeout)
+        self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required,
+                                  ca_certs=self.ca, suppress_ragged_eofs=True,
+                                  keyfile=self.key, certfile=self.cert,
+                                  ssl_version=ssl_protocol_ver)
+        self.sock.connect((self.host, self.port))
+        peer_cert = self.sock.getpeercert()
+        if peer_cert and self.scns:
+            scn = [x[0][1] for x in peer_cert['subject'] if x[0][0] == 'commonName'][0]
+            if scn not in self.scns:
+                raise CertificateError(scn)
+        self.sock.closeSocket = True
+
+    def _connect_m2crypto(self):
+        """Initiates a connection using the M2Crypto module."""
+
+        if self.protocol == 'xmlrpc/ssl':
+            ctx = SSL.Context('sslv23')
+        elif self.protocol == 'xmlrpc/tlsv1':
+            ctx = SSL.Context('tlsv1')
+        else:
+            self.logger.error("Unknown protocol %s" % (self.protocol))
+            raise Exception("unknown protocol %s" % self.protocol)
+
+        if self.ca:
+            # Use the certificate authority to validate the cert
+            # presented by the server
+            ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, depth=9)
+            if ctx.load_verify_locations(self.ca) != 1:
+                raise Exception('No CA certs')
+        else:
+            self.logger.warning("No ca is specified. Cannot authenticate the server with SSL.")
+
+        if self.cert and self.key:
+            # A cert/key is defined, use them to support client
+            # authentication to the server
+            ctx.load_cert(self.cert, self.key)
+        elif self.cert:
+            self.logger.warning("SSL cert specified, but no key. Cannot authenticate this client with SSL.")
+        elif self.key:
+            self.logger.warning("SSL key specified, but no cert. Cannot authenticate this client with SSL.")
+
+        self.sock = SSL.Connection(ctx)
+        if re.match('\\d+\\.\\d+\\.\\d+\\.\\d+', self.host):
+            # host is ip address
+            try:
+                hostname = socket.gethostbyaddr(self.host)[0]
+            except:
+                # fall back to ip address
+                hostname = self.host
+        else:
+            hostname = self.host
+        try:
+            self.sock.connect((hostname, self.port))
+            # automatically checks cert matches host
+        except M2Crypto.SSL.Checker.WrongHost:
+            wr = sys.exc_info()[1]
+            raise CertificateError(wr)
+
+
+class XMLRPCTransport(xmlrpclib.Transport):
+    def __init__(self, key=None, cert=None, ca=None,
+                 scns=None, use_datetime=0, timeout=90):
+        if hasattr(xmlrpclib.Transport, '__init__'):
+            xmlrpclib.Transport.__init__(self, use_datetime)
+        self.key = key
+        self.cert = cert
+        self.ca = ca
+        self.scns = scns
+        self.timeout = timeout
+
+    def make_connection(self, host):
+        host, self._extra_headers = self.get_host_info(host)[0:2]
+        http = SSLHTTPConnection(host,
+                                 key=self.key,
+                                 cert=self.cert,
+                                 ca=self.ca,
+                                 scns=self.scns,
+                                 timeout=self.timeout)
+        https = httplib.HTTP()
+        https._setup(http)
+        return https
+
+    def request(self, host, handler, request_body, verbose=0):
+        """Send request to server and return response."""
+        h = self.make_connection(host)
+
+        try:
+            self.send_request(h, handler, request_body)
+            self.send_host(h, host)
+            self.send_user_agent(h)
+            self.send_content(h, request_body)
+            errcode, errmsg, headers = h.getreply()
+        except (socket.error, SSL_ERROR):
+            err = sys.exc_info()[1]
+            raise ProxyError(xmlrpclib.ProtocolError(host + handler,
+                                                     408,
+                                                     str(err),
+                                                     self._extra_headers))
+
+        if errcode != 200:
+            raise ProxyError(xmlrpclib.ProtocolError(host + handler,
+                                                     errcode,
+                                                     errmsg,
+                                                     headers))
+
+        self.verbose = verbose
+        msglen = int(headers.dict['content-length'])
+        return self._get_response(h.getfile(), msglen)
+
+    def _get_response(self, fd, length):
+        # read response from input file/socket, and parse it
+        recvd = 0
+
+        p, u = self.getparser()
+
+        while recvd < length:
+            rlen = min(length - recvd, 1024)
+            response = fd.read(rlen)
+            recvd += len(response)
+            if not response:
+                break
+            if self.verbose:
+                print("body:", repr(response), len(response))
+            p.feed(response)
+
+        fd.close()
+        p.close()
+
+        return u.close()
+
+
+def ComponentProxy(url, user=None, password=None,
+                   key=None, cert=None, ca=None,
+                   allowedServerCNs=None, timeout=90):
+
+    """Constructs proxies to components.
+
+    Arguments:
+    url -- location of the component to connect to
+
+    Additional arguments are passed to the ServerProxy constructor.
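+
+    Example (a sketch; the URL, credentials and CA path below are
+    placeholders):
+        proxy = ComponentProxy('https://localhost:6789', user='root',
+                               password='secret', ca='/etc/bcfg2.crt')
+        proxy.ping('hello')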
+ + """ + + if user and password: + method, path = urlparse(url)[:2] + newurl = "%s://%s:%s@%s" % (method, user, password, path) + else: + newurl = url + ssl_trans = XMLRPCTransport(key, cert, ca, + allowedServerCNs, timeout=float(timeout)) + return xmlrpclib.ServerProxy(newurl, allow_none=True, transport=ssl_trans) diff --git a/src/lib/Bcfg2/SSLServer.py b/src/lib/Bcfg2/SSLServer.py new file mode 100644 index 000000000..418e259cc --- /dev/null +++ b/src/lib/Bcfg2/SSLServer.py @@ -0,0 +1,461 @@ +"""Bcfg2 SSL server.""" + +__all__ = [ + "SSLServer", "XMLRPCRequestHandler", "XMLRPCServer", +] + +import os +import sys +import socket +import base64 +import select +import signal +import logging +import ssl +import threading +import time +import types +# Compatibility imports +from Bcfg2.Bcfg2Py3k import xmlrpclib, SimpleXMLRPCServer, SocketServer + + +class ForkedChild(Exception): + pass + + +class XMLRPCDispatcher (SimpleXMLRPCServer.SimpleXMLRPCDispatcher): + logger = logging.getLogger("Cobalt.Server.XMLRPCDispatcher") + + def __init__(self, allow_none, encoding): + try: + SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, + allow_none, + encoding) + except: + # Python 2.4? + SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self) + + self.allow_none = allow_none + self.encoding = encoding + + def _marshaled_dispatch(self, address, data): + method_func = None + params, method = xmlrpclib.loads(data) + try: + if '.' not in method: + params = (address, ) + params + response = self.instance._dispatch(method, params, self.funcs) + # py3k compatibility + if type(response) not in [bool, str, list, dict, types.NoneType]: + response = (response.decode('utf-8'), ) + else: + response = (response, ) + raw_response = xmlrpclib.dumps(response, methodresponse=1, + allow_none=self.allow_none, + encoding=self.encoding) + except xmlrpclib.Fault: + fault = sys.exc_info()[1] + raw_response = xmlrpclib.dumps(fault, + allow_none=self.allow_none, + encoding=self.encoding) + except: + self.logger.error("Unexpected handler error", exc_info=1) + # report exception back to server + raw_response = xmlrpclib.dumps( + xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), + allow_none=self.allow_none, encoding=self.encoding) + return raw_response + + +class SSLServer (SocketServer.TCPServer, object): + + """TCP server supporting SSL encryption. + + Methods: + handshake -- perform a SSL/TLS handshake + + Properties: + url -- A url pointing to this server. + + """ + + allow_reuse_address = True + logger = logging.getLogger("Cobalt.Server.TCPServer") + + def __init__(self, listen_all, server_address, RequestHandlerClass, + keyfile=None, certfile=None, reqCert=False, ca=None, + timeout=None, protocol='xmlrpc/ssl'): + + """Initialize the SSL-TCP server. 
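+
+        Note that only the listening socket is set up here; the SSL
+        handshake for each client happens in get_request(), which wraps
+        every accepted socket with the configured key, cert and CA.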
+ + Arguments: + server_address -- address to bind to the server + RequestHandlerClass -- class to handle requests + + Keyword arguments: + keyfile -- private encryption key filename (enables ssl encryption) + certfile -- certificate file (enables ssl encryption) + reqCert -- client must present certificate + timeout -- timeout for non-blocking request handling + + """ + + if listen_all: + listen_address = ('', server_address[1]) + else: + listen_address = (server_address[0], server_address[1]) + try: + SocketServer.TCPServer.__init__(self, listen_address, + RequestHandlerClass) + except socket.error: + self.logger.error("Failed to bind to socket") + raise + + self.timeout = timeout + self.socket.settimeout(timeout) + self.keyfile = keyfile + if keyfile != None: + if keyfile == False or not os.path.exists(keyfile): + self.logger.error("Keyfile %s does not exist" % keyfile) + raise Exception("keyfile doesn't exist") + self.certfile = certfile + if certfile != None: + if certfile == False or not os.path.exists(certfile): + self.logger.error("Certfile %s does not exist" % certfile) + raise Exception("certfile doesn't exist") + self.ca = ca + if ca != None: + if ca == False or not os.path.exists(ca): + self.logger.error("CA %s does not exist" % ca) + raise Exception("ca doesn't exist") + self.reqCert = reqCert + if ca and certfile: + self.mode = ssl.CERT_OPTIONAL + else: + self.mode = ssl.CERT_NONE + if protocol == 'xmlrpc/ssl': + self.ssl_protocol = ssl.PROTOCOL_SSLv23 + elif protocol == 'xmlrpc/tlsv1': + self.ssl_protocol = ssl.PROTOCOL_TLSv1 + else: + self.logger.error("Unknown protocol %s" % (protocol)) + raise Exception("unknown protocol %s" % protocol) + + def get_request(self): + (sock, sockinfo) = self.socket.accept() + sock.settimeout(self.timeout) + sslsock = ssl.wrap_socket(sock, + server_side=True, + certfile=self.certfile, + keyfile=self.keyfile, + cert_reqs=self.mode, + ca_certs=self.ca, + ssl_version=self.ssl_protocol) + return sslsock, sockinfo + + def close_request(self, request): + try: + request.unwrap() + except: + pass + try: + request.close() + except: + pass + + def _get_url(self): + port = self.socket.getsockname()[1] + hostname = socket.gethostname() + protocol = "https" + return "%s://%s:%i" % (protocol, hostname, port) + url = property(_get_url) + + +class XMLRPCRequestHandler (SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): + + """Component XML-RPC request handler. + + Adds support for HTTP authentication. 
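+
+    Credentials come from the HTTP Basic Authorization header; they are
+    handed, together with the peer certificate and address, to the
+    registered component instance's authenticate() method.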
+ + Exceptions: + CouldNotAuthenticate -- client did not present acceptable authentication information + + Methods: + authenticate -- prompt a check of a client's provided username and password + handle_one_request -- handle a single rpc (optionally authenticating) + + """ + logger = logging.getLogger("Cobalt.Server.XMLRPCRequestHandler") + + def authenticate(self): + try: + header = self.headers['Authorization'] + except KeyError: + self.logger.error("No authentication data presented") + return False + auth_type, auth_content = header.split() + try: + # py3k compatibility + auth_content = base64.standard_b64decode(auth_content) + except TypeError: + auth_content = base64.standard_b64decode(bytes(auth_content.encode('ascii'))) + try: + # py3k compatibility + try: + username, password = auth_content.split(":") + except TypeError: + username, pw = auth_content.split(bytes(":", encoding='utf-8')) + password = pw.decode('utf-8') + except ValueError: + username = auth_content + password = "" + cert = self.request.getpeercert() + client_address = self.request.getpeername() + return self.server.instance.authenticate(cert, username, + password, client_address) + + def parse_request(self): + """Extends parse_request. + + Optionally check HTTP authentication when parsing. + + """ + if not SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.parse_request(self): + return False + try: + if not self.authenticate(): + self.logger.error("Authentication Failure") + self.send_error(401, self.responses[401][0]) + return False + except: + self.logger.error("Unexpected Authentication Failure", exc_info=1) + self.send_error(401, self.responses[401][0]) + return False + return True + + ### need to override do_POST here + def do_POST(self): + try: + max_chunk_size = 10 * 1024 * 1024 + size_remaining = int(self.headers["content-length"]) + L = [] + while size_remaining: + try: + select.select([self.rfile.fileno()], [], [], 3) + except select.error: + print("got select timeout") + raise + chunk_size = min(size_remaining, max_chunk_size) + L.append(self.rfile.read(chunk_size).decode('utf-8')) + size_remaining -= len(L[-1]) + data = ''.join(L) + response = self.server._marshaled_dispatch(self.client_address, + data) + if sys.hexversion >= 0x03000000: + response = response.encode('utf-8') + except: + try: + self.send_response(500) + self.end_headers() + except: + (type, msg) = sys.exc_info()[:2] + self.logger.error("Error sending 500 response (%s): %s" % \ + (type, msg)) + raise + else: + # got a valid XML RPC response + try: + self.send_response(200) + self.send_header("Content-type", "text/xml") + self.send_header("Content-length", str(len(response))) + self.end_headers() + failcount = 0 + while True: + try: + # If we hit SSL3_WRITE_PENDING here try to resend. 
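+                        # OpenSSL insists that the retried write use the
+                        # same buffer, so the unmodified response is sent
+                        # again (bounded by the failcount limit below).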
+ self.wfile.write(response) + break + except ssl.SSLError: + e = sys.exc_info()[1] + if str(e).find("SSL3_WRITE_PENDING") < 0: + raise + self.logger.error("SSL3_WRITE_PENDING") + failcount += 1 + if failcount < 5: + continue + raise + except: + (type, msg) = sys.exc_info()[:2] + if str(type) == 'socket.error' and msg[0] == 32: + self.logger.warning("Connection dropped from %s" % + self.client_address[0]) + elif str(type) == 'socket.error' and msg[0] == 104: + self.logger.warning("Connection reset by peer: %s" % + self.client_address[0]) + elif str(type) == 'ssl.SSLError': + self.logger.warning("SSLError handling client %s: %s" % + (self.client_address[0], msg)) + else: + self.logger.error("Error sending response (%s): %s" % + (type, msg)) + + def finish(self): + # shut down the connection + if not self.wfile.closed: + try: + self.wfile.flush() + self.wfile.close() + except socket.error: + err = sys.exc_info()[1] + self.logger.warning("Error closing connection: %s" % err) + self.rfile.close() + + +class XMLRPCServer (SocketServer.ThreadingMixIn, SSLServer, + XMLRPCDispatcher, object): + + """Component XMLRPCServer. + + Methods: + serve_daemon -- serve_forever in a daemonized process + serve_forever -- handle_one_request until not self.serve + shutdown -- stop serve_forever (by setting self.serve = False) + ping -- return all arguments received + + RPC methods: + ping + + (additional system.* methods are inherited from base dispatcher) + + Properties: + require_auth -- the request handler is requiring authorization + credentials -- valid credentials being used for authentication + + """ + + def __init__(self, listen_all, server_address, RequestHandlerClass=None, + keyfile=None, certfile=None, ca=None, protocol='xmlrpc/ssl', + timeout=10, + logRequests=False, + register=True, allow_none=True, encoding=None): + + """Initialize the XML-RPC server. 
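+
+        The constructor binds the socket but does not handle requests;
+        call serve_forever(), which also launches the automatic-task
+        thread, and shutdown() to stop it.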
+ + Arguments: + server_address -- address to bind to the server + RequestHandlerClass -- request handler used by TCP server (optional) + + Keyword arguments: + keyfile -- private encryption key filename + certfile -- certificate file + logRequests -- log all requests (default False) + register -- presence should be reported to service-location (default True) + allow_none -- allow None values in xml-rpc + encoding -- encoding to use for xml-rpc (default UTF-8) + + """ + + XMLRPCDispatcher.__init__(self, allow_none, encoding) + + if not RequestHandlerClass: + class RequestHandlerClass (XMLRPCRequestHandler): + """A subclassed request handler to prevent class-attribute conflicts.""" + + SSLServer.__init__(self, + listen_all, + server_address, + RequestHandlerClass, + ca=ca, + timeout=timeout, + keyfile=keyfile, + certfile=certfile, + protocol=protocol) + self.logRequests = logRequests + self.serve = False + self.register = register + self.register_introspection_functions() + self.register_function(self.ping) + self.logger.info("service available at %s" % self.url) + self.timeout = timeout + + def _tasks_thread(self): + try: + while self.serve: + try: + if self.instance and hasattr(self.instance, 'do_tasks'): + self.instance.do_tasks() + except: + self.logger.error("Unexpected task failure", exc_info=1) + time.sleep(self.timeout) + except: + self.logger.error("tasks_thread failed", exc_info=1) + + def server_close(self): + SSLServer.server_close(self) + self.logger.info("server_close()") + + def _get_require_auth(self): + return getattr(self.RequestHandlerClass, "require_auth", False) + + def _set_require_auth(self, value): + self.RequestHandlerClass.require_auth = value + require_auth = property(_get_require_auth, _set_require_auth) + + def _get_credentials(self): + try: + return self.RequestHandlerClass.credentials + except AttributeError: + return dict() + + def _set_credentials(self, value): + self.RequestHandlerClass.credentials = value + credentials = property(_get_credentials, _set_credentials) + + def register_instance(self, instance, *args, **kwargs): + XMLRPCDispatcher.register_instance(self, instance, *args, **kwargs) + try: + name = instance.name + except AttributeError: + name = "unknown" + if hasattr(instance, 'plugins'): + for pname, pinst in list(instance.plugins.items()): + for mname in pinst.__rmi__: + xmname = "%s.%s" % (pname, mname) + fn = getattr(pinst, mname) + self.register_function(fn, name=xmname) + self.logger.info("serving %s at %s" % (name, self.url)) + + def serve_forever(self): + """Serve single requests until (self.serve == False).""" + self.serve = True + self.task_thread = threading.Thread(target=self._tasks_thread) + self.task_thread.start() + self.logger.info("serve_forever() [start]") + signal.signal(signal.SIGINT, self._handle_shutdown_signal) + signal.signal(signal.SIGTERM, self._handle_shutdown_signal) + + try: + while self.serve: + try: + self.handle_request() + except socket.timeout: + pass + except select.error: + pass + except: + self.logger.error("Got unexpected error in handle_request", + exc_info=1) + finally: + self.logger.info("serve_forever() [stop]") + + def shutdown(self): + """Signal that automatic service should stop.""" + self.serve = False + + def _handle_shutdown_signal(self, *_): + self.shutdown() + + def ping(self, *args): + """Echo response.""" + self.logger.info("ping(%s)" % (", ".join([repr(arg) for arg in args]))) + return args diff --git a/src/lib/Bcfg2/Server/Admin/Backup.py b/src/lib/Bcfg2/Server/Admin/Backup.py new file mode 
100644 index 000000000..3744abca3 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Backup.py @@ -0,0 +1,25 @@ +import os +import sys +import time +import tarfile +import Bcfg2.Server.Admin +import Bcfg2.Options + + +class Backup(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Make a backup of the Bcfg2 repository" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin backup\n") + #"\n\nbcfg2-admin backup restore") + __usage__ = ("bcfg2-admin backup") + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + self.datastore = self.setup['repo'] + timestamp = time.strftime('%Y%m%d%H%M%S') + format = 'gz' + mode = 'w:' + format + filename = timestamp + '.tar' + '.' + format + out = tarfile.open(self.datastore + '/' + filename, mode=mode) + out.add(self.datastore, os.path.basename(self.datastore)) + out.close() + print("Archive %s was stored under %s" % (filename, self.datastore)) diff --git a/src/lib/Bcfg2/Server/Admin/Bundle.py b/src/lib/Bcfg2/Server/Admin/Bundle.py new file mode 100644 index 000000000..89c099602 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Bundle.py @@ -0,0 +1,96 @@ +import lxml.etree +import glob +import sys +import re +import Bcfg2.Server.Admin +import Bcfg2.Options +from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError + + +class Bundle(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Create or delete bundle entries" + # TODO: add/del functions + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin bundle list-xml" + "\nbcfg2-admin bundle list-genshi" + "\nbcfg2-admin bundle show\n") + __usage__ = ("bcfg2-admin bundle [options] [add|del] [group]") + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + rg = re.compile(r'([^.]+\.(?:[a-z][a-z\-]+))(?![\w\.])', + re.IGNORECASE | re.DOTALL) + + # Get all bundles out of the Bundle/ directory + repo = self.setup['repo'] + xml_list = glob.glob("%s/Bundler/*.xml" % repo) + genshi_list = glob.glob("%s/Bundler/*.genshi" % repo) + + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin bundle help for usage.") +# if args[0] == 'add': +# try: +# self.metadata.add_bundle(args[1]) +# except MetadataConsistencyError: +# print("Error in adding bundle.") +# raise SystemExit(1) +# elif args[0] in ['delete', 'remove', 'del', 'rm']: +# try: +# self.metadata.remove_bundle(args[1]) +# except MetadataConsistencyError: +# print("Error in deleting bundle.") +# raise SystemExit(1) + # Lists all available xml bundles + elif args[0] in ['list-xml', 'ls-xml']: + bundle_name = [] + for bundle_path in xml_list: + bundle_name.append(rg.search(bundle_path).group(1)) + for bundle in bundle_name: + print(bundle.split('.')[0]) + # Lists all available genshi bundles + elif args[0] in ['list-genshi', 'ls-gen']: + bundle_name = [] + for bundle_path in genshi_list: + bundle_name.append(rg.search(bundle_path).group(1)) + for bundle in bundle_name: + print(bundle.split('.')[0]) + # Shows a list of all available bundles and prints bundle + # details after the user choose one bundle. 
+        # FIXME: Add support for detailed output of genshi bundles
+        # FIXME: This functionality is almost identical to
+        #        bcfg2-info bundles
+        elif args[0] in ['show']:
+            bundle_name = []
+            bundle_list = xml_list + genshi_list
+            for bundle_path in bundle_list:
+                # print() call here for py3k compatibility (was a bare
+                # Python 2 print statement)
+                print("matching %s" % bundle_path)
+                bundle_name.append(rg.search(bundle_path).group(1))
+            text = "Available bundles (Number of bundles: %s)" % \
+                   (len(bundle_list))
+            print(text)
+            print("%s" % (len(text) * "-"))
+            for i in range(len(bundle_list)):
+                print("[%i]\t%s" % (i, bundle_name[i]))
+            try:
+                lineno = raw_input("Enter the line number of a bundle for details: ")
+            except NameError:
+                lineno = input("Enter the line number of a bundle for details: ")
+            if int(lineno) >= int(len(bundle_list)):
+                print("No line with this number.")
+            else:
+                if '%s/Bundler/%s' % \
+                   (repo, bundle_name[int(lineno)]) in genshi_list:
+                    print("Detailed output for *.genshi bundles is not supported.")
+                else:
+                    print('Details for the "%s" bundle:' % \
+                          (bundle_name[int(lineno)].split('.')[0]))
+                    tree = lxml.etree.parse(bundle_list[int(lineno)])
+                    #Prints bundle content
+                    #print(lxml.etree.tostring(tree))
+                    names = ['Action', 'Package', 'Path', 'Service']
+                    for name in names:
+                        for node in tree.findall("//" + name):
+                            print("%s:\t%s" % (name, node.attrib["name"]))
+        else:
+            print("No command specified")
+            raise SystemExit(1)
diff --git a/src/lib/Bcfg2/Server/Admin/Client.py b/src/lib/Bcfg2/Server/Admin/Client.py
new file mode 100644
index 000000000..4d580c54c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Admin/Client.py
@@ -0,0 +1,64 @@
+import lxml.etree
+import Bcfg2.Server.Admin
+from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
+
+
+class Client(Bcfg2.Server.Admin.MetadataCore):
+    __shorthelp__ = "Create, delete, or modify client entries"
+    __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin client add <client> "
+                    "attr1=val1 attr2=val2"
+                    "\nbcfg2-admin client update <client> "
+                    "attr1=val1 attr2=val2"
+                    "\nbcfg2-admin client list"
+                    "\nbcfg2-admin client del <client>\n")
+    __usage__ = ("bcfg2-admin client [options] [add|del|update|list] [attr=val]")
+
+    def __call__(self, args):
+        Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+        if len(args) == 0:
+            self.errExit("No argument specified.\n"
+                         "Please see bcfg2-admin client help for usage.")
+        if args[0] == 'add':
+            attr_d = {}
+            for i in args[2:]:
+                attr, val = i.split('=', 1)
+                if attr not in ['profile', 'uuid', 'password',
+                                'location', 'secure', 'address',
+                                'auth']:
+                    print("Attribute %s unknown" % attr)
+                    raise SystemExit(1)
+                attr_d[attr] = val
+            try:
+                self.metadata.add_client(args[1], attr_d)
+            except MetadataConsistencyError:
+                print("Error in adding client")
+                raise SystemExit(1)
+        elif args[0] in ['update', 'up']:
+            attr_d = {}
+            for i in args[2:]:
+                attr, val = i.split('=', 1)
+                if attr not in ['profile', 'uuid', 'password',
+                                'location', 'secure', 'address',
+                                'auth']:
+                    print("Attribute %s unknown" % attr)
+                    raise SystemExit(1)
+                attr_d[attr] = val
+            try:
+                self.metadata.update_client(args[1], attr_d)
+            except MetadataConsistencyError:
+                print("Error in updating client")
+                raise SystemExit(1)
+        elif args[0] in ['delete', 'remove', 'del', 'rm']:
+            try:
+                self.metadata.remove_client(args[1])
+            except MetadataConsistencyError:
+                print("Error in deleting client")
+                raise SystemExit(1)
+        elif args[0] in ['list', 'ls']:
+            tree = lxml.etree.parse(self.metadata.data + "/clients.xml")
+            tree.xinclude()
+            for node in tree.findall("//Client"):
+                print(node.attrib["name"])
+        else:
print("No command specified") + raise SystemExit(1) diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py new file mode 100644 index 000000000..050dd69f8 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Compare.py @@ -0,0 +1,151 @@ +import lxml.etree +import os + +import Bcfg2.Server.Admin + + +class Compare(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Determine differences between files or " + "directories of client specification instances") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin compare <file1> <file2>" + "\nbcfg2-admin compare -r <dir1> <dir2>") + __usage__ = ("bcfg2-admin compare <old> <new>\n\n" + " -r\trecursive") + + def __init__(self, setup): + Bcfg2.Server.Admin.Mode.__init__(self, setup) + self.important = {'Path': ['name', 'type', 'owner', 'group', 'perms', + 'important', 'paranoid', 'sensitive', + 'dev_type', 'major', 'minor', 'prune', + 'encoding', 'empty', 'to', 'recursive', + 'vcstype', 'sourceurl', 'revision'], + 'Package': ['name', 'type', 'version', 'simplefile', + 'verify'], + 'Service': ['name', 'type', 'status', 'mode', + 'target', 'sequence', 'parameters'], + 'Action': ['name', 'timing', 'when', 'status', + 'command'], + 'PostInstall': ['name'] + } + + def compareStructures(self, new, old): + if new.tag == 'Independent': + bundle = 'Base' + else: + bundle = new.get('name') + + identical = True + + for child in new.getchildren(): + if child.tag not in self.important: + print(" %s in (new) bundle %s:\n tag type not handled!" % + (child.tag, bundle)) + continue + equiv = old.xpath('%s[@name="%s"]' % + (child.tag, child.get('name'))) + if len(equiv) == 0: + print(" %s %s in bundle %s:\n only in new configuration" % + (child.tag, child.get('name'), bundle)) + identical = False + continue + diff = [] + if child.tag == 'Path' and child.get('type') == 'file' and \ + child.text != equiv[0].text: + diff.append('contents') + attrdiff = [field for field in self.important[child.tag] if \ + child.get(field) != equiv[0].get(field)] + if attrdiff: + diff.append('attributes (%s)' % ', '.join(attrdiff)) + if diff: + print(" %s %s in bundle %s:\n %s differ" % (child.tag, \ + child.get('name'), bundle, ' and '.join(diff))) + identical = False + + for child in old.getchildren(): + if child.tag not in self.important: + print(" %s in (old) bundle %s:\n tag type not handled!" 
% + (child.tag, bundle)) + elif len(new.xpath('%s[@name="%s"]' % + (child.tag, child.get('name')))) == 0: + print(" %s %s in bundle %s:\n only in old configuration" % + (child.tag, child.get('name'), bundle)) + identical = False + + return identical + + def compareSpecifications(self, path1, path2): + try: + new = lxml.etree.parse(path1).getroot() + except IOError: + print("Failed to read %s" % (path1)) + raise SystemExit(1) + + try: + old = lxml.etree.parse(path2).getroot() + except IOError: + print("Failed to read %s" % (path2)) + raise SystemExit(1) + + for src in [new, old]: + for bundle in src.findall('./Bundle'): + if bundle.get('name')[-4:] == '.xml': + bundle.set('name', bundle.get('name')[:-4]) + + identical = True + + for bundle in old.findall('./Bundle'): + if len(new.xpath('Bundle[@name="%s"]' % (bundle.get('name')))) == 0: + print(" Bundle %s only in old configuration" % + bundle.get('name')) + identical = False + for bundle in new.findall('./Bundle'): + equiv = old.xpath('Bundle[@name="%s"]' % (bundle.get('name'))) + if len(equiv) == 0: + print(" Bundle %s only in new configuration" % + bundle.get('name')) + identical = False + elif not self.compareStructures(bundle, equiv[0]): + identical = False + + i1 = lxml.etree.Element('Independent') + i2 = lxml.etree.Element('Independent') + i1.extend(new.findall('./Independent/*')) + i2.extend(old.findall('./Independent/*')) + if not self.compareStructures(i1, i2): + identical = False + + return identical + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin compare help for usage.") + if '-r' in args: + args = list(args) + args.remove('-r') + (oldd, newd) = args + (old, new) = [os.listdir(spot) for spot in args] + old_extra = [] + for item in old: + if item not in new: + old_extra.append(item) + continue + print("File: %s" % item) + state = self.__call__([oldd + '/' + item, newd + '/' + item]) + new.remove(item) + if state: + print("File %s is good" % item) + else: + print("File %s is bad" % item) + if new: + print("%s has extra files: %s" % (newd, ', '.join(new))) + if old_extra: + print("%s has extra files: %s" % (oldd, ', '.join(old_extra))) + return + try: + (old, new) = args + return self.compareSpecifications(new, old) + except IndexError: + print(self.__call__.__doc__) + raise SystemExit(1) diff --git a/src/lib/Bcfg2/Server/Admin/Group.py b/src/lib/Bcfg2/Server/Admin/Group.py new file mode 100644 index 000000000..16a773d6f --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Group.py @@ -0,0 +1,63 @@ +import lxml.etree +import Bcfg2.Server.Admin +from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError + + +class Group(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Create, delete, or modify group entries" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin group add <group> " + "attr1=val1 attr2=val2" + "\nbcfg2-admin group update <group> " + "attr1=val1 attr2=val2" + "\nbcfg2-admin group list" + "\nbcfg2-admin group del <group>\n") + __usage__ = ("bcfg2-admin group [options] [add|del|update|list] [attr=val]") + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin group help for usage.") + if args[0] == 'add': + attr_d = {} + for i in args[2:]: + attr, val = i.split('=', 1) + if attr not in ['profile', 'public', 'default', + 'name', 'auth', 'toolset', 'category', + 'comment']: + 
print("Attribute %s unknown" % attr) + raise SystemExit(1) + attr_d[attr] = val + try: + self.metadata.add_group(args[1], attr_d) + except MetadataConsistencyError: + print("Error in adding group") + raise SystemExit(1) + elif args[0] in ['update', 'up']: + attr_d = {} + for i in args[2:]: + attr, val = i.split('=', 1) + if attr not in ['profile', 'public', 'default', + 'name', 'auth', 'toolset', 'category', + 'comment']: + print("Attribute %s unknown" % attr) + raise SystemExit(1) + attr_d[attr] = val + try: + self.metadata.update_group(args[1], attr_d) + except MetadataConsistencyError: + print("Error in updating group") + raise SystemExit(1) + elif args[0] in ['delete', 'remove', 'del', 'rm']: + try: + self.metadata.remove_group(args[1]) + except MetadataConsistencyError: + print("Error in deleting group") + raise SystemExit(1) + elif args[0] in ['list', 'ls']: + tree = lxml.etree.parse(self.metadata.data + "/groups.xml") + for node in tree.findall("//Group"): + print(node.attrib["name"]) + else: + print("No command specified") + raise SystemExit(1) diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py new file mode 100644 index 000000000..c1f9ed484 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Init.py @@ -0,0 +1,359 @@ +import getpass +import os +import random +import socket +import stat +import string +import sys +import subprocess +import Bcfg2.Server.Admin +import Bcfg2.Server.Plugin +import Bcfg2.Options + +# default config file +config = ''' +[server] +repository = %s +plugins = %s + +[statistics] +sendmailpath = %s +database_engine = sqlite3 +# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. +database_name = +# Or path to database file if using sqlite3. +#<repository>/etc/brpt.sqlite is default path if left empty +database_user = +# Not used with sqlite3. +database_password = +# Not used with sqlite3. +database_host = +# Not used with sqlite3. 
+database_port =
+
+[communication]
+protocol = %s
+password = %s
+certificate = %s
+key = %s
+ca = %s
+
+[components]
+bcfg2 = %s
+'''

+# Default groups
+groups = '''<Groups version='3.0'>
+   <Group profile='true' public='true' default='true' name='basic'>
+      <Group name='%s'/>
+   </Group>
+   <Group name='ubuntu'/>
+   <Group name='debian'/>
+   <Group name='freebsd'/>
+   <Group name='gentoo'/>
+   <Group name='redhat'/>
+   <Group name='suse'/>
+   <Group name='mandrake'/>
+   <Group name='solaris'/>
+   <Group name='arch'/>
+</Groups>
+'''
+
+# Default contents of clients.xml
+clients = '''<Clients version="3.0">
+   <Client profile="basic" pingable="Y" pingtime="0" name="%s"/>
+</Clients>
+'''
+
+# Mapping of operating system names to groups
+os_list = [('Red Hat/Fedora/RHEL/RHAS/Centos', 'redhat'),
+           ('SUSE/SLES', 'suse'),
+           ('Mandrake', 'mandrake'),
+           ('Debian', 'debian'),
+           ('Ubuntu', 'ubuntu'),
+           ('Gentoo', 'gentoo'),
+           ('FreeBSD', 'freebsd'),
+           ('Arch', 'arch')]
+
+# Complete list of plugins
+plugin_list = ['Account',
+               'Base',
+               'Bundler',
+               'Bzr',
+               'Cfg',
+               'Decisions',
+               'Deps',
+               'Git',
+               'Guppy',
+               'Hg',
+               'Metadata',
+               'NagiosGen',
+               'Ohai',
+               'Packages',
+               'Pkgmgr',
+               'Probes',
+               'Properties',
+               'Rules',
+               'Snapshots',
+               'SSHbase',
+               'SSLCA',
+               'Statistics',
+               'Svcmgr',
+               'TCheetah',
+               'TGenshi']
+
+# Default list of plugins to use
+default_plugins = Bcfg2.Options.SERVER_PLUGINS.default
+
+
+def get_input(prompt):
+    """py3k compatible function to get input"""
+    try:
+        return raw_input(prompt)
+    except NameError:
+        return input(prompt)
+
+
+def gen_password(length):
+    """Generates a random alphanumeric password with length characters."""
+    # string.ascii_letters rather than the Python 2-only string.letters,
+    # for py3k compatibility
+    chars = string.ascii_letters + string.digits
+    newpasswd = ''
+    for i in range(length):
+        newpasswd = newpasswd + random.choice(chars)
+    return newpasswd
+
+
+def create_key(hostname, keypath, certpath, country, state, location):
+    """Creates a bcfg2.key at the directory specified by keypath."""
+    kcstr = ("openssl req -batch -x509 -nodes -subj '/C=%s/ST=%s/L=%s/CN=%s' "
+             "-days 1000 -newkey rsa:2048 -keyout %s -noout" % (country,
+                                                                state,
+                                                                location,
+                                                                hostname,
+                                                                keypath))
+    subprocess.call((kcstr), shell=True)
+    ccstr = ("openssl req -batch -new -subj '/C=%s/ST=%s/L=%s/CN=%s' -key %s "
+             "| openssl x509 -req -days 1000 -signkey %s -out %s" % (country,
+                                                                     state,
+                                                                     location,
+                                                                     hostname,
+                                                                     keypath,
+                                                                     keypath,
+                                                                     certpath))
+    subprocess.call((ccstr), shell=True)
+    os.chmod(keypath, stat.S_IRUSR | stat.S_IWUSR)  # 0600
+
+
+def create_conf(confpath, confdata, keypath):
+    # Don't overwrite existing bcfg2.conf file
+    if os.path.exists(confpath):
+        result = get_input("\nWarning: %s already exists. "
+                           "Overwrite? 
[y/N]: " % confpath) + if result not in ['Y', 'y']: + print("Leaving %s unchanged" % confpath) + return + try: + open(confpath, "w").write(confdata) + os.chmod(confpath, stat.S_IRUSR | stat.S_IWUSR) # 0600 + except Exception: + e = sys.exc_info()[1] + print("Error %s occured while trying to write configuration " + "file to '%s'.\n" % + (e, confpath)) + raise SystemExit(1) + + +class Init(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Interactively initialize a new repository.") + __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin init" + __usage__ = "bcfg2-admin init" + options = {'configfile': Bcfg2.Options.CFILE, + 'plugins': Bcfg2.Options.SERVER_PLUGINS, + 'proto': Bcfg2.Options.SERVER_PROTOCOL, + 'repo': Bcfg2.Options.SERVER_REPOSITORY, + 'sendmail': Bcfg2.Options.SENDMAIL_PATH} + repopath = "" + response = "" + + def _set_defaults(self): + """Set default parameters.""" + self.configfile = self.opts['configfile'] + self.repopath = self.opts['repo'] + self.password = gen_password(8) + self.server_uri = "https://%s:6789" % socket.getfqdn() + self.plugins = default_plugins + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + + # Parse options + self.opts = Bcfg2.Options.OptionParser(self.options) + self.opts.parse(args) + self._set_defaults() + + # Prompt the user for input + self._prompt_config() + self._prompt_repopath() + self._prompt_password() + self._prompt_hostname() + self._prompt_server() + self._prompt_groups() + # self._prompt_plugins() + self._prompt_certificate() + + # Initialize the repository + self.init_repo() + + def _prompt_hostname(self): + """Ask for the server hostname.""" + data = get_input("What is the server's hostname [%s]: " % + socket.getfqdn()) + if data != '': + self.shostname = data + else: + self.shostname = socket.getfqdn() + + def _prompt_config(self): + """Ask for the configuration file path.""" + newconfig = get_input("Store Bcfg2 configuration in [%s]: " % + self.configfile) + if newconfig != '': + self.configfile = os.path.abspath(newconfig) + + def _prompt_repopath(self): + """Ask for the repository path.""" + while True: + newrepo = get_input("Location of Bcfg2 repository [%s]: " % + self.repopath) + if newrepo != '': + self.repopath = os.path.abspath(newrepo) + if os.path.isdir(self.repopath): + response = get_input("Directory %s exists. Overwrite? [y/N]:" \ + % self.repopath) + if response.lower().strip() == 'y': + break + else: + break + + def _prompt_password(self): + """Ask for a password or generate one if none is provided.""" + newpassword = getpass.getpass( + "Input password used for communication verification " + "(without echoing; leave blank for a random): ").strip() + if len(newpassword) != 0: + self.password = newpassword + + def _prompt_server(self): + """Ask for the server name.""" + newserver = get_input("Input the server location [%s]: " % + self.server_uri) + if newserver != '': + self.server_uri = newserver + + def _prompt_groups(self): + """Create the groups.xml file.""" + prompt = '''Input base Operating System for clients:\n''' + for entry in os_list: + prompt += "%d: %s\n" % (os_list.index(entry) + 1, entry[0]) + prompt += ': ' + while True: + try: + osidx = int(get_input(prompt)) + self.os_sel = os_list[osidx - 1][1] + break + except ValueError: + continue + + def _prompt_plugins(self): + default = get_input("Use default plugins? 
(%s) [Y/n]: " % + ''.join(default_plugins)).lower() + if default != 'y' or default != '': + while True: + plugins_are_valid = True + plug_str = get_input("Specify plugins: ") + plugins = plug_str.split(',') + for plugin in plugins: + plugin = plugin.strip() + if not plugin in plugin_list: + plugins_are_valid = False + print("ERROR: Plugin %s not recognized" % plugin) + if plugins_are_valid: + break + + def _prompt_certificate(self): + """Ask for the key details (country, state, and location).""" + print("The following questions affect SSL certificate generation.") + print("If no data is provided, the default values are used.") + newcountry = get_input("Country name (2 letter code) for certificate: ") + if newcountry != '': + if len(newcountry) == 2: + self.country = newcountry + else: + while len(newcountry) != 2: + newcountry = get_input("2 letter country code (eg. US): ") + if len(newcountry) == 2: + self.country = newcountry + break + else: + self.country = 'US' + + newstate = get_input("State or Province Name (full name) for certificate: ") + if newstate != '': + self.state = newstate + else: + self.state = 'Illinois' + + newlocation = get_input("Locality Name (eg, city) for certificate: ") + if newlocation != '': + self.location = newlocation + else: + self.location = 'Argonne' + + def _init_plugins(self): + """Initialize each plugin-specific portion of the repository.""" + for plugin in self.plugins: + if plugin == 'Metadata': + Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(self.repopath, + groups, + self.os_sel, + clients) + else: + try: + module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '', + '', ["Bcfg2.Server.Plugins"]) + cls = getattr(module, plugin) + cls.init_repo(self.repopath) + except Exception: + e = sys.exc_info()[1] + print("Plugin setup for %s failed: %s\n" + "Check that dependencies are installed?" % (plugin, e)) + + def init_repo(self): + """Setup a new repo and create the content of the configuration file.""" + keypath = os.path.dirname(self.configfile) + kpath = os.path.join(keypath, 'bcfg2.key') + cpath = os.path.join(keypath, 'bcfg2.crt') + + confdata = config % (self.repopath, + ','.join(self.plugins), + self.opts['sendmail'], + self.opts['proto'], + self.password, + cpath, + kpath, + cpath, + self.server_uri) + + # Create the configuration file and SSL key + create_conf(self.configfile, confdata, keypath) + create_key(self.shostname, kpath, cpath, self.country, + self.state, self.location) + + # Create the repository + path = os.path.join(self.repopath, 'etc') + try: + os.makedirs(path) + self._init_plugins() + print("Repository created successfuly in %s" % (self.repopath)) + except OSError: + print("Failed to create %s." 
% path) diff --git a/src/lib/Bcfg2/Server/Admin/Minestruct.py b/src/lib/Bcfg2/Server/Admin/Minestruct.py new file mode 100644 index 000000000..b929a9a8c --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Minestruct.py @@ -0,0 +1,65 @@ +import getopt +import lxml.etree +import sys + +import Bcfg2.Server.Admin + +class Minestruct(Bcfg2.Server.Admin.StructureMode): + """Pull extra entries out of statistics.""" + __shorthelp__ = "Extract extra entry lists from statistics" + __longhelp__ = (__shorthelp__ + + "\n\nbcfg2-admin minestruct [-f filename] " + "[-g groups] client\n") + __usage__ = ("bcfg2-admin minestruct [options] <client>\n\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-f <filename>", + "build a particular file", + "-g <groups>", + "only build config for groups")) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin minestruct help for usage.") + try: + (opts, args) = getopt.getopt(args, 'f:g:h') + except: + self.log.error(self.__shorthelp__) + raise SystemExit(1) + + client = args[0] + output = sys.stdout + groups = [] + + for (opt, optarg) in opts: + if opt == '-f': + try: + output = open(optarg, 'w') + except IOError: + self.log.error("Failed to open file: %s" % (optarg)) + raise SystemExit(1) + elif opt == '-g': + groups = optarg.split(':') + + try: + extra = set() + for source in self.bcore.pull_sources: + for item in source.GetExtra(client): + extra.add(item) + except: + self.log.error("Failed to find extra entry info for client %s" % + client) + raise SystemExit(1) + root = lxml.etree.Element("Base") + self.log.info("Found %d extra entries" % (len(extra))) + add_point = root + for g in groups: + add_point = lxml.etree.SubElement(add_point, "Group", name=g) + for tag, name in extra: + self.log.info("%s: %s" % (tag, name)) + lxml.etree.SubElement(add_point, tag, name=name) + + tree = lxml.etree.ElementTree(root) + tree.write(output, pretty_print=True) diff --git a/src/lib/Bcfg2/Server/Admin/Perf.py b/src/lib/Bcfg2/Server/Admin/Perf.py new file mode 100644 index 000000000..411442698 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Perf.py @@ -0,0 +1,37 @@ +import sys + +import Bcfg2.Options +import Bcfg2.Proxy +import Bcfg2.Server.Admin + + +class Perf(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Query server for performance data") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin perf\n") + __usage__ = ("bcfg2-admin perf") + + def __call__(self, args): + output = [('Name', 'Min', 'Max', 'Mean', 'Count')] + optinfo = { + 'ca': Bcfg2.Options.CLIENT_CA, + 'certificate': Bcfg2.Options.CLIENT_CERT, + 'key': Bcfg2.Options.SERVER_KEY, + 'password': Bcfg2.Options.SERVER_PASSWORD, + 'server': Bcfg2.Options.SERVER_LOCATION, + 'user': Bcfg2.Options.CLIENT_USER, + 'timeout': Bcfg2.Options.CLIENT_TIMEOUT, + } + setup = Bcfg2.Options.OptionParser(optinfo) + setup.parse(sys.argv[1:]) + proxy = Bcfg2.Proxy.ComponentProxy(setup['server'], + setup['user'], + setup['password'], + key=setup['key'], + cert=setup['certificate'], + ca=setup['ca'], + timeout=setup['timeout']) + data = proxy.get_statistics() + for key, value in list(data.items()): + data = tuple(["%.06f" % (item) for item in value[:-1]] + [value[-1]]) + output.append((key, ) + data) + self.print_table(output) diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py new file mode 100644 index 000000000..daf353107 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Pull.py @@ -0,0 +1,154 @@ +import getopt +import 
sys
+
+import Bcfg2.Server.Admin
+
+
+class Pull(Bcfg2.Server.Admin.MetadataCore):
+    """Pull mode retrieves entries from clients and
+    integrates the information into the repository.
+    """
+    __shorthelp__ = ("Integrate configuration information "
+                     "from clients into the server repository")
+    __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin pull [-v] [-f] [-I] [-s] "
+                    "<client> <entry type> <entry name>\n")
+    __usage__ = ("bcfg2-admin pull [options] <client> <entry type> "
+                 "<entry name>\n\n"
+                 "     %-25s%s\n"
+                 "     %-25s%s\n"
+                 "     %-25s%s\n"
+                 "     %-25s%s\n" %
+                 ("-v",
+                  "be verbose",
+                  "-f",
+                  "force",
+                  "-I",
+                  "interactive",
+                  "-s",
+                  "stdin"))
+    allowed = ['Metadata', 'BB', "DBStats", "Statistics", "Cfg", "SSHbase"]
+
+    def __init__(self, setup):
+        Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
+        self.log = False
+        self.mode = 'interactive'
+
+    def __call__(self, args):
+        Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+        use_stdin = False
+        try:
+            opts, gargs = getopt.getopt(args, 'vfIs')
+        except:
+            print(self.__shorthelp__)
+            raise SystemExit(1)
+        for opt in opts:
+            if opt[0] == '-v':
+                self.log = True
+            elif opt[0] == '-f':
+                self.mode = 'force'
+            elif opt[0] == '-I':
+                # assignment, not comparison: the original "==" made -I a no-op
+                self.mode = 'interactive'
+            elif opt[0] == '-s':
+                use_stdin = True
+
+        if use_stdin:
+            for line in sys.stdin:
+                try:
+                    self.PullEntry(*line.split(None, 3))
+                except SystemExit:
+                    print("  for %s" % line)
+                except:
+                    print("Bad entry: %s" % line.strip())
+        elif len(gargs) < 3:
+            print(self.__longhelp__)
+            raise SystemExit(1)
+        else:
+            self.PullEntry(gargs[0], gargs[1], gargs[2])
+
+    def BuildNewEntry(self, client, etype, ename):
+        """Construct a new full entry for
+        given client/entry from statistics.
+        """
+        new_entry = {'type': etype, 'name': ename}
+        for plugin in self.bcore.pull_sources:
+            try:
+                (owner, group, perms, contents) = \
+                    plugin.GetCurrentEntry(client, etype, ename)
+                break
+            except Bcfg2.Server.Plugin.PluginExecutionError:
+                if plugin == self.bcore.pull_sources[-1]:
+                    print("Pull Source failure; could not fetch current state")
+                    raise SystemExit(1)
+
+        try:
+            data = {'owner': owner,
+                    'group': group,
+                    'perms': perms,
+                    'text': contents}
+        except UnboundLocalError:
+            print("Unable to build entry. "
+                  "Do you have a statistics plugin enabled?")
+            raise SystemExit(1)
+        for k, v in list(data.items()):
+            if v:
+                new_entry[k] = v
+        #print new_entry
+        return new_entry
+
+    def Choose(self, choices):
+        """Determine where to put pull data."""
+        if self.mode == 'interactive':
+            for choice in choices:
+                print("Plugin returned choice:")
+                if id(choice) == id(choices[0]):
+                    print("(current entry) ")
+                if choice.all:
+                    print(" => global entry")
+                elif choice.group:
+                    print(" => group entry: %s (prio %d)" %
+                          (choice.group, choice.prio))
+                else:
+                    print(" => host entry: %s" % (choice.hostname))
+                # py3k compatibility
+                try:
+                    ans = raw_input("Use this entry? [yN]: ") in ['y', 'Y']
+                except NameError:
+                    ans = input("Use this entry? 
[yN]: ") in ['y', 'Y'] + if ans: + return choice + return False + else: + # mode == 'force' + if not choices: + return False + return choices[0] + + def PullEntry(self, client, etype, ename): + """Make currently recorded client state correct for entry.""" + new_entry = self.BuildNewEntry(client, etype, ename) + + meta = self.bcore.build_metadata(client) + # Find appropriate plugin in bcore + glist = [gen for gen in self.bcore.generators if + ename in gen.Entries.get(etype, {})] + if len(glist) != 1: + self.errExit("Got wrong numbers of matching generators for entry:" \ + + "%s" % ([g.name for g in glist])) + plugin = glist[0] + if not isinstance(plugin, Bcfg2.Server.Plugin.PullTarget): + self.errExit("Configuration upload not supported by plugin %s" \ + % (plugin.name)) + try: + choices = plugin.AcceptChoices(new_entry, meta) + specific = self.Choose(choices) + if specific: + plugin.AcceptPullData(specific, new_entry, self.log) + except Bcfg2.Server.Plugin.PluginExecutionError: + self.errExit("Configuration upload not supported by plugin %s" \ + % (plugin.name)) + # Commit if running under a VCS + for vcsplugin in list(self.bcore.plugins.values()): + if isinstance(vcsplugin, Bcfg2.Server.Plugin.Version): + files = "%s/%s" % (plugin.data, ename) + comment = 'file "%s" pulled from host %s' % (files, client) + vcsplugin.commit_data([files], comment) diff --git a/src/lib/Bcfg2/Server/Admin/Query.py b/src/lib/Bcfg2/Server/Admin/Query.py new file mode 100644 index 000000000..3dd326645 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Query.py @@ -0,0 +1,72 @@ +import sys +import logging +import Bcfg2.Logger +import Bcfg2.Server.Admin + + +class Query(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Query clients" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin query [-n] [-c] " + "[-f filename] g=group p=profile") + __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile>\n\n" + " %-25s%s\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-n", + "query results delimited with newlines", + "-c", + "query results delimited with commas", + "-f filename", + "write query to file")) + + def __init__(self, setup): + Bcfg2.Server.Admin.MetadataCore.__init__(self, setup) + logging.root.setLevel(100) + Bcfg2.Logger.setup_logging(100, to_console=False, to_syslog=False) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + clients = list(self.metadata.clients.keys()) + filename_arg = False + filename = None + for arg in args: + if filename_arg == True: + filename = arg + filename_arg = False + continue + if arg in ['-n', '-c']: + continue + if arg in ['-f']: + filename_arg = True + continue + try: + k, v = arg.split('=') + except: + print("Unknown argument %s" % arg) + continue + if k == 'p': + nc = self.metadata.get_client_names_by_profiles(v.split(',')) + elif k == 'g': + nc = self.metadata.get_client_names_by_groups(v.split(',')) + # add probed groups (if present) + for conn in self.bcore.connectors: + if isinstance(conn, Bcfg2.Server.Plugins.Probes.Probes): + for c, glist in list(conn.cgroups.items()): + for g in glist: + if g in v.split(','): + nc.append(c) + else: + print("One of g= or p= must be specified") + raise SystemExit(1) + clients = [c for c in clients if c in nc] + if '-n' in args: + for client in clients: + print(client) + else: + print(','.join(clients)) + if '-f' in args: + f = open(filename, "w") + for client in clients: + f.write(client + "\n") + f.close() + print("Wrote results to %s" % (filename)) diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py 
b/src/lib/Bcfg2/Server/Admin/Reports.py new file mode 100644 index 000000000..974cdff9d --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Reports.py @@ -0,0 +1,376 @@ +'''Admin interface for dynamic reports''' +import Bcfg2.Logger +import Bcfg2.Server.Admin +import datetime +import os +import logging +import pickle +import platform +import sys +import traceback +from lxml.etree import XML, XMLSyntaxError + +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + +# FIXME: Remove when server python dep is 2.5 or greater +if sys.version_info >= (2, 5): + from hashlib import md5 +else: + from md5 import md5 + +# Prereq issues can be signaled with ImportError, so no try needed +# FIXME - settings file uses a hardcoded path for /etc/bcfg2.conf +import Bcfg2.Server.Reports.settings + +# Load django and reports stuff _after_ we know we can load settings +import django.core.management +from Bcfg2.Server.Reports.importscript import load_stats +from Bcfg2.Server.Reports.updatefix import update_database +from Bcfg2.Server.Reports.utils import * + +project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__) +project_name = os.path.basename(project_directory) +sys.path.append(os.path.join(project_directory, '..')) +project_module = __import__(project_name, '', '', ['']) +sys.path.pop() + +# Set DJANGO_SETTINGS_MODULE appropriately. +os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name +from django.db import connection, transaction + +from Bcfg2.Server.Reports.reports.models import Client, Interaction, Entries, \ + Entries_interactions, Performance, \ + Reason, Ping + + +def printStats(fn): + """ + Print db stats. + + Decorator for purging. Prints database statistics after a run. + """ + def print_stats(self, *data): + start_client = Client.objects.count() + start_i = Interaction.objects.count() + start_ei = Entries_interactions.objects.count() + start_perf = Performance.objects.count() + start_ping = Ping.objects.count() + + fn(self, *data) + + self.log.info("Clients removed: %s" % + (start_client - Client.objects.count())) + self.log.info("Interactions removed: %s" % + (start_i - Interaction.objects.count())) + self.log.info("Interactions->Entries removed: %s" % + (start_ei - Entries_interactions.objects.count())) + self.log.info("Metrics removed: %s" % + (start_perf - Performance.objects.count())) + self.log.info("Ping metrics removed: %s" % + (start_ping - Ping.objects.count())) + + return print_stats + + +class Reports(Bcfg2.Server.Admin.Mode): + '''Admin interface for dynamic reports''' + __shorthelp__ = "Manage dynamic reports" + __longhelp__ = (__shorthelp__) + django_commands = ['syncdb', 'sqlall', 'validate'] + __usage__ = ("bcfg2-admin reports [command] [options]\n" + " -v|--verbose Be verbose\n" + " -q|--quiet Print only errors\n" + "\n" + " Commands:\n" + " init Initialize the database\n" + " load_stats Load statistics data\n" + " -s|--stats Path to statistics.xml file\n" + " -c|--clients-file Path to clients.xml file\n" + " -O3 Fast mode. 
Duplicates data!\n" + " purge Purge records\n" + " --client [n] Client to operate on\n" + " --days [n] Records older then n days\n" + " --expired Expired clients only\n" + " scrub Scrub the database for duplicate reasons and orphaned entries\n" + " update Apply any updates to the reporting database\n" + "\n" + " Django commands:\n " + "\n ".join(django_commands)) + + def __init__(self, setup): + Bcfg2.Server.Admin.Mode.__init__(self, setup) + self.log.setLevel(logging.INFO) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0 or args[0] == '-h': + print(self.__usage__) + raise SystemExit(0) + + verb = 0 + + if '-v' in args or '--verbose' in args: + self.log.setLevel(logging.DEBUG) + verb = 1 + if '-q' in args or '--quiet' in args: + self.log.setLevel(logging.WARNING) + + # FIXME - dry run + + if args[0] in self.django_commands: + self.django_command_proxy(args[0]) + elif args[0] == 'scrub': + self.scrub() + elif args[0] == 'init': + update_database() + elif args[0] == 'update': + update_database() + elif args[0] == 'load_stats': + quick = '-O3' in args + stats_file = None + clients_file = None + i = 1 + while i < len(args): + if args[i] == '-s' or args[i] == '--stats': + stats_file = args[i + 1] + if stats_file[0] == '-': + self.errExit("Invalid statistics file: %s" % stats_file) + elif args[i] == '-c' or args[i] == '--clients-file': + clients_file = args[i + 1] + if clients_file[0] == '-': + self.errExit("Invalid clients file: %s" % clients_file) + i = i + 1 + self.load_stats(stats_file, clients_file, verb, quick) + elif args[0] == 'purge': + expired = False + client = None + maxdate = None + state = None + i = 1 + while i < len(args): + if args[i] == '-c' or args[i] == '--client': + if client: + self.errExit("Only one client per run") + client = args[i + 1] + print(client) + i = i + 1 + elif args[i] == '--days': + if maxdate: + self.errExit("Max date specified multiple times") + try: + maxdate = datetime.datetime.now() - datetime.timedelta(days=int(args[i + 1])) + except: + self.log.error("Invalid number of days: %s" % args[i + 1]) + raise SystemExit(-1) + i = i + 1 + elif args[i] == '--expired': + expired = True + i = i + 1 + if expired: + if state: + self.log.error("--state is not valid with --expired") + raise SystemExit(-1) + self.purge_expired(maxdate) + else: + self.purge(client, maxdate, state) + else: + print("Unknown command: %s" % args[0]) + + @transaction.commit_on_success + def scrub(self): + ''' Perform a thorough scrub and cleanup of the database ''' + + # Currently only reasons are a problem + try: + start_count = Reason.objects.count() + except Exception: + e = sys.exc_info()[1] + self.log.error("Failed to load reason objects: %s" % e) + return + dup_reasons = [] + + cmp_reasons = dict() + batch_update = [] + for reason in BatchFetch(Reason.objects): + ''' Loop through each reason and create a key out of the data. 
\ + This lets us take advantage of a fast hash lookup for \ + comparisons ''' + id = reason.id + reason.id = None + key = md5(pickle.dumps(reason)).hexdigest() + reason.id = id + + if key in cmp_reasons: + self.log.debug("Update interactions from %d to %d" \ + % (reason.id, cmp_reasons[key])) + dup_reasons.append([reason.id]) + batch_update.append([cmp_reasons[key], reason.id]) + else: + cmp_reasons[key] = reason.id + self.log.debug("key %d" % reason.id) + + self.log.debug("Done with updates, deleting dupes") + try: + cursor = connection.cursor() + cursor.executemany('update reports_entries_interactions set reason_id=%s where reason_id=%s', batch_update) + cursor.executemany('delete from reports_reason where id = %s', dup_reasons) + transaction.set_dirty() + except Exception: + ex = sys.exc_info()[1] + self.log.error("Failed to delete reasons: %s" % ex) + raise + + self.log.info("Found %d dupes out of %d" % (len(dup_reasons), start_count)) + + # Cleanup orphans + start_count = Reason.objects.count() + Reason.prune_orphans() + self.log.info("Pruned %d Reason records" % (start_count - Reason.objects.count())) + + start_count = Entries.objects.count() + Entries.prune_orphans() + self.log.info("Pruned %d Entries records" % (start_count - Entries.objects.count())) + + def django_command_proxy(self, command): + '''Call a django command''' + if command == 'sqlall': + django.core.management.call_command(command, 'reports') + else: + django.core.management.call_command(command) + + def load_stats(self, stats_file=None, clientspath=None, verb=0, quick=False): + '''Load statistics data into the database''' + location = '' + + if not stats_file: + try: + stats_file = "%s/etc/statistics.xml" % self.cfp.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + self.errExit("Could not read bcfg2.conf; exiting") + try: + statsdata = XML(open(stats_file).read()) + except (IOError, XMLSyntaxError): + self.errExit("StatReports: Failed to parse %s" % (stats_file)) + + try: + encoding = self.cfp.get('components', 'encoding') + except: + encoding = 'UTF-8' + + if not clientspath: + try: + clientspath = "%s/Metadata/clients.xml" % \ + self.cfp.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + self.errExit("Could not read bcfg2.conf; exiting") + try: + clientsdata = XML(open(clientspath).read()) + except (IOError, XMLSyntaxError): + self.errExit("StatReports: Failed to parse %s" % (clientspath)) + + try: + load_stats(clientsdata, + statsdata, + encoding, + verb, + self.log, + quick=quick, + location=platform.node()) + except: + pass + + @printStats + def purge(self, client=None, maxdate=None, state=None): + '''Purge historical data from the database''' + + filtered = False # indicates whether or not a client should be deleted + + if not client and not maxdate and not state: + self.errExit("Reports.prune: Refusing to prune all data") + + ipurge = Interaction.objects + if client: + try: + cobj = Client.objects.get(name=client) + ipurge = ipurge.filter(client=cobj) + except Client.DoesNotExist: + self.log.error("Client %s not in database" % client) + raise SystemExit(-1) + self.log.debug("Filtering by client: %s" % client) + + if maxdate: + filtered = True + if not isinstance(maxdate, datetime.datetime): + raise TypeError("maxdate is not a DateTime object") + self.log.debug("Filtering by maxdate: %s" % maxdate) + ipurge = ipurge.filter(timestamp__lt=maxdate) + + # Handle ping data as well + ping = 
Ping.objects.filter(endtime__lt=maxdate) + if client: + ping = ping.filter(client=cobj) + ping.delete() + + if state: + filtered = True + if state not in ('dirty', 'clean', 'modified'): + raise TypeError("state is not one of the following values " + \ + "('dirty','clean','modified')") + self.log.debug("Filtering by state: %s" % state) + ipurge = ipurge.filter(state=state) + + count = ipurge.count() + rnum = 0 + try: + while rnum < count: + grp = list(ipurge[:1000].values("id")) + # just in case... + if not grp: + break + Interaction.objects.filter(id__in=[x['id'] for x in grp]).delete() + rnum += len(grp) + self.log.debug("Deleted %s of %s" % (rnum, count)) + except: + self.log.error("Failed to remove interactions") + (a, b, c) = sys.exc_info() + msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1] + del a, b, c + self.log.error(msg) + + # bulk operations bypass the Interaction.delete method + self.log.debug("Pruning orphan Performance objects") + Performance.prune_orphans() + self.log.debug("Pruning orphan Reason objects") + Reason.prune_orphans() + + if client and not filtered: + '''Delete the client, ping data is automatic''' + try: + self.log.debug("Purging client %s" % client) + cobj.delete() + except: + self.log.error("Failed to delete client %s" % client) + (a, b, c) = sys.exc_info() + msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1] + del a, b, c + self.log.error(msg) + + @printStats + def purge_expired(self, maxdate=None): + '''Purge expired clients from the database''' + + if maxdate: + if not isinstance(maxdate, datetime.datetime): + raise TypeError("maxdate is not a DateTime object") + self.log.debug("Filtering by maxdate: %s" % maxdate) + clients = Client.objects.filter(expiration__lt=maxdate) + else: + clients = Client.objects.filter(expiration__isnull=False) + + for client in clients: + self.log.debug("Purging client %s" % client) + Interaction.objects.filter(client=client).delete() + client.delete() + self.log.debug("Pruning orphan Performance objects") + Performance.prune_orphans() diff --git a/src/lib/Bcfg2/Server/Admin/Snapshots.py b/src/lib/Bcfg2/Server/Admin/Snapshots.py new file mode 100644 index 000000000..8bc56f1f1 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Snapshots.py @@ -0,0 +1,165 @@ +from datetime import date +import sys + +# Prereq issues can be signaled with ImportError, so no try needed +import sqlalchemy, sqlalchemy.orm +import Bcfg2.Server.Admin +import Bcfg2.Server.Snapshots +import Bcfg2.Server.Snapshots.model +from Bcfg2.Server.Snapshots.model import Snapshot, Client, Metadata, Base, \ + File, Group, Package, Service +# Compatibility import +from Bcfg2.Bcfg2Py3k import u_str + +class Snapshots(Bcfg2.Server.Admin.Mode): + __shorthelp__ = "Interact with the Snapshots system" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin snapshots init" + "\nbcfg2-admin query qtype\n") + __usage__ = ("bcfg2-admin snapshots [init|query qtype]") + + q_dispatch = {'client': Client, + 'group': Group, + 'metadata': Metadata, + 'package': Package, + 'snapshot': Snapshot} + + def __init__(self, setup): + Bcfg2.Server.Admin.Mode.__init__(self, setup) + self.session = Bcfg2.Server.Snapshots.setup_session(self.configfile) + self.cfile = self.configfile + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0 or args[0] == '-h': + print(self.__usage__) + raise SystemExit(0) + + if args[0] == 'query': + if args[1] in self.q_dispatch: + q_obj = self.q_dispatch[args[1]] + if q_obj == Client: + rows = [] + labels = 
('Client', 'Active') + for host in \ + self.session.query(q_obj).filter(q_obj.active == False): + rows.append([host.name, 'No']) + for host in \ + self.session.query(q_obj).filter(q_obj.active == True): + rows.append([host.name, 'Yes']) + self.print_table([labels]+rows, + justify='left', + hdr=True, + vdelim=" ", + padding=1) + elif q_obj == Group: + print("Groups:") + for group in self.session.query(q_obj).all(): + print(" %s" % group.name) + else: + results = self.session.query(q_obj).all() + else: + print('error') + raise SystemExit(1) + elif args[0] == 'init': + # Initialize the Snapshots database + dbpath = Bcfg2.Server.Snapshots.db_from_config(self.cfile) + engine = sqlalchemy.create_engine(dbpath, echo=True) + metadata = Base.metadata + metadata.create_all(engine) + Session = sqlalchemy.orm.sessionmaker() + Session.configure(bind=engine) + session = Session() + session.commit() + elif args[0] == 'dump': + client = args[1] + snap = Snapshot.get_current(self.session, u_str(client)) + if not snap: + print("Current snapshot for %s not found" % client) + sys.exit(1) + print("Client %s last run at %s" % (client, snap.timestamp)) + for pkg in snap.packages: + print("C:", pkg.correct, 'M:', pkg.modified) + print("start", pkg.start.name, pkg.start.version) + print("end", pkg.end.name, pkg.end.version) + elif args[0] == 'reports': + # bcfg2-admin reporting interface for Snapshots + if '-a' in args[1:]: + # Query all hosts for Name, Status, Revision, Timestamp + q = self.session.query(Client.name, + Snapshot.correct, + Snapshot.revision, + Snapshot.timestamp)\ + .filter(Client.id==Snapshot.client_id)\ + .group_by(Client.id) + rows = [] + labels = ('Client', 'Correct', 'Revision', 'Time') + for item in q.all(): + cli, cor, time, rev = item + rows.append([cli, cor, time, rev]) + self.print_table([labels]+rows, + justify='left', + hdr=True, vdelim=" ", + padding=1) + elif '-b' in args[1:]: + # Query a single host for bad entries + if len(args) < 3: + print("Usage: bcfg2-admin snapshots -b <client>") + return + client = args[2] + snap = Snapshot.get_current(self.session, u_str(client)) + if not snap: + print("Current snapshot for %s not found" % client) + sys.exit(1) + print("Bad entries:") + bad_pkgs = [self.session.query(Package) + .filter(Package.id==p.start_id).one().name \ + for p in snap.packages if p.correct == False] + for p in bad_pkgs: + print(" Package:%s" % p) + bad_files = [self.session.query(File) + .filter(File.id==f.start_id).one().name \ + for f in snap.files if f.correct == False] + for filename in bad_files: + print(" File:%s" % filename) + bad_svcs = [self.session.query(Service) + .filter(Service.id==s.start_id).one().name \ + for s in snap.services if s.correct == False] + for svc in bad_svcs: + print(" Service:%s" % svc) + elif '-e' in args[1:]: + # Query a single host for extra entries + client = args[2] + snap = Snapshot.get_current(self.session, u_str(client)) + if not snap: + print("Current snapshot for %s not found" % client) + sys.exit(1) + print("Extra entries:") + for pkg in snap.extra_packages: + print(" Package:%s" % pkg.name) + # FIXME: Do we know about extra files yet? 
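+            # (Assumption behind the loop below, flagged by the FIXME above:
+            # the snapshot model exposes extra_files/extra_services the same
+            # way as extra_packages, e.g., hypothetically,
+            #   snap.extra_files[0].name -> '/etc/motd.local'
+            # with .name holding the entry's path.)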
+ for f in snap.extra_files: + print(" File:%s" % f.name) + for svc in snap.extra_services: + print(" Service:%s" % svc.name) + elif '--date' in args[1:]: + year, month, day = args[2:] + timestamp = date(int(year), int(month), int(day)) + snaps = [] + for client in self.session.query(Client).filter(Client.active == True): + snaps.append(Snapshot.get_by_date(self.session, + client.name, + timestamp)) + rows = [] + labels = ('Client', 'Correct', 'Revision', 'Time') + for snap in snaps: + rows.append([snap.client.name, + snap.correct, + snap.revision, + snap.timestamp]) + self.print_table([labels]+rows, + justify='left', + hdr=True, + vdelim=" ", + padding=1) + else: + print("Unknown options: ", args[1:]) diff --git a/src/lib/Bcfg2/Server/Admin/Tidy.py b/src/lib/Bcfg2/Server/Admin/Tidy.py new file mode 100644 index 000000000..82319b93e --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Tidy.py @@ -0,0 +1,69 @@ +import os +import re +import socket + +import Bcfg2.Server.Admin + + +class Tidy(Bcfg2.Server.Admin.Mode): + __shorthelp__ = "Clean up useless files in the repo" + __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin tidy [-f] [-I]\n" + __usage__ = ("bcfg2-admin tidy [options]\n\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-f", + "force", + "-I", + "interactive")) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + badfiles = self.buildTidyList() + if '-f' in args or '-I' in args: + if '-I' in args: + for name in badfiles[:]: + # py3k compatibility + try: + answer = raw_input("Unlink file %s? [yN] " % name) + except NameError: + answer = input("Unlink file %s? [yN] " % name) + if answer not in ['y', 'Y']: + badfiles.remove(name) + for name in badfiles: + try: + os.unlink(name) + except IOError: + print("Failed to unlink %s" % name) + else: + for name in badfiles: + print(name) + + def buildTidyList(self): + """Clean up unused or unusable files from the repository.""" + hostmatcher = re.compile('.*\.H_(\S+)$') + to_remove = [] + good = [] + bad = [] + + # clean up unresolvable hosts in SSHbase + for name in os.listdir("%s/SSHbase" % self.setup['repo']): + if hostmatcher.match(name): + hostname = hostmatcher.match(name).group(1) + if hostname in good + bad: + continue + try: + socket.gethostbyname(hostname) + good.append(hostname) + except: + bad.append(hostname) + for name in os.listdir("%s/SSHbase" % self.setup['repo']): + if not hostmatcher.match(name): + to_remove.append("%s/SSHbase/%s" % (self.setup['repo'], + name)) + else: + if hostmatcher.match(name).group(1) in bad: + to_remove.append("%s/SSHbase/%s" % + (self.setup['repo'], name)) + # clean up file~ + # clean up files without parsable names in Cfg + return to_remove diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py new file mode 100644 index 000000000..2faa423c1 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Viz.py @@ -0,0 +1,119 @@ +import getopt +from subprocess import Popen, PIPE +import sys +import pipes +import Bcfg2.Server.Admin + + +class Viz(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Produce graphviz diagrams of metadata structures" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin viz [--includehosts] " + "[--includebundles] [--includekey] " + "[--only-client clientname] " + "[-o output.<ext>]\n") + __usage__ = ("bcfg2-admin viz [options]\n\n" + " %-32s%s\n" + " %-32s%s\n" + " %-32s%s\n" + " %-32s%s\n" + " %-32s%s\n" % + ("-H, --includehosts", + "include hosts in the viz output", + "-b, --includebundles", + "include bundles in the viz output", + "-k, 
--includekey", + "show a key for different digraph shapes", + "-c, --only-client <clientname>", + "show only the groups, bundles for the named client", + "-o, --outfile <file>", + "write viz output to an output file")) + + colors = ['steelblue1', 'chartreuse', 'gold', 'magenta', + 'indianred1', 'limegreen', 'orange1', 'lightblue2', + 'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66'] + + __plugin_blacklist__ = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr', 'Packages', + 'Rules', 'Account', 'Decisions', 'Deps', 'Git', + 'Svn', 'Fossil', 'Bzr', 'Bundler', 'TGenshi', + 'SGenshi', 'Base'] + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + # First get options to the 'viz' subcommand + try: + opts, args = getopt.getopt(args, 'Hbkc:o:', + ['includehosts', 'includebundles', + 'includekey', 'only-client=', 'outfile=']) + except getopt.GetoptError: + msg = sys.exc_info()[1] + print(msg) + print(self.__longhelp__) + raise SystemExit(1) + + hset = False + bset = False + kset = False + only_client = None + outputfile = False + for opt, arg in opts: + if opt in ("-H", "--includehosts"): + hset = True + elif opt in ("-b", "--includebundles"): + bset = True + elif opt in ("-k", "--includekey"): + kset = True + elif opt in ("-c", "--only-client"): + only_client = arg + elif opt in ("-o", "--outfile"): + outputfile = arg + + data = self.Visualize(self.setup['repo'], hset, bset, + kset, only_client, outputfile) + if data: + print(data) + raise SystemExit(0) + + def Visualize(self, repopath, hosts=False, + bundles=False, key=False, only_client=None, output=False): + """Build visualization of groups file.""" + if output: + format = output.split('.')[-1] + else: + format = 'png' + + cmd = ["dot", "-T", format] + if output: + cmd.extend(["-o", output]) + try: + dotpipe = Popen(cmd, stdin=PIPE, stdout=PIPE, close_fds=True) + except OSError: + # on some systems (RHEL 6), you cannot run dot with + # shell=True. on others (Gentoo with Python 2.7), you + # must. In yet others (RHEL 5), either way works. I have + # no idea what the difference is, but it's kind of a PITA. + cmd = ["dot", "-T", pipes.quote(format)] + if output: + cmd.extend(["-o", pipes.quote(output)]) + dotpipe = Popen(cmd, shell=True, + stdin=PIPE, stdout=PIPE, close_fds=True) + try: + dotpipe.stdin.write("digraph groups {\n") + except: + print("write to dot process failed. 
Is graphviz installed?") + raise SystemExit(1) + dotpipe.stdin.write('\trankdir="LR";\n') + dotpipe.stdin.write(self.metadata.viz(hosts, bundles, + key, only_client, self.colors)) + if key: + dotpipe.stdin.write("\tsubgraph cluster_key {\n") + dotpipe.stdin.write('''\tstyle="filled";\n''') + dotpipe.stdin.write('''\tcolor="lightblue";\n''') + dotpipe.stdin.write('''\tBundle [ shape="septagon" ];\n''') + dotpipe.stdin.write('''\tGroup [shape="ellipse"];\n''') + dotpipe.stdin.write('''\tProfile [style="bold", shape="ellipse"];\n''') + dotpipe.stdin.write('''\tHblock [label="Host1|Host2|Host3", shape="record"];\n''') + dotpipe.stdin.write('''\tlabel="Key";\n''') + dotpipe.stdin.write("\t}\n") + dotpipe.stdin.write("}\n") + dotpipe.stdin.close() + return dotpipe.stdout.read() diff --git a/src/lib/Bcfg2/Server/Admin/Xcmd.py b/src/lib/Bcfg2/Server/Admin/Xcmd.py new file mode 100644 index 000000000..140465468 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/Xcmd.py @@ -0,0 +1,60 @@ +import sys + +import Bcfg2.Options +import Bcfg2.Proxy +import Bcfg2.Server.Admin + +# Compatibility import +from Bcfg2.Bcfg2Py3k import xmlrpclib + + +class Xcmd(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("XML-RPC Command Interface") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin xcmd command\n") + __usage__ = ("bcfg2-admin xcmd <command>") + + def __call__(self, args): + optinfo = { + 'server': Bcfg2.Options.SERVER_LOCATION, + 'user': Bcfg2.Options.CLIENT_USER, + 'password': Bcfg2.Options.SERVER_PASSWORD, + 'key': Bcfg2.Options.SERVER_KEY, + 'certificate': Bcfg2.Options.CLIENT_CERT, + 'ca': Bcfg2.Options.CLIENT_CA, + 'timeout': Bcfg2.Options.CLIENT_TIMEOUT, + } + setup = Bcfg2.Options.OptionParser(optinfo) + setup.parse(args) + Bcfg2.Proxy.RetryMethod.max_retries = 1 + proxy = Bcfg2.Proxy.ComponentProxy(setup['server'], + setup['user'], + setup['password'], + key=setup['key'], + cert=setup['certificate'], + ca=setup['ca'], + timeout=setup['timeout']) + if len(setup['args']) == 0: + print("Usage: xcmd <xmlrpc method> <optional arguments>") + return + cmd = setup['args'][0] + args = () + if len(setup['args']) > 1: + args = tuple(setup['args'][1:]) + try: + data = getattr(proxy, cmd)(*args) + except xmlrpclib.Fault: + flt = sys.exc_info()[1] + if flt.faultCode == 7: + print("Unknown method %s" % cmd) + return + elif flt.faultCode == 20: + return + else: + raise + except Bcfg2.Proxy.ProxyError: + err = sys.exc_info()[1] + print("Proxy Error: %s" % err) + return + + if data != None: + print(data) diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py new file mode 100644 index 000000000..fdb9a0972 --- /dev/null +++ b/src/lib/Bcfg2/Server/Admin/__init__.py @@ -0,0 +1,136 @@ +__all__ = [ + 'Backup', + 'Bundle', + 'Client', + 'Compare', + 'Group', + 'Init', + 'Minestruct', + 'Perf', + 'Pull', + 'Query', + 'Reports', + 'Snapshots', + 'Tidy', + 'Viz', + 'Xcmd' + ] + +import logging +import lxml.etree +import sys + +import Bcfg2.Server.Core +import Bcfg2.Options +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + + +class ModeOperationError(Exception): + pass + + +class Mode(object): + """Help message has not yet been added for mode.""" + __shorthelp__ = 'Shorthelp not defined yet' + __longhelp__ = 'Longhelp not defined yet' + __usage__ = None + __args__ = [] + + def __init__(self, setup): + self.setup = setup + self.configfile = setup['configfile'] + self.__cfp = False + self.log = logging.getLogger('Bcfg2.Server.Admin.Mode') + if self.__usage__ is not None: + setup.hm 
= self.__usage__ + + def getCFP(self): + if not self.__cfp: + self.__cfp = ConfigParser.ConfigParser() + self.__cfp.read(self.configfile) + return self.__cfp + + cfp = property(getCFP) + + def __call__(self, args): + pass + + def errExit(self, emsg): + print(emsg) + raise SystemExit(1) + + def load_stats(self, client): + stats = lxml.etree.parse("%s/etc/statistics.xml" % self.setup['repo']) + hostent = stats.xpath('//Node[@name="%s"]' % client) + if not hostent: + self.errExit("Could not find stats for client %s" % (client)) + return hostent[0] + + def print_table(self, rows, justify='left', hdr=True, vdelim=" ", padding=1): + """Pretty print a table + + rows - list of rows ([[row 1], [row 2], ..., [row n]]) + hdr - if True the first row is treated as a table header + vdelim - vertical delimiter between columns + padding - # of spaces around the longest element in the column + justify - may be left,center,right + + """ + hdelim = "=" + justify = {'left': str.ljust, + 'center': str.center, + 'right': str.rjust}[justify.lower()] + + """ + Calculate column widths (longest item in each column + plus padding on both sides) + + """ + cols = list(zip(*rows)) + colWidths = [max([len(str(item)) + 2 * padding for \ + item in col]) for col in cols] + borderline = vdelim.join([w * hdelim for w in colWidths]) + + # Print out the table + print(borderline) + for row in rows: + print(vdelim.join([justify(str(item), width) for \ + (item, width) in zip(row, colWidths)])) + if hdr: + print(borderline) + hdr = False + + +class MetadataCore(Mode): + """Base class for admin-modes that handle metadata.""" + __plugin_whitelist__ = None + __plugin_blacklist__ = None + + def __init__(self, setup): + Mode.__init__(self, setup) + if self.__plugin_whitelist__ is not None: + setup['plugins'] = [p for p in setup['plugins'] + if p in self.__plugin_whitelist__] + elif self.__plugin_blacklist__ is not None: + setup['plugins'] = [p for p in setup['plugins'] + if p not in self.__plugin_blacklist__] + + try: + self.bcore = \ + Bcfg2.Server.Core.Core(setup['repo'], + setup['plugins'], + setup['password'], + setup['encoding'], + filemonitor=setup['filemonitor']) + if setup['event debug']: + self.bcore.fam.debug = True + except Bcfg2.Server.Core.CoreInitError: + msg = sys.exc_info()[1] + self.errExit("Core load failed: %s" % msg) + self.bcore.fam.handle_events_in_interval(5) + self.metadata = self.bcore.metadata + + +class StructureMode(MetadataCore): + pass diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py new file mode 100644 index 000000000..4321c060b --- /dev/null +++ b/src/lib/Bcfg2/Server/Core.py @@ -0,0 +1,482 @@ +"""Bcfg2.Server.Core provides the runtime support for Bcfg2 modules.""" + +import atexit +import logging +import select +import sys +import threading +import time +from traceback import format_exc + +try: + import lxml.etree +except ImportError: + print("Failed to import lxml dependency. 
Shutting down server.") + raise SystemExit(1) + +from Bcfg2.Component import Component, exposed +from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError +import Bcfg2.Server.FileMonitor +import Bcfg2.Server.Plugins.Metadata +# Compatibility imports +from Bcfg2.Bcfg2Py3k import xmlrpclib +if sys.hexversion >= 0x03000000: + from functools import reduce + +logger = logging.getLogger('Bcfg2.Server.Core') + + +def critical_error(operation): + """Log and err, traceback and return an xmlrpc fault to client.""" + logger.error(operation, exc_info=1) + raise xmlrpclib.Fault(7, "Critical unexpected failure: %s" % (operation)) + +try: + import psyco + psyco.full() +except: + pass + + +def sort_xml(node, key=None): + for child in node: + sort_xml(child, key) + + sorted_children = sorted(node, key=key) + node[:] = sorted_children + + +class CoreInitError(Exception): + """This error is raised when the core cannot be initialized.""" + pass + + +class Core(Component): + """The Core object is the container for all + Bcfg2 Server logic and modules. + """ + name = 'bcfg2-server' + implementation = 'bcfg2-server' + + def __init__(self, repo, plugins, password, encoding, + cfile='/etc/bcfg2.conf', ca=None, + filemonitor='default', start_fam_thread=False): + Component.__init__(self) + self.datastore = repo + if filemonitor not in Bcfg2.Server.FileMonitor.available: + logger.error("File monitor driver %s not available; " + "forcing to default" % filemonitor) + filemonitor = 'default' + try: + self.fam = Bcfg2.Server.FileMonitor.available[filemonitor]() + except IOError: + logger.error("Failed to instantiate fam driver %s" % filemonitor, + exc_info=1) + raise CoreInitError("failed to instantiate fam driver (used %s)" % \ + filemonitor) + self.pubspace = {} + self.cfile = cfile + self.cron = {} + self.plugins = {} + self.plugin_blacklist = {} + self.revision = '-1' + self.password = password + self.encoding = encoding + atexit.register(self.shutdown) + # Create an event to signal worker threads to shutdown + self.terminate = threading.Event() + + if '' in plugins: + plugins.remove('') + + for plugin in plugins: + if not plugin in self.plugins: + self.init_plugins(plugin) + # Remove blacklisted plugins + for p, bl in list(self.plugin_blacklist.items()): + if len(bl) > 0: + logger.error("The following plugins conflict with %s;" + "Unloading %s" % (p, bl)) + for plug in bl: + del self.plugins[plug] + # This section logs the experimental plugins + expl = [plug for (name, plug) in list(self.plugins.items()) + if plug.experimental] + if expl: + logger.info("Loading experimental plugin(s): %s" % \ + (" ".join([x.name for x in expl]))) + logger.info("NOTE: Interfaces subject to change") + # This section logs the deprecated plugins + depr = [plug for (name, plug) in list(self.plugins.items()) + if plug.deprecated] + if depr: + logger.info("Loading deprecated plugin(s): %s" % \ + (" ".join([x.name for x in depr]))) + + mlist = self.plugins_by_type(Bcfg2.Server.Plugin.Metadata) + if len(mlist) == 1: + self.metadata = mlist[0] + else: + logger.error("No Metadata Plugin loaded; failed to instantiate Core") + raise CoreInitError("No Metadata Plugin") + self.statistics = self.plugins_by_type(Bcfg2.Server.Plugin.Statistics) + self.pull_sources = self.plugins_by_type(Bcfg2.Server.Plugin.PullSource) + self.generators = self.plugins_by_type(Bcfg2.Server.Plugin.Generator) + self.structures = self.plugins_by_type(Bcfg2.Server.Plugin.Structure) + self.connectors = self.plugins_by_type(Bcfg2.Server.Plugin.Connector) + 
self.ca = ca + self.fam_thread = threading.Thread(target=self._file_monitor_thread) + if start_fam_thread: + self.fam_thread.start() + + def plugins_by_type(self, base_cls): + """Return a list of loaded plugins that match the passed type. + + The returned list is sorted in ascending order by the Plugins' + sort_order value. The sort_order defaults to 500 in Plugin.py, + but can be overridden by individual plugins. Plugins with the + same numerical sort_order value are sorted in alphabetical + order by their name. + """ + return sorted([plugin for plugin in self.plugins.values() + if isinstance(plugin, base_cls)], + key=lambda p: (p.sort_order, p.name)) + + def _file_monitor_thread(self): + """The thread for monitor the files.""" + famfd = self.fam.fileno() + terminate = self.terminate + while not terminate.isSet(): + try: + if famfd: + select.select([famfd], [], [], 2) + else: + if not self.fam.pending(): + terminate.wait(15) + self.fam.handle_event_set(self.lock) + except: + continue + # VCS plugin periodic updates + for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Version): + self.revision = plugin.get_revision() + + def init_plugins(self, plugin): + """Handling for the plugins.""" + try: + mod = getattr(__import__("Bcfg2.Server.Plugins.%s" % + (plugin)).Server.Plugins, plugin) + except ImportError: + try: + mod = __import__(plugin) + except: + logger.error("Failed to load plugin %s" % (plugin)) + return + plug = getattr(mod, plugin) + # Blacklist conflicting plugins + cplugs = [conflict for conflict in plug.conflicts + if conflict in self.plugins] + self.plugin_blacklist[plug.name] = cplugs + try: + self.plugins[plugin] = plug(self, self.datastore) + except PluginInitError: + logger.error("Failed to instantiate plugin %s" % (plugin)) + except: + logger.error("Unexpected instantiation failure for plugin %s" % + (plugin), exc_info=1) + + def shutdown(self): + """Shutting down the plugins.""" + if not self.terminate.isSet(): + self.terminate.set() + for plugin in list(self.plugins.values()): + plugin.shutdown() + + def validate_structures(self, metadata, data): + """Checks the data structure.""" + for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.StructureValidator): + try: + plugin.validate_structures(metadata, data) + except Bcfg2.Server.Plugin.ValidationError: + err = sys.exc_info()[1] + logger.error("Plugin %s structure validation failed: %s" \ + % (plugin.name, err.message)) + raise + except: + logger.error("Plugin %s: unexpected structure validation failure" \ + % (plugin.name), exc_info=1) + + def validate_goals(self, metadata, data): + """Checks that the config matches the goals enforced by the plugins.""" + for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.GoalValidator): + try: + plugin.validate_goals(metadata, data) + except Bcfg2.Server.Plugin.ValidationError: + err = sys.exc_info()[1] + logger.error("Plugin %s goal validation failed: %s" \ + % (plugin.name, err.message)) + raise + except: + logger.error("Plugin %s: unexpected goal validation failure" \ + % (plugin.name), exc_info=1) + + def GetStructures(self, metadata): + """Get all structures for client specified by metadata.""" + structures = reduce(lambda x, y: x + y, + [struct.BuildStructures(metadata) for struct \ + in self.structures], []) + sbundles = [b.get('name') for b in structures if b.tag == 'Bundle'] + missing = [b for b in metadata.bundles if b not in sbundles] + if missing: + logger.error("Client %s configuration missing bundles: %s" \ + % (metadata.hostname, ':'.join(missing))) + return 
structures + + def BindStructure(self, structure, metadata): + """Bind a complete structure.""" + for entry in structure.getchildren(): + if entry.tag.startswith("Bound"): + entry.tag = entry.tag[5:] + continue + try: + self.Bind(entry, metadata) + except PluginExecutionError: + if 'failure' not in entry.attrib: + entry.set('failure', 'bind error: %s' % format_exc()) + logger.error("Failed to bind entry: %s %s" % \ + (entry.tag, entry.get('name'))) + except Exception: + if 'failure' not in entry.attrib: + entry.set('failure', 'bind error: %s' % format_exc()) + logger.error("Unexpected failure in BindStructure: %s %s" \ + % (entry.tag, entry.get('name')), exc_info=1) + + def Bind(self, entry, metadata): + """Bind an entry using the appropriate generator.""" + if 'altsrc' in entry.attrib: + oldname = entry.get('name') + entry.set('name', entry.get('altsrc')) + entry.set('realname', oldname) + del entry.attrib['altsrc'] + try: + ret = self.Bind(entry, metadata) + entry.set('name', oldname) + del entry.attrib['realname'] + return ret + except: + entry.set('name', oldname) + logger.error("Failed binding entry %s:%s with altsrc %s" \ + % (entry.tag, entry.get('name'), + entry.get('altsrc'))) + logger.error("Falling back to %s:%s" % (entry.tag, + entry.get('name'))) + + glist = [gen for gen in self.generators if + entry.get('name') in gen.Entries.get(entry.tag, {})] + if len(glist) == 1: + return glist[0].Entries[entry.tag][entry.get('name')](entry, + metadata) + elif len(glist) > 1: + generators = ", ".join([gen.name for gen in glist]) + logger.error("%s %s served by multiple generators: %s" % \ + (entry.tag, entry.get('name'), generators)) + g2list = [gen for gen in self.generators if + gen.HandlesEntry(entry, metadata)] + if len(g2list) == 1: + return g2list[0].HandleEntry(entry, metadata) + entry.set('failure', 'no matching generator') + raise PluginExecutionError("No matching generator: %s:%s" % + (entry.tag, entry.get('name'))) + + def BuildConfiguration(self, client): + """Build configuration for clients.""" + start = time.time() + config = lxml.etree.Element("Configuration", version='2.0', \ + revision=self.revision) + try: + meta = self.build_metadata(client) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + logger.error("Metadata consistency error for client %s" % client) + return lxml.etree.Element("error", type='metadata error') + + try: + structures = self.GetStructures(meta) + except: + logger.error("error in GetStructures", exc_info=1) + return lxml.etree.Element("error", type='structure error') + + self.validate_structures(meta, structures) + + # Perform altsrc consistency checking + esrcs = {} + for struct in structures: + for entry in struct: + key = (entry.tag, entry.get('name')) + if key in esrcs: + if esrcs[key] != entry.get('altsrc'): + logger.error("Found inconsistent altsrc mapping for entry %s:%s" % key) + else: + esrcs[key] = entry.get('altsrc', None) + del esrcs + + for astruct in structures: + try: + self.BindStructure(astruct, meta) + config.append(astruct) + except: + logger.error("error in BindStructure", exc_info=1) + self.validate_goals(meta, config) + + sort_xml(config, key=lambda e: e.get('name')) + + logger.info("Generated config for %s in %.03f seconds" % \ + (client, time.time() - start)) + return config + + def GetDecisions(self, metadata, mode): + """Get data for the decision list.""" + result = [] + for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Decision): + try: + result += plugin.GetDecisions(metadata, mode) + except: + logger.error("Plugin: %s failed to generate decision list" \ + % plugin.name, exc_info=1) + return result + + def build_metadata(self, client_name): + """Build the metadata structure.""" + if not hasattr(self, 'metadata'): + # some threads start before metadata is even loaded + raise Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError + imd = self.metadata.get_initial_metadata(client_name) + for conn in self.connectors: + grps = conn.get_additional_groups(imd) + self.metadata.merge_additional_groups(imd, grps) + for conn in self.connectors: + data = conn.get_additional_data(imd) + self.metadata.merge_additional_data(imd, conn.name, data) + imd.query.by_name = self.build_metadata + return imd + + def process_statistics(self, client_name, statistics): + """Process statistics for client.""" + meta = self.build_metadata(client_name) + state = statistics.find(".//Statistics") + if state.get('version') >= '2.0': + for plugin in self.statistics: + try: + plugin.process_statistics(meta, statistics) + except: + logger.error("Plugin %s failed to process stats from %s" \ + % (plugin.name, meta.hostname), + exc_info=1) + + logger.info("Client %s reported state %s" % (client_name, + state.get('state'))) + # XMLRPC handlers start here + + @exposed + def GetProbes(self, address): + """Fetch probes for a particular client.""" + resp = lxml.etree.Element('probes') + try: + name = self.metadata.resolve_client(address, cleanup_cache=True) + meta = self.build_metadata(name) + + for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Probing): + for probe in plugin.GetProbes(meta): + resp.append(probe) + return lxml.etree.tostring(resp, encoding='UTF-8', + xml_declaration=True) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + warning = 'Client metadata resolution error for %s' % address[0] + self.logger.warning(warning) + raise xmlrpclib.Fault(6, warning + "; check server log") + except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError: + err_msg = 'Metadata system runtime failure' + self.logger.error(err_msg) + raise xmlrpclib.Fault(6, err_msg) + except: + critical_error("Error determining client probes") + + @exposed + def RecvProbeData(self, address, probedata): + """Receive probe data from clients.""" + try: + name = self.metadata.resolve_client(address) + meta = self.build_metadata(name) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + warning = 'Metadata consistency error' + self.logger.warning(warning) + raise xmlrpclib.Fault(6, warning) + # clear dynamic groups + self.metadata.cgroups[meta.hostname] = [] + try: + xpdata = lxml.etree.XML(probedata.encode('utf-8')) + except: + self.logger.error("Failed to parse probe data from client %s" % \ + (address[0])) + return False + + sources = [] + for data in xpdata: + if data.get('source') not in sources: + sources.append(data.get('source')) + for source in sources: + if source not in self.plugins: + self.logger.warning("Failed to locate plugin %s" % (source)) + continue + dl = [data for data in xpdata if data.get('source') == source] + try: + self.plugins[source].ReceiveData(meta, dl) + except: + logger.error("Failed to process probe data from client %s" % \ + (address[0]), exc_info=1) + return True + + @exposed + def AssertProfile(self, address, profile): + """Set profile for a client.""" + try: + client = self.metadata.resolve_client(address) + self.metadata.set_profile(client, profile, address) + except (Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError, + Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError): + warning 
= 'Metadata consistency error' + self.logger.warning(warning) + raise xmlrpclib.Fault(6, warning) + return True + + @exposed + def GetConfig(self, address, checksum=False): + """Build config for a client.""" + try: + client = self.metadata.resolve_client(address) + config = self.BuildConfiguration(client) + return lxml.etree.tostring(config, encoding='UTF-8', + xml_declaration=True) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + self.logger.warning("Metadata consistency failure for %s" % (address)) + raise xmlrpclib.Fault(6, "Metadata consistency failure") + + @exposed + def RecvStats(self, address, stats): + """Act on statistics upload.""" + sdata = lxml.etree.XML(stats.encode('utf-8')) + client = self.metadata.resolve_client(address) + self.process_statistics(client, sdata) + return "<ok/>" + + def authenticate(self, cert, user, password, address): + if self.ca: + acert = cert + else: + # No ca, so no cert validation can be done + acert = None + return self.metadata.AuthenticateConnection(acert, user, password, address) + + @exposed + def GetDecisionList(self, address, mode): + """Get the decision list for a client.""" + client = self.metadata.resolve_client(address) + meta = self.build_metadata(client) + return self.GetDecisions(meta, mode) diff --git a/src/lib/Bcfg2/Server/FileMonitor.py b/src/lib/Bcfg2/Server/FileMonitor.py new file mode 100644 index 000000000..d6b313e6b --- /dev/null +++ b/src/lib/Bcfg2/Server/FileMonitor.py @@ -0,0 +1,315 @@ +"""Bcfg2.Server.FileMonitor provides the support for monitoring files.""" + +import logging +import os +import stat +from time import sleep, time + +logger = logging.getLogger('Bcfg2.Server.FileMonitor') + + +def ShouldIgnore(event): + """Test if the event should be suppressed.""" + # FIXME should move event suppression out of the core + if event.filename.split('/')[-1] == '.svn': + return True + if event.filename.endswith('~') or \ + event.filename.startswith('#') or event.filename.startswith('.#'): + #logger.error("Suppressing event for file %s" % (event.filename)) + return True + return False + + +class Event(object): + def __init__(self, request_id, filename, code): + self.requestID = request_id + self.filename = filename + self.action = code + + def code2str(self): + """Return the event action code as a string.""" + return self.action + +available = {} + + +class FileMonitor(object): + """File Monitor baseclass.""" + def __init__(self, debug=False): + object.__init__(self) + self.debug = debug + self.handles = dict() + + def get_event(self): + return None + + def pending(self): + return False + + def fileno(self): + return 0 + + def handle_one_event(self, event): + if ShouldIgnore(event): + return + if event.requestID not in self.handles: + logger.info("Got event for unexpected id %s, file %s" % + (event.requestID, event.filename)) + return + if self.debug: + logger.info("Dispatching event %s %s to obj %s" \ + % (event.code2str(), event.filename, + self.handles[event.requestID])) + try: + self.handles[event.requestID].HandleEvent(event) + except: + logger.error("error in handling of monitor event for %s" % \ + (event.filename), exc_info=1) + + def handle_event_set(self, lock=None): + count = 1 + event = self.get_event() + start = time() + if lock: + lock.acquire() + try: + self.handle_one_event(event) + while self.pending(): + self.handle_one_event(self.get_event()) + count += 1 + except: + pass + if lock: + lock.release() + end = time() + logger.info("Handled %d events in %.03fs" % (count, (end - start))) + + def 
handle_events_in_interval(self, interval): + end = time() + interval + while time() < end: + if self.pending(): + self.handle_event_set() + end = time() + interval + else: + sleep(0.5) + + +class FamFam(object): + """The fam object is a set of callbacks for + file alteration events (FAM support). + """ + + def __init__(self): + object.__init__(self) + self.fm = _fam.open() + self.users = {} + self.handles = {} + self.debug = False + + def fileno(self): + """Return fam file handle number.""" + return self.fm.fileno() + + def handle_event_set(self, _): + self.Service() + + def handle_events_in_interval(self, interval): + now = time() + while (time() - now) < interval: + if self.Service(): + now = time() + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + mode = os.stat(path)[stat.ST_MODE] + if stat.S_ISDIR(mode): + handle = self.fm.monitorDirectory(path, None) + else: + handle = self.fm.monitorFile(path, None) + self.handles[handle.requestID()] = handle + if obj != None: + self.users[handle.requestID()] = obj + return handle.requestID() + + def Service(self, interval=0.50): + """Handle all fam work.""" + count = 0 + collapsed = 0 + rawevents = [] + start = time() + now = time() + while (time() - now) < interval: + if self.fm.pending(): + while self.fm.pending(): + count += 1 + rawevents.append(self.fm.nextEvent()) + now = time() + unique = [] + bookkeeping = [] + for event in rawevents: + if ShouldIgnore(event): + continue + if event.code2str() != 'changed': + # process all non-change events + unique.append(event) + else: + if (event.filename, event.requestID) not in bookkeeping: + bookkeeping.append((event.filename, event.requestID)) + unique.append(event) + else: + collapsed += 1 + for event in unique: + if event.requestID in self.users: + try: + self.users[event.requestID].HandleEvent(event) + except: + logger.error("handling event for file %s" % (event.filename), exc_info=1) + end = time() + logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" % + (count, (end - start), collapsed)) + return count + + +class Fam(FileMonitor): + """ + The fam object is a set of callbacks for + file alteration events (FAM support). + """ + + def __init__(self, debug=False): + FileMonitor.__init__(self, debug) + self.fm = _fam.open() + + def fileno(self): + return self.fm.fileno() + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + mode = os.stat(path)[stat.ST_MODE] + if stat.S_ISDIR(mode): + handle = self.fm.monitorDirectory(path, None) + else: + handle = self.fm.monitorFile(path, None) + if obj != None: + self.handles[handle.requestID()] = obj + return handle.requestID() + + def pending(self): + return self.fm.pending() + + def get_event(self): + return self.fm.nextEvent() + + +class Pseudo(FileMonitor): + """ + The fam object is a set of callbacks for + file alteration events (static monitor support). 
+ """ + + def __init__(self, debug=False): + FileMonitor.__init__(self, debug=False) + self.pending_events = [] + + def pending(self): + return len(self.pending_events) != 0 + + def get_event(self): + return self.pending_events.pop() + + def AddMonitor(self, path, obj): + """add a monitor to path, installing a callback to obj.HandleEvent""" + handleID = len(list(self.handles.keys())) + mode = os.stat(path)[stat.ST_MODE] + handle = Event(handleID, path, 'exists') + if stat.S_ISDIR(mode): + dirList = os.listdir(path) + self.pending_events.append(handle) + for includedFile in dirList: + self.pending_events.append(Event(handleID, + includedFile, + 'exists')) + self.pending_events.append(Event(handleID, path, 'endExist')) + else: + self.pending_events.append(Event(handleID, path, 'exists')) + if obj != None: + self.handles[handleID] = obj + return handleID + + +try: + from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \ + GAMChanged, GAMDeleted, GAMMoved + + class GaminEvent(Event): + """ + This class provides an event analogous to + python-fam events based on gamin sources. + """ + def __init__(self, request_id, filename, code): + Event.__init__(self, request_id, filename, code) + action_map = {GAMCreated: 'created', GAMExists: 'exists', + GAMChanged: 'changed', GAMDeleted: 'deleted', + GAMEndExist: 'endExist', GAMMoved: 'moved'} + if code in action_map: + self.action = action_map[code] + + class Gamin(FileMonitor): + """ + The fam object is a set of callbacks for + file alteration events (Gamin support) + """ + def __init__(self, debug=False): + FileMonitor.__init__(self, debug) + self.mon = WatchMonitor() + self.counter = 0 + self.events = [] + + def fileno(self): + return self.mon.get_fd() + + def queue(self, path, action, request_id): + """queue up the event for later handling""" + self.events.append(GaminEvent(request_id, path, action)) + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + handle = self.counter + self.counter += 1 + mode = os.stat(path)[stat.ST_MODE] + + # Flush queued gamin events + while self.mon.event_pending(): + self.mon.handle_one_event() + + if stat.S_ISDIR(mode): + self.mon.watch_directory(path, self.queue, handle) + else: + self.mon.watch_file(path, self.queue, handle) + self.handles[handle] = obj + return handle + + def pending(self): + return len(self.events) > 0 or self.mon.event_pending() + + def get_event(self): + if self.mon.event_pending(): + self.mon.handle_one_event() + return self.events.pop(0) + + available['gamin'] = Gamin +except ImportError: + # fall back to _fam + pass + +try: + import _fam + available['fam'] = FamFam +except ImportError: + pass +available['pseudo'] = Pseudo + +for fdrv in ['gamin', 'fam', 'pseudo']: + if fdrv in available: + available['default'] = available[fdrv] + break diff --git a/src/lib/Bcfg2/Server/Hostbase/.gitignore b/src/lib/Bcfg2/Server/Hostbase/.gitignore new file mode 100644 index 000000000..8e15b5395 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/.gitignore @@ -0,0 +1,3 @@ +*.pyc +dev.db +bcfg2.conf diff --git a/src/lib/Bcfg2/Server/Hostbase/__init__.py b/src/lib/Bcfg2/Server/Hostbase/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/__init__.py diff --git a/src/lib/Bcfg2/Server/Hostbase/backends.py b/src/lib/Bcfg2/Server/Hostbase/backends.py new file mode 100644 index 000000000..ecaf3c109 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/backends.py @@ -0,0 +1,68 @@ +from 
django.contrib.auth.models import User +#from ldapauth import * +from nisauth import * +import sys + +## class LDAPBackend(object): + +## def authenticate(self,username=None,password=None): +## try: + +## l = ldapauth(username,password) +## temp_pass = User.objects.make_random_password(100) +## ldap_user = dict(username=l.sAMAccountName, +## ) +## user_session_obj = dict( +## email=l.email, +## first_name=l.name_f, +## last_name=l.name_l, +## uid=l.badge_no +## ) +## #fixme: need to add this user session obj to session +## #print str(ldap_user) +## user,created = User.objects.get_or_create(username=username) +## #print user +## #print "created " + str(created) +## return user + +## except LDAPAUTHError,e: +## #print str(e) +## return None + +## def get_user(self,user_id): +## try: +## return User.objects.get(pk=user_id) +## except User.DoesNotExist, e: +## print str(e) +## return None + + +class NISBackend(object): + + def authenticate(self, username=None, password=None): + try: + n = nisauth(username, password) + temp_pass = User.objects.make_random_password(100) + nis_user = dict(username=username, + ) + + user_session_obj = dict( + email = username + "@mcs.anl.gov", + first_name = None, + last_name = None, + uid = n.uid + ) + user, created = User.objects.get_or_create(username=username) + + return user + + except NISAUTHError: + e = sys.exc_info()[1] + return None + + + def get_user(self, user_id): + try: + return User.objects.get(pk=user_id) + except User.DoesNotExist: + e = sys.exc_info()[1] + return None diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py new file mode 100644 index 000000000..70a2233cc --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py @@ -0,0 +1,15 @@ +from django.contrib import admin + +from models import Host, Interface, IP, MX, Name, CName, Nameserver, ZoneAddress, Zone, Log, ZoneLog + +admin.site.register(Host) +admin.site.register(Interface) +admin.site.register(IP) +admin.site.register(MX) +admin.site.register(Name) +admin.site.register(CName) +admin.site.register(Nameserver) +admin.site.register(ZoneAddress) +admin.site.register(Zone) +admin.site.register(Log) +admin.site.register(ZoneLog) diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py new file mode 100644 index 000000000..3f08a09a0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py @@ -0,0 +1,210 @@ +from django.db import models + +# Create your models here. 
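# A sketch of how the schema below chains together (hypothetical data;
# the reverse managers mirror what hostbase/views.py's gethostdata() uses):
#
#     host = Host.objects.get(hostname='example.anl.gov')
#     for iface in host.interface_set.all():            # Interface FK -> Host
#         for ip in iface.ip_set.all():                 # IP FK -> Interface
#             names = [n.name for n in ip.name_set.all()]   # Name FK -> IP
#             print(iface.mac_addr, ip.ip_addr, names)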
+class Host(models.Model): + NETGROUP_CHOICES = ( + ('none', 'none'),('cave', 'cave'),('ccst', 'ccst'),('mcs', 'mcs'), + ('mmlab', 'mmlab'),('sp', 'sp'),('red', 'red'),('virtual', 'virtual'), + ('win', 'win'),('xterm', 'xterm'),('lcrc', 'lcrc'),('anlext', 'anlext'), + ('teragrid', 'teragrid') + ) + STATUS_CHOICES = ( + ('active','active'),('dormant','dormant') + ) + SUPPORT_CHOICES = ( + ('green','green'),('yellow','yellow'),('red','red') + ) + CLASS_CHOICES = ( + ('scientific','scientific'), + ('operations','operations'),('guest','guest'), + ('confidential','confidential'),('public','public') + ) + WHATAMI_CHOICES = ( + ('aix-3', 'aix-3'), ('aix-4', 'aix-4'), + ('aix-5', 'aix-5'), ('baytech', 'baytech'), + ('decserver', 'decserver'), ('dialup', 'dialup'), + ('dos', 'dos'), ('freebsd', 'freebsd'), + ('hpux', 'hpux'), ('irix-5', 'irix-5'), + ('irix-6', 'irix-6'), ('linux', 'linux'), + ('linux-2', 'linux-2'), ('linux-rh73', 'linux-rh73'), + ('linux-rh8', 'linux-rh8'), ('linux-sles8', 'linux-sles8'), + ('linux-sles8-64', 'linux-sles8-64'), ('linux-sles8-ia32', 'linux-sles8-ia32'), + ('linux-sles8-ia64', 'linux-sles8-ia64'), ('mac', 'mac'), + ('network', 'network'), ('next', 'next'), + ('none', 'none'), ('osf', 'osf'), ('printer', 'printer'), + ('robot', 'robot'), ('solaris-2', 'solaris-2'), + ('sun4', 'sun4'), ('unknown', 'unknown'), ('virtual', 'virtual'), + ('win31', 'win31'), ('win95', 'win95'), + ('winNTs', 'winNTs'), ('winNTw', 'winNTw'), + ('win2k', 'win2k'), ('winXP', 'winXP'), ('xterm', 'xterm') + ) + hostname = models.CharField(max_length=64) + whatami = models.CharField(max_length=16) + netgroup = models.CharField(max_length=32, choices=NETGROUP_CHOICES) + security_class = models.CharField('class', max_length=16) + support = models.CharField(max_length=8, choices=SUPPORT_CHOICES) + csi = models.CharField(max_length=32, blank=True) + printq = models.CharField(max_length=32, blank=True) + outbound_smtp = models.BooleanField() + primary_user = models.EmailField() + administrator = models.EmailField(blank=True) + location = models.CharField(max_length=16) + comments = models.TextField(blank=True) + expiration_date = models.DateField(null=True, blank=True) + last = models.DateField(auto_now=True, auto_now_add=True) + status = models.CharField(max_length=7, choices=STATUS_CHOICES) + dirty = models.BooleanField() + + class Admin: + list_display = ('hostname', 'last') + search_fields = ['hostname'] + + def __str__(self): + return self.hostname + + def get_logs(self): + """ + Return this host's log entries. + """ + return Log.objects.filter(hostname=self.hostname) + +class Interface(models.Model): + TYPE_CHOICES = ( + ('eth', 'ethernet'), ('wl', 'wireless'), ('virtual', 'virtual'), ('myr', 'myr'), + ('mgmt', 'mgmt'), ('tape', 'tape'), ('fe', 'fe'), ('ge', 'ge'), + ) + # FIXME: The new admin interface has changed a lot. + #host = models.ForeignKey(Host, edit_inline=models.TABULAR, num_in_admin=2) + host = models.ForeignKey(Host) + # FIXME: The new admin interface has changed a lot. + #mac_addr = models.CharField(max_length=32, core=True) + mac_addr = models.CharField(max_length=32) + hdwr_type = models.CharField('type', max_length=16, choices=TYPE_CHOICES, blank=True) + # FIXME: The new admin interface has changed a lot. + # radio_admin=True, blank=True) + dhcp = models.BooleanField() + + def __str__(self): + return self.mac_addr + + class Admin: + list_display = ('mac_addr', 'host') + search_fields = ['mac_addr'] + +class IP(models.Model): + interface = models.ForeignKey(Interface) + # FIXME: The new admin interface has changed a lot. + # edit_inline=models.TABULAR, num_in_admin=1) + #ip_addr = models.IPAddressField(core=True) + ip_addr = models.IPAddressField() + + def __str__(self): + return self.ip_addr + + class Admin: + pass + + class Meta: + ordering = ('ip_addr', ) + +class MX(models.Model): + priority = models.IntegerField(blank=True) + # FIXME: The new admin interface has changed a lot. + #mx = models.CharField(max_length=64, blank=True, core=True) + mx = models.CharField(max_length=64, blank=True) + + def __str__(self): + return (" ".join([str(self.priority), self.mx])) + + class Admin: + pass + +class Name(models.Model): + DNS_CHOICES = ( + ('global','global'),('internal','ANL internal'), + ('private','private') + ) + # FIXME: The new admin interface has changed a lot. + #ip = models.ForeignKey(IP, edit_inline=models.TABULAR, num_in_admin=1) + ip = models.ForeignKey(IP) + # FIXME: The new admin interface has changed a lot. + #name = models.CharField(max_length=64, core=True) + name = models.CharField(max_length=64) + dns_view = models.CharField(max_length=16, choices=DNS_CHOICES) + only = models.BooleanField(blank=True) + mxs = models.ManyToManyField(MX) + + def __str__(self): + return self.name + + class Admin: + pass + +class CName(models.Model): + # FIXME: The new admin interface has changed a lot. + #name = models.ForeignKey(Name, edit_inline=models.TABULAR, num_in_admin=1) + name = models.ForeignKey(Name) + # FIXME: The new admin interface has changed a lot. 
+ #cname = models.CharField(max_length=64, core=True) + cname = models.CharField(max_length=64) + + def __str__(self): + return self.cname + + class Admin: + pass + +class Nameserver(models.Model): + name = models.CharField(max_length=64, blank=True) + + def __str__(self): + return self.name + + class Admin: + pass + +class ZoneAddress(models.Model): + ip_addr = models.IPAddressField(blank=True) + + def __str__(self): + return self.ip_addr + + class Admin: + pass + +class Zone(models.Model): + zone = models.CharField(max_length=64) + serial = models.IntegerField() + admin = models.CharField(max_length=64) + primary_master = models.CharField(max_length=64) + expire = models.IntegerField() + retry = models.IntegerField() + refresh = models.IntegerField() + ttl = models.IntegerField() + nameservers = models.ManyToManyField(Nameserver, blank=True) + mxs = models.ManyToManyField(MX, blank=True) + addresses = models.ManyToManyField(ZoneAddress, blank=True) + aux = models.TextField(blank=True) + + def __str__(self): + return self.zone + + class Admin: + pass + +class Log(models.Model): + # FIXME: Proposal hostname = models.ForeignKey(Host) + hostname = models.CharField(max_length=64) + date = models.DateTimeField(auto_now=True, auto_now_add=True) + log = models.TextField() + + def __str__(self): + return self.hostname + +class ZoneLog(models.Model): + zone = models.CharField(max_length=64) + date = models.DateTimeField(auto_now=True, auto_now_add=True) + log = models.TextField() + + def __str__(self): + return self.zone diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql b/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql new file mode 100644 index 000000000..b78187ab2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql @@ -0,0 +1,2 @@ +INSERT INTO hostbase_zone (zone, serial, admin, primary_master, expire, retry, refresh, ttl, aux) +VALUES ('.rev', 0, '', '', 1209600, 1800, 7200, 7200, '');
\ No newline at end of file diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py new file mode 100644 index 000000000..0ee204abe --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +from django.conf.urls.defaults import * +from django.contrib.auth.decorators import login_required +from django.core.urlresolvers import reverse +from django.views.generic.create_update import create_object, update_object, delete_object +from django.views.generic.list_detail import object_detail, object_list + +from models import Host, Zone, Log + +host_detail_dict = { + 'queryset':Host.objects.all(), + 'template_name':'host.html', + 'template_object_name':'host', +} + +host_delete_dict = { + 'model':Host, + 'post_delete_redirect':'/', +} + +host_log_detail_dict = host_detail_dict.copy() +host_log_detail_dict['template_name'] = 'logviewer.html' + +host_dns_detail_dict = host_detail_dict.copy() +host_dns_detail_dict['template_name'] = 'dns.html' + +zone_new_dict = { + 'model':Zone, + 'template_name':'zonenew.html', + 'post_save_redirect':'../%(id)s', +} + +zones_list_dict = { + 'queryset':Zone.objects.all(), + 'template_name':'zones.html', + 'template_object_name':'zone', +} + +zone_detail_dict = { + 'queryset':Zone.objects.all(), + 'template_name':'zoneview.html', + 'template_object_name':'zone', +} + +urlpatterns = patterns('', + (r'^(?P<object_id>\d+)/$', object_detail, host_detail_dict, 'host_detail'), + (r'^zones/new/$', login_required(create_object), zone_new_dict, 'zone_new'), + (r'^zones/(?P<object_id>\d+)/edit', login_required(update_object), zone_new_dict, 'zone_edit'), + (r'^zones/$', object_list, zones_list_dict, 'zone_list'), + (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'), + (r'^\d+/logs/(?P<object_id>\d+)/', object_detail, { 'queryset':Log.objects.all() }, 'log_detail'), + (r'^(?P<object_id>\d+)/logs/', object_detail, host_log_detail_dict, 'host_log_list'), + (r'^(?P<object_id>\d+)/dns', object_detail, host_dns_detail_dict, 'host_dns_list'), + (r'^(?P<object_id>\d+)/remove', login_required(delete_object), host_delete_dict, 'host_delete'), +) + +urlpatterns += patterns('Bcfg2.Server.Hostbase.hostbase.views', + (r'^$', 'search'), + (r'^(?P<host_id>\d+)/edit', 'edit'), + (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'), + (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/(?P<name_id>\d+)/confirm', 'confirm'), + (r'^(?P<host_id>\d+)/dns/edit', 'dnsedit'), + (r'^new', 'new'), + (r'^(?P<host_id>\d+)/copy', 'copy'), +# (r'^hostinfo', 'hostinfo'), + (r'^zones/(?P<zone_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'), +) diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py new file mode 100644 index 000000000..57ef5eff8 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py @@ -0,0 +1,970 @@ +"""Views.py +Contains all the views associated with the hostbase app +Also does form validation +""" +from django.http import HttpResponse, HttpResponseRedirect + +from django.contrib.auth.decorators import login_required +from django.contrib.auth import logout +from django.template import RequestContext +from Bcfg2.Server.Hostbase.hostbase.models import * +from datetime import date +from django.db import connection +from django.shortcuts import render_to_response +from django 
import forms +from Bcfg2.Server.Hostbase import settings, regex +import re, copy + +attribs = ['hostname', 'whatami', 'netgroup', 'security_class', 'support', + 'csi', 'printq', 'primary_user', 'administrator', 'location', + 'status', 'comments'] + +zoneattribs = ['zone', 'admin', 'primary_master', 'expire', 'retry', + 'refresh', 'ttl', 'aux'] + +dispatch = {'mac_addr':'i.mac_addr LIKE \'%%%%%s%%%%\'', + 'ip_addr':'p.ip_addr LIKE \'%%%%%s%%%%\'', + 'name':'n.name LIKE \'%%%%%s%%%%\'', +## 'hostname':'n.name LIKE \'%%%%%s%%%%\'', +## 'cname':'n.name LIKE \'%%%%%s%%%%\'', + 'mx':'m.mx LIKE \'%%%%%s%%%%\'', + 'dns_view':'n.dns_view = \'%s\'', + 'hdwr_type':'i.hdwr_type = \'%s\'', + 'dhcp':'i.dhcp = \'%s\''} + +def search(request): + """Search for hosts in the database + If more than one field is entered, logical AND is used + """ + if 'sub' in request.GET: + querystring = """SELECT DISTINCT h.hostname, h.id, h.status + FROM (((((hostbase_host h + INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON p.id = n.ip_id) + INNER JOIN hostbase_name_mxs x ON n.id = x.name_id) + INNER JOIN hostbase_mx m ON m.id = x.mx_id) + LEFT JOIN hostbase_cname c ON n.id = c.name_id + WHERE """ + + _and = False + for field in request.POST: + if request.POST[field] and field == 'hostname': + if _and: + querystring += ' AND ' + querystring += 'n.name LIKE \'%%%%%s%%%%\' or c.cname LIKE \'%%%%%s%%%%\'' % (request.POST[field], request.POST[field]) + _and = True + elif request.POST[field] and field in dispatch: + if _and: + querystring += ' AND ' + querystring += dispatch[field] % request.POST[field] + _and = True + elif request.POST[field]: + if _and: + querystring += ' AND ' + querystring += "h.%s LIKE \'%%%%%s%%%%\'" % (field, request.POST[field]) + _and = True + + if not _and: + cursor = connection.cursor() + cursor.execute("""SELECT hostname, id, status + FROM hostbase_host ORDER BY hostname""") + results = cursor.fetchall() + else: + querystring += " ORDER BY h.hostname" + cursor = connection.cursor() + cursor.execute(querystring) + results = cursor.fetchall() + + return render_to_response('results.html', + {'hosts': results, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + else: + return render_to_response('search.html', + {'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'DNS_CHOICES': Name.DNS_CHOICES, + 'yesno': [(1, 'yes'), (0, 'no')], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + + +def gethostdata(host_id, dnsdata=False): + """Grabs the necessary data about a host + Replaces a lot of repeated code""" + hostdata = {} + hostdata['ips'] = {} + hostdata['names'] = {} + hostdata['cnames'] = {} + hostdata['mxs'] = {} + hostdata['host'] = Host.objects.get(id=host_id) + hostdata['interfaces'] = hostdata['host'].interface_set.all() + for interface in hostdata['interfaces']: + hostdata['ips'][interface.id] = interface.ip_set.all() + if dnsdata: + for ip in hostdata['ips'][interface.id]: + hostdata['names'][ip.id] = ip.name_set.all() + for name in hostdata['names'][ip.id]: + hostdata['cnames'][name.id] = name.cname_set.all() + hostdata['mxs'][name.id] = name.mxs.all() + return hostdata + +def fill(template, hostdata, dnsdata=False): + """Fills a generic template + Replaces a lot of repeated code""" + if dnsdata: + template.names = hostdata['names'] + template.cnames = hostdata['cnames'] + template.mxs = hostdata['mxs'] + 
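    # Calling pattern assumed by the views below (illustration, not
    # original code):
    #
    #     hostdata = gethostdata(host_id, dnsdata=True)
    #     page = fill(page, hostdata, dnsdata=True)
    #
    # which leaves page.host, page.interfaces and page.ips (plus the DNS
    # attributes set above when dnsdata is true) ready for rendering.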
template.host = hostdata['host'] + template.interfaces = hostdata['interfaces'] + template.ips = hostdata['ips'] + return template + +def edit(request, host_id): + """Edit general host information.""" + manipulator = Host.ChangeManipulator(host_id) + changename = False + if request.method == 'POST': + host = Host.objects.get(id=host_id) + before = host.__dict__.copy() + if request.POST['hostname'] != host.hostname: + oldhostname = host.hostname.split(".")[0] + changename = True + interfaces = host.interface_set.all() + old_interfaces = [interface.__dict__.copy() for interface in interfaces] + + new_data = request.POST.copy() + + errors = manipulator.get_validation_errors(new_data) + if not errors: + + # somehow keep track of multiple interface change manipulators + # as well as multiple ip change manipulators??? (add manipulators???) + # change to many-to-many?????? + + # dynamically look up mx records? + text = '' + + for attrib in attribs: + if host.__dict__[attrib] != request.POST[attrib]: + text = do_log(text, attrib, host.__dict__[attrib], request.POST[attrib]) + host.__dict__[attrib] = request.POST[attrib] + + if 'expiration_date' in request.POST: + ymd = request.POST['expiration_date'].split("-") + if date(int(ymd[0]), int(ymd[1]), int(ymd[2])) != host.__dict__['expiration_date']: + text = do_log(text, 'expiration_date', host.__dict__['expiration_date'], + request.POST['expiration_date']) + host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2])) + + for inter in interfaces: + changetype = False + ips = IP.objects.filter(interface=inter.id) + if inter.mac_addr != request.POST['mac_addr%d' % inter.id]: + text = do_log(text, 'mac_addr', inter.mac_addr, request.POST['mac_addr%d' % inter.id]) + inter.mac_addr = request.POST['mac_addr%d' % inter.id].lower().replace('-',':') + if inter.hdwr_type != request.POST['hdwr_type%d' % inter.id]: + oldtype = inter.hdwr_type + text = do_log(text, 'hdwr_type', oldtype, request.POST['hdwr_type%d' % inter.id]) + inter.hdwr_type = request.POST['hdwr_type%d' % inter.id] + changetype = True + if (('dhcp%d' % inter.id) in request.POST and not inter.dhcp or + not ('dhcp%d' % inter.id) in request.POST and inter.dhcp): + text = do_log(text, 'dhcp', inter.dhcp, int(not inter.dhcp)) + inter.dhcp = not inter.dhcp + for ip in ips: + names = ip.name_set.all() + if not ip.ip_addr == request.POST['ip_addr%d' % ip.id]: + oldip = ip.ip_addr + oldsubnet = oldip.split(".")[2] + ip.ip_addr = request.POST['ip_addr%d' % ip.id] + ip.save() + text = do_log(text, 'ip_addr', oldip, ip.ip_addr) + for name in names: + if name.name.split(".")[0].endswith('-%s' % oldsubnet): + name.name = name.name.replace('-%s' % oldsubnet, '-%s' % ip.ip_addr.split(".")[2]) + name.save() + if changetype: + for name in names: + if name.name.split(".")[0].endswith('-%s' % oldtype): + name.name = name.name.replace('-%s' % oldtype, '-%s' % inter.hdwr_type) + name.save() + if changename: + for name in names: + if name.name.startswith(oldhostname): + name.name = name.name.replace(oldhostname, host.hostname.split(".")[0]) + name.save() + if request.POST['%dip_addr' % inter.id]: + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_ip = IP(interface=inter, ip_addr=request.POST['%dip_addr' % inter.id]) + new_ip.save() + text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr) + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + inter.save() + if request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr=request.POST['mac_addr_new'].lower().replace('-',':'), + hdwr_type=request.POST['hdwr_type_new'], + dhcp=request.POST['dhcp_new']) + text = do_log(text, '*new*', 'mac_addr', new_inter.mac_addr) + new_inter.save() + if request.POST['mac_addr_new'] and request.POST['ip_addr_new']: + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr) + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']: + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_inter = Interface(host=host, mac_addr="", + hdwr_type=request.POST['hdwr_type_new'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr) + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if text: + log = Log(hostname=host.hostname, log=text) + log.save() + host.save() + return HttpResponseRedirect('/hostbase/%s/' % host.id) + else: + return render_to_response('errors.html', + {'failures': errors, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + else: + host = Host.objects.get(id=host_id) + interfaces = [] + for interface in host.interface_set.all(): + interfaces.append([interface, interface.ip_set.all()]) + return render_to_response('edit.html', + {'host': host, + 'interfaces': interfaces, + 'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def confirm(request, item, item_id, host_id=None, name_id=None, zone_id=None): + """Asks if the user is sure he/she wants to remove an item""" + if 'sub' in request.GET: + if item == 'interface': + for ip in Interface.objects.get(id=item_id).ip_set.all(): + for name in ip.name_set.all(): + name.cname_set.all().delete() + ip.name_set.all().delete() + Interface.objects.get(id=item_id).ip_set.all().delete() + Interface.objects.get(id=item_id).delete() + elif item=='ip': + for name in IP.objects.get(id=item_id).name_set.all(): + name.cname_set.all().delete() + IP.objects.get(id=item_id).name_set.all().delete() + IP.objects.get(id=item_id).delete() + elif item=='cname': + CName.objects.get(id=item_id).delete() + elif item=='mx': + mx = MX.objects.get(id=item_id) + Name.objects.get(id=name_id).mxs.remove(mx) + elif item=='name': + Name.objects.get(id=item_id).cname_set.all().delete() + Name.objects.get(id=item_id).delete() + elif item=='nameserver': + nameserver = Nameserver.objects.get(id=item_id) + Zone.objects.get(id=zone_id).nameservers.remove(nameserver) + elif item=='zonemx': + mx = MX.objects.get(id=item_id) + Zone.objects.get(id=zone_id).mxs.remove(mx) + elif item=='address': + address = ZoneAddress.objects.get(id=item_id) + Zone.objects.get(id=zone_id).addresses.remove(address) + if item == 'cname' or item == 'mx' or item == 'name': + return HttpResponseRedirect('/hostbase/%s/dns/edit' % host_id) + elif item == 'nameserver' or item == 'zonemx' or item == 'address': + return HttpResponseRedirect('/hostbase/zones/%s/edit' % zone_id) + else: + return HttpResponseRedirect('/hostbase/%s/edit' % host_id) + else: + interface = None + ips = [] + names = [] + cnames = [] + mxs = [] + zonemx = None + nameserver = None + address = None + if item == 'interface': + interface = Interface.objects.get(id=item_id) + ips = interface.ip_set.all() + for ip in ips: + for name in ip.name_set.all(): + names.append((ip.id, name)) + for cname in name.cname_set.all(): + cnames.append((name.id, cname)) + for mx in name.mxs.all(): + mxs.append((name.id, mx)) + elif item=='ip': + ips = [IP.objects.get(id=item_id)] + for name in ips[0].name_set.all(): + names.append((ips[0].id, name)) + for cname in name.cname_set.all(): + cnames.append((name.id, cname)) + for mx in name.mxs.all(): + mxs.append((name.id, mx)) + elif item=='name': + names = [Name.objects.get(id=item_id)] + for cname in names[0].cname_set.all(): + cnames.append((names[0].id, cname)) + for mx in names[0].mxs.all(): + mxs.append((names[0].id, mx)) + elif item=='cname': + cnames = 
[CName.objects.get(id=item_id)] + elif item=='mx': + mxs = [MX.objects.get(id=item_id)] + elif item=='zonemx': + zonemx = MX.objects.get(id=item_id) + elif item=='nameserver': + nameserver = Nameserver.objects.get(id=item_id) + elif item=='address': + address = ZoneAddress.objects.get(id=item_id) + return render_to_response('confirm.html', + {'interface': interface, + 'ips': ips, + 'names': names, + 'cnames': cnames, + 'id': item_id, + 'type': item, + 'host_id': host_id, + 'mxs': mxs, + 'zonemx': zonemx, + 'nameserver': nameserver, + 'address': address, + 'zone_id': zone_id, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def dnsedit(request, host_id): + """Edits specific DNS information + Data is validated before committed to the database""" + text = '' + if 'sub' in request.GET: + hostdata = gethostdata(host_id, True) + for ip in hostdata['names']: + ipaddr = IP.objects.get(id=ip) + ipaddrstr = ipaddr.__str__() + for name in hostdata['cnames']: + for cname in hostdata['cnames'][name]: + if regex.host.match(request.POST['cname%d' % cname.id]): + text = do_log(text, 'cname', cname.cname, request.POST['cname%d' % cname.id]) + cname.cname = request.POST['cname%d' % cname.id] + cname.save() + for name in hostdata['mxs']: + for mx in hostdata['mxs'][name]: + if (mx.priority != request.POST['priority%d' % mx.id] and mx.mx != request.POST['mx%d' % mx.id]): + text = do_log(text, 'mx', ' '.join([str(mx.priority), str(mx.mx)]), + ' '.join([request.POST['priority%d' % mx.id], request.POST['mx%d' % mx.id]])) + nameobject = Name.objects.get(id=name) + nameobject.mxs.remove(mx) + newmx, created = MX.objects.get_or_create(priority=request.POST['priority%d' % mx.id], mx=request.POST['mx%d' % mx.id]) + if created: + newmx.save() + nameobject.mxs.add(newmx) + nameobject.save() + for name in hostdata['names'][ip]: + name.name = request.POST['name%d' % name.id] + name.dns_view = request.POST['dns_view%d' % name.id] + if (request.POST['%dcname' % name.id] and + regex.host.match(request.POST['%dcname' % name.id])): + cname = CName(name=name, + cname=request.POST['%dcname' % name.id]) + text = do_log(text, '*new*', 'cname', cname.cname) + cname.save() + if (request.POST['%dpriority' % name.id] and + request.POST['%dmx' % name.id]): + mx, created = MX.objects.get_or_create(priority=request.POST['%dpriority' % name.id], + mx=request.POST['%dmx' % name.id]) + if created: + mx.save() + text = do_log(text, '*new*', 'mx', + ' '.join([request.POST['%dpriority' % name.id], + request.POST['%dmx' % name.id]])) + name.mxs.add(mx) + name.save() + if request.POST['%sname' % ipaddrstr]: + name = Name(ip=ipaddr, + dns_view=request.POST['%sdns_view' % ipaddrstr], + name=request.POST['%sname' % ipaddrstr], only=False) + text = do_log(text, '*new*', 'name', name.name) + name.save() + if (request.POST['%scname' % ipaddrstr] and + regex.host.match(request.POST['%scname' % ipaddrstr])): + cname = CName(name=name, + cname=request.POST['%scname' % ipaddrstr]) + text = do_log(text, '*new*', 'cname', cname.cname) + cname.save() + if (request.POST['%smx' % ipaddrstr] and + request.POST['%spriority' % ipaddrstr]): + mx, created = MX.objects.get_or_create(priority=request.POST['%spriority' % ipaddrstr], + mx=request.POST['%smx' % ipaddrstr]) + if created: + mx.save() + text = do_log(text, '*new*', 'mx', + ' '.join([request.POST['%spriority' % ipaddrstr], request.POST['%smx' % ipaddrstr]])) + name.mxs.add(mx) + if text: + log = Log(hostname=hostdata['host'].hostname, log=text) + 
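            # do_log() (not shown here) accumulates a human-readable diff of
            # attribute changes; a Log row is only written when something
            # actually changed. The recurring pattern in these views
            # (sketch, not original code):
            #
            #     text = do_log(text, 'cname', old_value, new_value)
            #     if text:
            #         Log(hostname=host.hostname, log=text).save()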
log.save() + return HttpResponseRedirect('/hostbase/%s/dns' % host_id) + else: + host = Host.objects.get(id=host_id) + ips = [] + info = [] + cnames = [] + mxs = [] + interfaces = host.interface_set.all() + for interface in host.interface_set.all(): + ips.extend(interface.ip_set.all()) + for ip in ips: + info.append([ip, ip.name_set.all()]) + for name in ip.name_set.all(): + cnames.extend(name.cname_set.all()) + mxs.append((name.id, name.mxs.all())) + return render_to_response('dnsedit.html', + {'host': host, + 'info': info, + 'cnames': cnames, + 'mxs': mxs, + 'request': request, + 'interfaces': interfaces, + 'DNS_CHOICES': Name.DNS_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def new(request): + """Function for creating a new host in hostbase + Data is validated before committed to the database""" + if 'sub' in request.GET: + try: + Host.objects.get(hostname=request.POST['hostname'].lower()) + return render_to_response('errors.html', + {'failures': ['%s already exists in hostbase' % request.POST['hostname']], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + except: + pass + if not validate(request, True): + if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']: + return render_to_response('errors.html', + {'failures': ['ip_addr: You must enter an ip address'], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + host = Host() + # this is the stuff that validate() should take care of + # examine the check boxes for any changes + host.outbound_smtp = 'outbound_smtp' in request.POST + for attrib in attribs: + if attrib in request.POST: + host.__dict__[attrib] = request.POST[attrib].lower() + if 'comments' in request.POST: + host.comments = request.POST['comments'] + if 'expiration_date' in request.POST: +# ymd = request.POST['expiration_date'].split("-") +# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2])) + host.__dict__['expiration_date'] = date(2000, 1, 1) + host.status = 'active' + host.save() + else: + return render_to_response('errors.html', + {'failures': validate(request, True), + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + + if request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new'], + dhcp = 'dhcp_new' in request.POST) + new_inter.save() + if request.POST['mac_addr_new'] and request.POST['ip_addr_new']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) +# Change all of this: use a "post_save" signal handler for model Host to create all associated models, +# and use a generic view. + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new2'], + dhcp = 'dhcp_new2' in request.POST) + new_inter.save() + if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new2'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + host.save() + return HttpResponseRedirect('/hostbase/%s/' % host.id) + else: + return render_to_response('new.html', + {'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES, + 'CLASS_CHOICES': Host.CLASS_CHOICES, + 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES, + 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def copy(request, host_id): + """Function for creating a new host in hostbase + Data is validated before committed to the database""" + if 'sub' in request.GET: + try: + Host.objects.get(hostname=request.POST['hostname'].lower()) + return render_to_response('errors.html', + {'failures': ['%s already exists in hostbase' % request.POST['hostname']], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + except: + pass + if not validate(request, True): + if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']: + return render_to_response('errors.html', + {'failures': ['ip_addr: You must enter an ip address'], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + host = Host() + # this is the stuff that validate() should take care of + # examine the check boxes for any changes + host.outbound_smtp = 'outbound_smtp' in request.POST + for attrib in attribs: + if attrib in request.POST: + host.__dict__[attrib] = request.POST[attrib].lower() + if 'comments' in request.POST: + host.comments = request.POST['comments'] + if 'expiration_date' in request.POST: +# ymd = request.POST['expiration_date'].split("-") +# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2])) + host.__dict__['expiration_date'] = date(2000, 1, 1) + host.status = 'active' + host.save() + else: + return render_to_response('errors.html', + {'failures': validate(request, True), + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + + if request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new'], + dhcp = 'dhcp_new' in request.POST) + new_inter.save() + if request.POST['mac_addr_new'] and request.POST['ip_addr_new']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new2'], + dhcp = 'dhcp_new2' in request.POST) + new_inter.save() + if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new2'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
 + host.hostname.split(".", 1)[1]
+            name = Name(ip=new_ip, name=new_name,
+                        dns_view='global', only=False)
+            name.save()
+            name.mxs.add(mx)
+            name = Name(ip=new_ip, name=host.hostname,
+                        dns_view='global', only=False)
+            name.save()
+            name.mxs.add(mx)
+        host.save()
+        return HttpResponseRedirect('/hostbase/%s/' % host.id)
+    else:
+        host = Host.objects.get(id=host_id)
+        return render_to_response('copy.html',
+                                  {'host': host,
+                                   'TYPE_CHOICES': Interface.TYPE_CHOICES,
+                                   'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
+                                   'CLASS_CHOICES': Host.CLASS_CHOICES,
+                                   'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
+                                   'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
+                                   'logged_in': request.session.get('_auth_user_id', False)},
+                                  context_instance = RequestContext(request))
+
+# FIXME: delete all of these related records in a "pre_delete" signal handler
+#def remove(request, host_id):
+#    host = Host.objects.get(id=host_id)
+#    if 'sub' in request:
+#        for interface in host.interface_set.all():
+#            for ip in interface.ip_set.all():
+#                for name in ip.name_set.all():
+#                    name.cname_set.all().delete()
+#                ip.name_set.all().delete()
+#            interface.ip_set.all().delete()
+#            interface.delete()
+#        host.delete()
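Both refactoring notes in this file, the post_save comment in new()/copy() and the pre_delete FIXME above, point at Django's model signals. A minimal sketch of what that could look like, assuming Django 1.0-style signal connections and that Host is importable here; the handler names are hypothetical, and a post_save handler would still need access to the submitted form data, so this is an outline rather than a drop-in replacement:

    from django.db.models.signals import post_save, pre_delete

    def create_associated_records(sender, instance, created, **kwargs):
        # Sketch of the comment in new()/copy(): build the default
        # Interface/IP/Name/MX rows in one place instead of inline in
        # every view.  The form data would have to be attached to the
        # instance (or handled elsewhere) before this could work.
        if created:
            pass

    def cascade_dns_delete(sender, instance, **kwargs):
        # The same cleanup the commented-out remove() view does by hand:
        # walk the Host's interfaces and delete dependent DNS rows first.
        for interface in instance.interface_set.all():
            for ip in interface.ip_set.all():
                for name in ip.name_set.all():
                    name.cname_set.all().delete()
                ip.name_set.all().delete()
            interface.ip_set.all().delete()
            interface.delete()

    post_save.connect(create_associated_records, sender=Host)
    pre_delete.connect(cascade_dns_delete, sender=Host)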
+
+def validate(request, new=False, host_id=None):
+    """Function for checking form data"""
+    failures = []
+    if (request.POST['expiration_date']
+        and regex.date.match(request.POST['expiration_date'])):
+        try:
+            (year, month, day) = request.POST['expiration_date'].split("-")
+            date(int(year), int(month), int(day))
+        except ValueError:
+            failures.append('expiration_date')
+    elif request.POST['expiration_date']:
+        failures.append('expiration_date')
+
+    if not (request.POST['hostname']
+            and regex.host.match(request.POST['hostname'])):
+        failures.append('hostname')
+
+##    if not regex.printq.match(request.POST['printq']) and request.POST['printq']:
+##        failures.append('printq')
+
+##    if not regex.user.match(request.POST['primary_user']):
+##        failures.append('primary_user')
+
+##    if (not regex.user.match(request.POST['administrator'])
+##        and request.POST['administrator']):
+##        failures.append('administrator')
+
+##    if not (request.POST['location']
+##            and regex.location.match(request.POST['location'])):
+##        failures.append('location')
+
+    if new:
+        if (not regex.macaddr.match(request.POST['mac_addr_new'])
+            and request.POST['mac_addr_new']):
+            failures.append('mac_addr (#1)')
+        if ((request.POST['mac_addr_new'] or request.POST['ip_addr_new']) and
+            not 'hdwr_type_new' in request.REQUEST):
+            failures.append('hdwr_type (#1)')
+        if ((request.POST['mac_addr_new2'] or request.POST['ip_addr_new2']) and
+            not 'hdwr_type_new2' in request.REQUEST):
+            failures.append('hdwr_type (#2)')
+
+        if (not regex.macaddr.match(request.POST['mac_addr_new2'])
+            and request.POST['mac_addr_new2']):
+            failures.append('mac_addr (#2)')
+
+        if (not regex.ipaddr.match(request.POST['ip_addr_new'])
+            and request.POST['ip_addr_new']):
+            failures.append('ip_addr (#1)')
+        if (not regex.ipaddr.match(request.POST['ip_addr_new2'])
+            and request.POST['ip_addr_new2']):
+            failures.append('ip_addr (#2)')
+
+        [failures.append('ip_addr (#1)') for number in
+         request.POST['ip_addr_new'].split(".")
+         if number.isdigit() and int(number) > 255
+         and 'ip_addr (#1)' not in failures]
+        [failures.append('ip_addr (#2)') for number in
+         request.POST['ip_addr_new2'].split(".")
+         if number.isdigit() and int(number) > 255
+         and 'ip_addr (#2)' not in failures]
+
+    elif host_id:
+        interfaces = Interface.objects.filter(host=host_id)
+        for interface in interfaces:
+            if (not regex.macaddr.match(request.POST['mac_addr%d' % interface.id])
+                and request.POST['mac_addr%d' % interface.id]):
+                failures.append('mac_addr (%s)' % request.POST['mac_addr%d' % interface.id])
+            for ip in interface.ip_set.all():
+                if not regex.ipaddr.match(request.POST['ip_addr%d' % ip.id]):
+                    failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
+                [failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
+                 for number in request.POST['ip_addr%d' % ip.id].split(".")
+                 if (number.isdigit() and int(number) > 255 and
+                     'ip_addr (%s)' % request.POST['ip_addr%d' % ip.id] not in failures)]
+            if (request.POST['%dip_addr' % interface.id]
+                and not regex.ipaddr.match(request.POST['%dip_addr' % interface.id])):
+                failures.append('ip_addr (%s)' % request.POST['%dip_addr' % interface.id])
+        if (request.POST['mac_addr_new']
+            and not regex.macaddr.match(request.POST['mac_addr_new'])):
+            failures.append('mac_addr (%s)' % request.POST['mac_addr_new'])
+        if (request.POST['ip_addr_new']
+            and not regex.ipaddr.match(request.POST['ip_addr_new'])):
+            failures.append('ip_addr (%s)' % request.POST['ip_addr_new'])
+
+    if not failures:
+        return 0
+    return failures
+
+def do_log(text, attribute, previous, new):
+    # append a change-log line only when the value actually changed
+    if previous != new:
+        text += "%-20s%-20s -> %s\n" % (attribute, previous, new)
+    return text
+
+## login required stuff
+## uncomment the views below that you would like to restrict access to
+
+## the (uncommented) lines below restrict access to pages that modify the database;
+## anonymous users can still view data in Hostbase
+
+edit = login_required(edit)
+confirm = login_required(confirm)
+dnsedit = login_required(dnsedit)
+new = login_required(new)
+copy = login_required(copy)
+#remove = login_required(remove)
+#zoneedit = login_required(zoneedit)
+#zonenew = login_required(zonenew)
+
+## uncomment the lines below this point to restrict access to all of hostbase
+
+## search = login_required(search)
+## look = login_required(look)
+## dns = login_required(dns)
+## zones = login_required(zones)
+## zoneview = login_required(zoneview)
+
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html
new file mode 100644
index 000000000..1d7c5565b
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+  <title>{% block title %}BCFG2 - Hostbase{% endblock %}</title>
+  <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}/boxypastel.css" />
+  <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}/base.css" />
+  <!--<script type="text/javascript" src="http://hostbase.mcs.anl.gov/site_media/main.js"> -->
+  {% block extra_header_info %}{% endblock %}
+</head>
+
+<body>
+  <div 
id="header"> + <div id="branding"> + <h1>BCFG2</h1> + </div> + <div id="user-tools">...Change is Coming...</div> + </div> + <div id="sidebar"> + {% block sidebar %} + <ul class="sidebar"> + </ul> + {% endblock %} + </div> + + <div id="content-main"> + <div id="container"> + {% block pagebanner %}{% endblock %} + {% block content %}{% endblock %} + + </div> + </div> +</body> +</html> diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html new file mode 100644 index 000000000..ca8b0cc07 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html @@ -0,0 +1,117 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Confirm Removal</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<form name="input" action="confirm.html?sub=true" method="post"> +Are you sure you want to remove these items? + +{% if interface %} +<ul> +<li> interface: {{ interface.mac_addr }} </li> +{% endif %} + + +{% if ips %} +<ul> +{% for ip in ips %} +<li> ip: {{ ip.ip_addr }} </li> +<ul> +{% for name in names %} +{% ifequal name.0 ip.id %} +<li> name: {{ name.1.name }} </li> +<ul> +{% endifequal %} +{% for cname in cnames %} +{% ifequal cname.0 name.1.id %} +<li> cname: {{ cname.1.name }} </li> +{% endifequal %} +{% endfor %} +</ul> +<ul> +{% for mx in mxs %} +{% ifequal mx.0 name.1.id %} +<li> mx: {{ mx.1.priority }} {{ mx.1.mx }} </li> +{% endifequal %} +{% endfor %} +</ul> +{% endfor %} +</ul> +{% endfor %} +</ul> +{% endif %} + +{% if names and not ips %} +<ul> +{% for name in names %} +<li> name: {{ name.name }} </li> +<ul> +{% for cname in cnames %} +{% ifequal cname.0 name.id %} +<li> cname: {{ cname.1.cname }} </li> +{% endifequal %} +{% endfor %} +</ul> +<ul> +{% for mx in mxs %} +{% ifequal mx.0 name.id %} +<li> mx: {{ mx.1.priority }} {{ mx.1.mx }} </li> +{% endifequal %} +{% endfor %} +</ul> +{% endfor %} +</ul> +{% endif %} + +{% if cnames and not names %} +<ul> +{% for cname in cnames %} +<li> cname: {{ cname.cname }} </li> +{% endfor %} +</ul> +{% endif %} + +{% if mxs and not names %} +<ul> +{% for mx in mxs %} +<li> mx: {{ mx.priority }} {{ mx.mx }} </li> +{% endfor %} +</ul> +{% endif %} + +{% if interface %} +</ul> +{% endif %} + +{% if zone_id %} +<ul> +{% ifequal type 'zonemx' %} +<li> mx: {{ zonemx.priority }} {{ zonemx.mx }} </li> +{% endifequal %} + +{% ifequal type 'nameserver' %} +<li> nameserver: {{ nameserver.name }} </li> +{% endifequal %} + +{% ifequal type 'address' %} +<li> address: {{ address.ip_addr }} </li> +{% endifequal %} +</ul> +{% endif %} + +<input type="submit" value="confirm"> +<input type="reset" value="cancel" onclick="history.back()"> +</form> + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html new file mode 100644 index 000000000..400ef58f2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html @@ -0,0 +1,122 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>new host information</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +<a href="/hostbase/" class="sidebar">search hostbase</a> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<form name="hostdata" action="?sub=true" method="post"> +<input type="hidden" name="host"> +<table 
border="0" width="100%"> + <colgroup> + <col width="150"> + <col width="*"> + <tr> <td> <b>hostname</b></td> + <td> <input name="hostname" type="text" value="{{ host.hostname }}" ></td></tr> + <tr> <td> <b>whatami</b></td> + <td> + <select name="whatami"> + {% for choice in WHATAMI_CHOICES %} + {% ifequal host.whatami choice.0 %} + <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select> + </td></tr> + <tr> <td> <b>netgroup</b></td> + <td> + <select name="netgroup"> + {% for choice in NETGROUP_CHOICES %} + {% ifequal host.netgroup choice.0 %} + <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select> + </td></tr> + <tr> <td> <b>class</b></td> + <td> + <select name="security_class"> + {% for choice in CLASS_CHOICES %} + {% ifequal host.security_class choice.0 %} + <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select></td></tr> + <tr> <td> <b>support</b></td> + <td> + <select name="support"> + {% for choice in SUPPORT_CHOICES %} + {% ifequal host.support choice.0 %} + <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select></td></tr> + <tr> <td> <b>csi</b></td> + <td> <input name="csi" type="text" value="{{ host.csi }}" ></td></tr> + <tr> <td> <b>printq</b></td> + <td> <input name="printq" type="text" value="{{ host.printq }}" ></td></tr> + <tr> <td> <b>outbound_smtp</b></td> + <td> + {% if host.outbound_smtp %} + <input type="checkbox" name="outbound_smtp" checked="checked" ></td></tr> + {% else %} + <input type="checkbox" name="outbound_smtp" ></td></tr> + {% endif %} + <tr> <td> <b>primary_user</b></td> + <td> <input name="primary_user" type="text" size="32" value="{{ host.primary_user }}"> (email address)</td></tr> + <tr> <td> <b>administrator</b></td> + <td> <input name="administrator" type="text" size="32" value="{{ host.administrator }}"> (email address)</td></tr> + <tr> <td> <b>location</b></td> + <td> <input name="location" type="text" value="{{ host.location }}"></td></tr> + <tr> <td> <b>expiration_date</b></td> + <td> <input name="expiration_date" type="text" size="10" value="{{ host.expiration_date }}">YYYY-MM-DD</td></tr> + <tr> <td><br><b>Interface</b></td><td><br> + {% for choice in TYPE_CHOICES %} + <input type="radio" name="hdwr_type_new" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %} + </td></tr> + <tr> <td> <b>dhcp</b></td> + <td> + <input type="checkbox" name="dhcp_new"></td></tr> + <tr> <td> <b>mac_addr</b></td> + <td> <input name="mac_addr_new" type="text"></td></tr> + <tr> <td> <b>ip_addr</b></td> + <td> <input name="ip_addr_new" type="text"></td></tr> + <tr> <td><br><b>Interface</b></td><td><br> + {% for choice in TYPE_CHOICES %} + <input type="radio" name="hdwr_type_new2" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %} + </td></tr> + <tr> <td> <b>dhcp</b></td> + <td> + <input type="checkbox" name="dhcp_new2"></td></tr> + <tr> <td> <b>mac_addr</b></td> + <td> <input name="mac_addr_new2" type="text"></td></tr> + <tr> <td> <b>ip_addr</b></td> + <td> <input name="ip_addr_new2" type="text"></td></tr> + <tr> <td> <b>comments</b></td> + <td> <textarea rows="10" cols="50" 
name="comments"></textarea></td></tr> +</table> +<br> +<p><input type="submit" value="Submit"> +</form> + +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html new file mode 100644 index 000000000..da179e5a1 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html @@ -0,0 +1,40 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>dns info for {{ host.hostname }}</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +<ul class="sidebar"> + <li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li> + <li><a href="/hostbase/{{ host.id }}/edit/" class="sidebar">edit host info</a></li> + <li><a href="edit/" class="sidebar">edit dns info</a></li> + <li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li> +</ul> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +{% for interface in host.interface_set.all %} + {% for ip in interface.ip_set.all %} + <ul><li> <b>ip_addr:</b> {{ ip.ip_addr }}</li> + {% for name in ip.name_set.all %} + <ul> <li><b>name:</b> {{ name.name }}</li> <ul> + {% for cname in name.cname_set.all %} + <li> <b>cname:</b> {{ cname.cname }}</li> + {% endfor %} + {% for mx in name.mxs.all %} + <li> <b>mx:</b> {{ mx.priority }} {{ mx.mx }}</li> + {% endfor %} + </ul></ul> + {% endfor %} + </ul> + {% endfor %} +{% endfor %} +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html new file mode 100644 index 000000000..b1b71ab67 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html @@ -0,0 +1,98 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>dns info for {{ host.hostname }}</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +<ul class="sidebar"> + <li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li> + <li><a href="/hostbase/{{ host.id }}/edit/" class="sidebar">edit host info</a></li> + <li><a href="/hostbase/{{ host.id }}/dns/" class="sidebar">see dns info</a></li> + <li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li> +</ul> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<form name="dns" action="?sub=true" method="post"> +<input type="hidden" name="host" value="{{ host.id }}"> +<table border="0" width="100%"> + <colgroup> + <col width="150"> + <col width="*"> + {% for interface in interfaces %} + <tr><td><br></td></tr> + <tr> <td> <b>interface type</b> </td> + <td> {{ interface.hdwr_type }} </td></tr> + <tr> <td> <b>mac_addr</b> </td> + <td> {{ interface.mac_addr }} </td></tr> + <tr><td><hr></td><td><hr></td></tr> + {% for ip in info %} + {% ifequal ip.0.interface interface %} + <tr> <td> <b>ip_addr</b></td> + <td>{{ ip.0.ip_addr }}</td></tr> + {% for name in ip.1 %} + <tr> <td><b>name(dns)</b></td> + <td> <input name="name{{ name.id }}" type="text" value="{{ name.name }}"> + <select name="dns_view{{ name.id }}"> + {% for choice in DNS_CHOICES %} + {% ifequal name.dns_view choice.0 %} + <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select> + <a style="font-size:75%" href="/hostbase/{{ host.id }}/name/{{ name.id 
}}/confirm">remove</a></td></tr> + {% for cname in cnames %} + {% ifequal name cname.name %} + <tr> <td> <b>cname</b></td> + <td> <input name="cname{{ cname.id }}" type="text" value="{{ cname.cname }}"> + <a style="font-size:75%" href="/hostbase/{{ host.id }}/cname/{{ cname.id }}/confirm">remove</a></td></tr> + {% endifequal %} + {% endfor %} + <tr> <td> <b>cname</b></td> + <td> <input name="{{ name.id }}cname" type="text"></td></tr> + {% for mx in mxs %} + {% ifequal mx.0 name.id %} + {% for record in mx.1 %} + <tr> <td> <b>mx</b></td> + <td> <input name="priority{{ record.id }}" type="text" size="6" value="{{ record.priority }}"> + <input name="mx{{ record.id }}" type="text" value="{{ record.mx }}"> + <a style="font-size:75%" href="/hostbase/{{ host.id }}/mx/{{ record.id }}/{{ name.id }}/confirm">remove</a></td></tr> + {% endfor %} + {% endifequal %} + {% endfor %} + <tr> <td> <b>mx</b></td> + <td> <input name="{{ name.id }}priority" type="text" size="6"> + <input name="{{ name.id }}mx" type="text"></td></tr> + {% endfor %} + <tr> <td> <b>name</b></td> + <td> <input name="{{ ip.0.ip_addr }}name" type="text"> + <select name="{{ ip.0.ip_addr }}dns_view"> + {% for choice in DNS_CHOICES %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endfor %} + </select></td></tr> + <tr> <td> <b>cname</b></td> + <td> <input name="{{ ip.0.ip_addr }}cname" type="text"></td></tr> + <tr> <td> <b>mx</b></td> + <td> <input name="{{ ip.0.ip_addr }}priority" type="text" size="6"> + <input name="{{ ip.0.ip_addr }}mx" type="text"></td></tr> + <tr><td></td></tr> + <tr><td><hr></td><td><hr></td></tr> + {% endifequal %} + {% endfor %} + {% endfor %} + </table> + +<p><input type="submit" value="Submit"> +</form> + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html new file mode 100644 index 000000000..961c9d143 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html @@ -0,0 +1,191 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>{{ host.hostname }}</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +<ul class="sidebar"> +<li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li> +<li><a href="/hostbase/{{ host.id }}/dns/" class="sidebar">detailed dns info</a></li> +<li><a href="/hostbase/{{ host.id }}/dns/edit/" class="sidebar">edit dns info</a></li> +<li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li> +</ul> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<script language="JavaScript" type="text/Javascript"> +function toggleAddr(interface_id){ + if(document.getElementById){ + var style = document.getElementById('ipaddr'+interface_id).style; + style.display = style.display? "":"block"; + } +} +function toggleInter(){ + if(document.getElementById){ + var style = document.getElementById('interface').style; + style.display = style.display? 
"":"block"; + } +} +</script> + +<style type=text/css> +{% for interface in interfaces %} +div#ipaddr{{ interface.0.id }}{ + display: none; +} +{% endfor %} +div#interface{ + display: none; +} +</style> + +<form name="hostdata" action="" method="post"> +<fieldset class="module aligned ()"> +<input type="hidden" name="host" value="{{ host.id }}"> + <label for="id_hostname">hostname:</label> + <input name="hostname" value="{{ host.hostname }}"><br> + <label for="id_whatami">whatami:</label> + <select name="whatami"> + {% for choice in host.WHATAMI_CHOICES %} + {% ifequal host.whatami choice.0 %} + <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select><br> + <label for="id_netgroup">netgroup:</label> + <select name="netgroup"> + {% for choice in host.NETGROUP_CHOICES %} + {% ifequal host.netgroup choice.0 %} + <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select><br> + <label for="id_security_class">class:</label> + <select name="security_class"> + {% for choice in host.CLASS_CHOICES %} + {% ifequal host.security_class choice.0 %} + <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select><br> + <label for="id_support">support:</label> + <select name="support"> + {% for choice in host.SUPPORT_CHOICES %} + {% ifequal host.support choice.0 %} + <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select><br> + <label for="id_csi">csi:</label> + <input name="csi" type="text" value="{{ host.csi }}"><br> + <label for="id_printq">printq:</label> + <input name="printq" type="text" value="{{ host.printq }}"><br> + <label for="id_outbound_smtp">outbound_smtp:</label> + {% if host.outbound_smtp %} + <input type="checkbox" checked="checked" name="outbound_smtp"> + {% else %} + <input type="checkbox" name="outbound_smtp"> + {% endif %}<br> + <label for="id_primary_user">primary_user:</label> + <input name="primary_user" type="text" size="32" value="{{ host.primary_user }}"><br> + <label for="id_administrator">administrator:</label> + <input name="administrator" type="text" size="32" value="{{ host.administrator }}"><br> + <label for="id_location">location:</label> + <input name="location" type="text" value="{{ host.location }}"><br> + <label for="id_expiration_date">expiration_date:</label> + <input name="expiration_date" type="text" value="{{ host.expiration_date }}"> YYYY-MM-DD<br> + {% for interface in interfaces %} + <label for="id_interface">Interface:</label> + <select name="hdwr_type{{ interface.0.id }}"> + {% for choice in interface.0.TYPE_CHOICES %} + {% ifequal interface.0.hdwr_type choice.0 %} + <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} + {% else %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endifequal %} + {% endfor %} + </select><br> + <label for="id_dhcp">dhcp:</label> + {% if interface.0.dhcp %} + <input type="checkbox" checked="checked" name="dhcp{{ interface.0.id }}"> + {% else %} + <input type="checkbox" name="dhcp{{ interface.0.id }}"> + {% endif %}<br> + <label for="id_mac_addr">mac_addr:</label> + <input name="mac_addr{{ interface.0.id }}" type="text" value="{{ interface.0.mac_addr }}"> + <a 
style="font-size:75%" href="/hostbase/{{ host.id }}/interface/{{ interface.0.id }}/confirm">remove</a><br> + {% for ip in interface.1 %} + <label for="id_ip_addr">ip_addr:</label> + <input name="ip_addr{{ ip.id }}" type="text" value="{{ ip.ip_addr }}"> + <a style="font-size:75%" href="/hostbase/{{ host.id }}/ip/{{ ip.id }}/confirm">remove</a><br> + {% endfor %} + +<!-- Section for adding a new IP address to an existing interface --> +<!-- By default, section is hidden --> + <div id=ipaddr{{ interface.0.id }}> + <label for="id_ip_addr">ip_addr:</label> + <input name="{{ interface.0.id }}ip_addr" type="text"><br> + </div> + <a style="font-size:75%" href=# onclick="toggleAddr({{ interface.0.id }})">Add a New IP Address</a><br> + {% endfor %} +<!-- End section for new IP address --> + +<!-- Section for add an entirely new interface to a host --> +<!-- By default, section is hidden --> + <div id=interface> + <label for="id_interface">Interface:</label> + <select name="hdwr_type_new"> + {% for choice in TYPE_CHOICES %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endfor %} + </select><br> + <label for="id_dhcp">dhcp:</label> + {% if host.dhcp %} + <input type="checkbox" checked="checked" name="dhcp_new"> + {% else %} + <input type="checkbox" name="dhcp_new"> + {% endif %}<br> + <label for="id_mac_addr">mac_addr:</label> + <td> <input name="mac_addr_new" type="text"><br> + <label for="id_ip_addr">ip_addr:</label> + <td> <input name="ip_addr_new" type="text"><br> +</div> +<a style="font-size:75%" href=# onclick="toggleInter()">Add a New Interface</a><br> +<!-- End new interface section --> + + +<label for="id_comments">comments:</label> +<textarea rows="10" cols="50" name="comments">{{ host.comments }}</textarea><br> +<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/edit">edit detailed DNS information for this host</a> +<br> +this host is +<select name="status"> +{% for choice in host.STATUS_CHOICES %} +{% ifequal host.status choice.0 %} +<option value="{{ choice.0 }}" selected="selected">{{ choice.1 }} +{% else %} +<option value="{{ choice.0 }}">{{ choice.1 }} +{% endifequal %} +{% endfor %} +</select><br> +last update on {{ host.last }}<br> +<input type="submit" value="submit"> +<input type="reset" value="cancel" onclick="history.back()"> +</form> + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html new file mode 100644 index 000000000..e5429b86c --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html @@ -0,0 +1,31 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Search Results</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +{% if failures %} +There were errors in the following fields<br><br> +{% for failure in failures %} + +<font color="#FF0000">{{ failure }}</font><br> +{% comment %} +{{ failure.1|join:", " }} +{% endcomment %} + +{% endfor %} +{% endif %} +<br> +Press the back button on your browser and edit those field(s) + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html new file mode 100644 index 000000000..d6b8873bc --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html @@ -0,0 +1,80 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div 
class="header">
+    <h2>{{ host.hostname }}</h2>
+  </div>
+  <br/>
+{% endblock %}
+
+{% block sidebar %}
+{% include "navbar.tmpl" %}
+<ul class="sidebar">
+  <li><a href="dns/" class="sidebar">detailed dns info</a></li>
+  <li><a href="edit/" class="sidebar">edit host info</a></li>
+  <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
+  <li><a href="logs/" class="sidebar">change logs</a></li>
+</ul>
+{% include "logout.tmpl" %}
+{% endblock %}
+
+{% block content %}
+
+<table border="0" width="100%">
+  <colgroup>
+  <col width="150">
+  <col width="*">
+  <tr> <td> <b>hostname</b></td>
+       <td> {{ host.hostname }}</td></tr>
+  <tr> <td> <b>whatami</b></td>
+       <td> {{ host.whatami }}</td></tr>
+  <tr> <td> <b>netgroup</b></td>
+       <td> {{ host.netgroup }}</td></tr>
+  <tr> <td> <b>class</b></td>
+       <td> {{ host.security_class }}</td></tr>
+  <tr> <td> <b>support</b></td>
+       <td> {{ host.support }}</td></tr>
+  <tr> <td> <b>csi</b></td>
+       <td> {{ host.csi }}</td></tr>
+  <tr> <td> <b>printq</b></td>
+       <td> {{ host.printq }}</td></tr>
+  <tr> <td> <b>outbound_smtp</b></td>
+  {% if host.outbound_smtp %}
+       <td> y </td></tr>
+  {% else %}
+       <td> n </td></tr>
+  {% endif %}
+  <tr> <td> <b>primary_user</b></td>
+       <td> {{ host.primary_user }}</td></tr>
+  <tr> <td> <b>administrator</b></td>
+       <td> {{ host.administrator }}</td></tr>
+  <tr> <td> <b>location</b></td>
+       <td> {{ host.location }}</td></tr>
+  <tr> <td> <b>expiration_date</b></td>
+       <td> {{ host.expiration_date }}</td></tr>
+  {% for interface in host.interface_set.all %}
+  <tr> <td><br><b>Interface</b></td>
+    {% ifnotequal interface.0.hdwr_type 'no' %}
+       <td><br>{{ interface.0.hdwr_type }}</td></tr>
+    {% endifnotequal %}
+    {% if interface.0.dhcp %}
+  <tr> <td> <b>mac_addr</b></td>
+       <td> {{ interface.0.mac_addr }}</td></tr>
+    {% endif %}
+    {% for ip in interface.1 %}
+  <tr> <td> <b>ip_addr</b></td>
+       <td> {{ ip.ip_addr }}</td></tr>
+    {% endfor %}
+  {% endfor %}
+  <tr> <td valign="top"> <b>comments</b></td>
+       <td>
+       {{ host.comments|linebreaksbr }}<br>
+       </td></tr>
+
+</table>
+<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/">see detailed DNS information for this host</a>
+<br><br>
+this host is {{ host.status }}<br>
+last update on {{ host.last }}<br>
+
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html
new file mode 100644
index 000000000..b5d794b50
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html
@@ -0,0 +1,89 @@
+{% extends "base.html" %}
+
+{% block pagebanner %}
+  <div class="header">
+    <h2>Are you sure you want to remove {{ object.hostname }}?</h2>
+  </div>
+  <br/>
+{% endblock %}
+
+{% block sidebar %}
+{% include "navbar.tmpl" %}
+<ul class="sidebar">
+  <li><a href="dns/" class="sidebar">detailed dns info</a></li>
+  <li><a href="edit/" class="sidebar">edit host info</a></li>
+  <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
+</ul>
+{% include "logout.tmpl" %}
+{% endblock %}
+
+{% block content %}
+
+<table border="0" width="100%">
+  <colgroup>
+  <col width="150">
+  <col width="*">
+  <tr> <td> <b>hostname</b></td>
+       <td> {{ object.hostname }}</td></tr>
+  <tr> <td> <b>whatami</b></td>
+       <td> {{ object.whatami }}</td></tr>
+  <tr> <td> <b>netgroup</b></td>
+       <td> {{ object.netgroup }}</td></tr>
+  <tr> <td> <b>class</b></td>
+       <td> {{ object.security_class }}</td></tr>
+  <tr> <td> <b>support</b></td>
+       <td> {{ 
object.support }}</td></tr> + <tr> <td> <b>csi</b></td> + <td> {{ object.csi }}</td></tr> + <tr> <td> <b>printq</b></td> + <td> {{ object.printq }}</td></tr> + <tr> <td> <b>dhcp</b></td> + {% if host.dhcp %} + <td> y </td></tr> + {% else %} + <td> n </td></tr> + {% endif %} + <tr> <td> <b>outbound_smtp</b></td> + {% if host.outbound_smtp %} + <td> y </td></tr> + {% else %} + <td> n </td></tr> + {% endif %} + <tr> <td> <b>primary_user</b></td> + <td> {{ object.primary_user }}</td></tr> + <tr> <td> <b>administrator</b></td> + <td> {{ object.administrator }}</td></tr> + <tr> <td> <b>location</b></td> + <td> {{ object.location }}</td></tr> + <tr> <td> <b>expiration_date</b></td> + <td> {{ object.expiration_date }}</td></tr> + {% for interface in interfaces %} + <tr> <td><br><b>Interface</b></td> + {% ifnotequal interface.0.hdwr_type 'no' %} + <td><br>{{ interface.0.hdwr_type }}</td></tr> + {% endifnotequal %} + <tr> <td> <b>mac_addr</b></td> + <td> {{ interface.0.mac_addr }}</b></td></tr> + {% for ip in interface.1 %} + <tr> <td> <b>ip_addr</b></td> + <td> {{ ip.ip_addr }}</td></tr> + {% endfor %} + {% endfor %} + <tr> <td valign="top"> <b>comments</b></td> + <td> + {{ object.comments|linebreaksbr }}<br> + </td></tr> + +</table> +<a style="font-size:75%" href="/hostbase/{{ object.id }}/dns/">see detailed DNS information for this host</a> +<br><br> +this host is {{ object.status }}<br> +last update on {{ object.last }}<br> + +<form name="input" action="remove.html?sub=true" method="post"> +<input type="submit" value="remove"> +<input type="reset" value="cancel" onclick="history.back()"> +</form> + +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html new file mode 100644 index 000000000..aa9679cbd --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html @@ -0,0 +1,23 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Change Logs for {{ object.hostname }}</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<ul> +<li><b>Hostname:</b>{{ object.hostname }}</li> +<li><b>Date:</b>{{ object.date }}</li> +<li><b>Log:</b>{{ object.log }}</li> +</ul> + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html new file mode 100644 index 000000000..92258b648 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html @@ -0,0 +1,16 @@ +{% extends "base.html" %} +{% block pagebanner %} + <div class="header"> + <h2>Welcome to Hostbase!</h2> + <p>Hostbase is a web based management tools for Bcfg2 Hosts</p> + </div> + <br/> +{% endblock %} +{% block sidebar %} +<a href="/login/" class="sidebar">login to hostbase</a><br> +<a href="/hostbase/" class="sidebar">search for hosts</a><br> +<a href="hostbase/zones/" class="sidebar">zone file information</a> +{% endblock %} +{% block content %} +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html new file mode 100644 index 000000000..ec24a0fc0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html @@ -0,0 +1,37 @@ +{% extends "base.html" %} +{% block pagebanner %} + <div class="header"> + <h2>Login to 
Hostbase!</h2> + <p>You must login to manage hosts</p> + </div> + <br/> +{% endblock %} +{% block sidebar %} +<a href="/hostbase/" class="sidebar">search for hosts</a><br> +<a href="/hostbase/new" class="sidebar">add a new host</a><br> +<a href="hostbase/zones/" class="sidebar">zone file information</a> +{% endblock %} +{% block content %} + {% if form.has_errors %} + {{ form.username.errors|join:", " }} + <p>Login Failed.</p> + {% endif %} + {% if user.is_authenticated %} + <p>Welcome, {{ user.username }}. Thanks for logging in.</p> + {% else %} + <p>Welcome, user. Please log in.</p> + <form name="input" action="." method="post"> + <input name="username" type="text"> + <br /> + <input name="password" type="password"> + <br /> + <input type="submit" value="Login"> + {% if next %} + <input type="hidden" name="next" value="{{ next }}" /> + {% else %} + <input type="hidden" name="next" value="/hostbase/" /> + {% endif %} + + </form> + {% endif %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html new file mode 100644 index 000000000..994f631a8 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html @@ -0,0 +1,13 @@ +{% extends "base.html" %} +{% block pagebanner %} + <div class="header"> + <h2>You are logged out of Hostbase!</h2> + </div> + <br/> +{% endblock %} +{% block sidebar %} +<a href="/login/" class="sidebar">Login to Hostbase</a> +{% endblock %} +{% block content %} +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl new file mode 100644 index 000000000..e71e90e76 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl @@ -0,0 +1,6 @@ +<p> +{% if logged_in %} +<a href="/logout/" class="sidebar">logout</a> +{% else %} +<a href="/login/" class="sidebar">login</a> +{% endif %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html new file mode 100644 index 000000000..806ccd63d --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html @@ -0,0 +1,27 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Change Logs for {{ hostname }}</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +{% if host.get_logs %} +<ul> +{% for log in host.get_logs %} +<li><a href="{{ log.id }}/">{{ log.date }}</li> +{% endfor %} +</ul> +{% else %} +There are no logs for this host<br> +{% endif %} + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl new file mode 100644 index 000000000..877d427d0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl @@ -0,0 +1,5 @@ +<a href="/hostbase/" class="sidebar">host search</a><br> +<a href="/hostbase/new" class="sidebar">add a new host</a><br> +<a href="/hostbase/zones" class="sidebar">zone file information</a><br> +<a href="/hostbase/zones/new" class="sidebar">add a zone</a><br> + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html new file mode 100644 index 000000000..2dcd6271f --- /dev/null +++ 
b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html @@ -0,0 +1,102 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>new host information</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +<a href="/hostbase/" class="sidebar">search hostbase</a> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<form name="hostdata" action="?sub=true" method="post"> +<input type="hidden" name="host"> +<table border="0" width="100%"> + <colgroup> + <col width="150"> + <col width="*"> + <tr> <td> <b>hostname</b></td> + <td> <input name="hostname" type="text" ></td></tr> + <tr> <td> <b>whatami</b></td> + <td> + <select name="whatami"> + {% for choice in WHATAMI_CHOICES %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endfor %} + </select> + </td></tr> + <tr> <td> <b>netgroup</b></td> + <td> + <select name="netgroup"> + {% for choice in NETGROUP_CHOICES %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endfor %} + </select> + </td></tr> + <tr> <td> <b>class</b></td> + <td> + <select name="security_class"> + {% for choice in CLASS_CHOICES %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endfor %} + </select></td></tr> + <tr> <td> <b>support</b></td> + <td> + <select name="support"> + {% for choice in SUPPORT_CHOICES %} + <option value="{{ choice.0 }}">{{ choice.1 }} + {% endfor %} + </select></td></tr> + <tr> <td> <b>csi</b></td> + <td> <input name="csi" type="text" ></td></tr> + <tr> <td> <b>printq</b></td> + <td> <input name="printq" type="text" ></td></tr> + <tr> <td> <b>outbound_smtp</b></td> + <td> + <input type="checkbox" name="outbound_smtp"></td></tr> + <tr> <td> <b>primary_user</b></td> + <td> <input name="primary_user" type="text" size="32" > (email address)</td></tr> + <tr> <td> <b>administrator</b></td> + <td> <input name="administrator" type="text" size="32" > (email address)</td></tr> + <tr> <td> <b>location</b></td> + <td> <input name="location" type="text" ></td></tr> + <tr> <td> <b>expiration_date</b></td> + <td> <input name="expiration_date" type="text" size="10" >YYYY-MM-DD</td></tr> + <tr> <td><br><b>Interface</b></td><td><br> + {% for choice in TYPE_CHOICES %} + <input type="radio" name="hdwr_type_new" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %} + </td></tr> + <tr> <td> <b>dhcp</b></td> + <td> + <input type="checkbox" name="dhcp_new"></td></tr> + <tr> <td> <b>mac_addr</b></td> + <td> <input name="mac_addr_new" type="text"></td></tr> + <tr> <td> <b>ip_addr</b></td> + <td> <input name="ip_addr_new" type="text"></td></tr> + <tr> <td><br><b>Interface</b></td><td><br> + {% for choice in TYPE_CHOICES %} + <input type="radio" name="hdwr_type_new2" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %} + </td></tr> + <tr> <td> <b>dhcp</b></td> + <td> + <input type="checkbox" name="dhcp_new2"></td></tr> + <tr> <td> <b>mac_addr</b></td> + <td> <input name="mac_addr_new2" type="text"></td></tr> + <tr> <td> <b>ip_addr</b></td> + <td> <input name="ip_addr_new2" type="text"></td></tr> + <tr> <td> <b>comments</b></td> + <td> <textarea rows="10" cols="50" name="comments"></textarea></td></tr> +</table> +<br> +<p><input type="submit" value="Submit"> +</form> + +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html new file mode 100644 index 000000000..4329200dd --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html @@ -0,0 +1,89 @@ +{% extends "base.html" %} + +{% block 
pagebanner %} + <div class="header"> + <h2>Are you sure you want to remove {{ host.hostname }}?</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +<ul class="sidebar"> + <li><a href="dns/" class="sidebar">detailed dns info</a></li> + <li><a href="edit/" class="sidebar">edit host info</a></li> + <li><a href="dns/edit/" class="sidebar">edit dns info</a></li> +</ul> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<table border="0" width="100%"> + <colgroup> + <col width="150"> + <col width="*"> + <tr> <td> <b>hostname</b></td> + <td> {{ host.hostname }}</td></tr> + <tr> <td> <b>whatami</b></td> + <td> {{ host.whatami }}</td></tr> + <tr> <td> <b>netgroup</b></td> + <td> {{ host.netgroup }}</td></tr> + <tr> <td> <b>class</b></td> + <td> {{ host.security_class }}</td></tr> + <tr> <td> <b>support</b></td> + <td> {{ host.support }}</td></tr> + <tr> <td> <b>csi</b></td> + <td> {{ host.csi }}</td></tr> + <tr> <td> <b>printq</b></td> + <td> {{ host.printq }}</td></tr> + <tr> <td> <b>dhcp</b></td> + {% if host.dhcp %} + <td> y </td></tr> + {% else %} + <td> n </td></tr> + {% endif %} + <tr> <td> <b>outbound_smtp</b></td> + {% if host.outbound_smtp %} + <td> y </td></tr> + {% else %} + <td> n </td></tr> + {% endif %} + <tr> <td> <b>primary_user</b></td> + <td> {{ host.primary_user }}</td></tr> + <tr> <td> <b>administrator</b></td> + <td> {{ host.administrator }}</td></tr> + <tr> <td> <b>location</b></td> + <td> {{ host.location }}</td></tr> + <tr> <td> <b>expiration_date</b></td> + <td> {{ host.expiration_date }}</td></tr> + {% for interface in interfaces %} + <tr> <td><br><b>Interface</b></td> + {% ifnotequal interface.0.hdwr_type 'no' %} + <td><br>{{ interface.0.hdwr_type }}</td></tr> + {% endifnotequal %} + <tr> <td> <b>mac_addr</b></td> + <td> {{ interface.0.mac_addr }}</b></td></tr> + {% for ip in interface.1 %} + <tr> <td> <b>ip_addr</b></td> + <td> {{ ip.ip_addr }}</td></tr> + {% endfor %} + {% endfor %} + <tr> <td valign="top"> <b>comments</b></td> + <td> + {{ host.comments|linebreaksbr }}<br> + </td></tr> + +</table> +<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/">see detailed DNS information for this host</a> +<br><br> +this host is {{ host.status }}<br> +last update on {{ host.last }}<br> + +<form name="input" action="remove.html?sub=true" method="post"> +<input type="submit" value="remove"> +<input type="reset" value="cancel" onclick="history.back()"> +</form> + +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html new file mode 100644 index 000000000..45b22058d --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html @@ -0,0 +1,45 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Search Results</h2> + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +{% if hosts %} +<table border="0" width="100%"> + <colgroup> + <col width="200"> + <col width="75"> + <col width="50"> + <col width="50"> + <col width="50"> + <col width="*"> + <tr> <td><b>hostname</b></td> + <td> <b>status</b> </td> + </tr> + {% for host in hosts %} + <tr> <td>{{ host.0 }}</td> + <td> {{ host.2 }} </td> + <td> <a href="{{ host.1 }}">view</a> </td> + <td> <a href="{{ host.1 }}/edit">edit</a> </td> + <td> <a href="{{ host.1 }}/copy">copy</a> </td> + <td> <a href="{{ host.1 
}}/logs">logs</a> </td> +<!-- <td> <a href="{{ host.1 }}/remove">remove</a> </td> --> + </tr> + {% endfor %} +</table> +{% else %} +No hosts matched your query<br> +Click the back button on your browser to edit your search +{% endif %} + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html new file mode 100644 index 000000000..409d418fe --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html @@ -0,0 +1,57 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Welcome to Hostbase!</h2> + <p>search for hosts using one or more of the fields below + </div> + <br/> +{% endblock %} + +{% block sidebar %} +<a href="/hostbase/new" class="sidebar">add a new host</a><br> +<a href="/hostbase/zones" class="sidebar">zone file information</a><br> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} +{% comment %} + ...or go to <a href="hostinfo">this</a> + page to enter hostinfo-like queries<br><br> +{% endcomment %} + +<form name="input" action="?sub=true" method="post"> + <fieldset class="module aligned ()"> + <label for="hostname">hostname:</label><input name="hostname" type="text" ><br> + <label for="netgroup">netgroup:</label><input name="netgroup" type="text" ><br> + <label for="security_class">class:</label><input name="security_class" type="text" ><br> + <label for="support">support:</label><input name="support" type="text" ><br> + <label for="csi">csi:</label><input name="csi" type="text" ><br> + <label for="printq">printq:</label><input name="printq" type="text" ><br> + <label for="outbound_smtp">outbound_smtp:</label> + {% for choice in yesno %} + <input type="radio" name="outbound_smtp" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %}<br> + <label for="primary_user">primary_user:</label><input name="primary_user" type="text" ><br> + <label for="administrator">administrator:</label><input name="administrator" type="text" ><br> + <label for="location">location:</label><input name="location" type="text" ><br> + <label for="expiration_date">expiration_date:</label><input name="expiration_date" type="text" ><br> + <br><label for="Interface">Interface:</label> + {% for choice in TYPE_CHOICES %} + <input type="radio" name="hdwr_type" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %}<br> + <label for="dhcp">dhcp:</label> + {% for choice in yesno %} + <input type="radio" name="dhcp" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %}<br> + <label for="mac_addr">mac_addr:</label><input name="mac_addr" type="text" ><br> + <label for="ip_addr">ip_addr:</label><input name="ip_addr" type="text" ><br> + <label for="dns_view">dns_viewer:</label> + {% for choice in DNS_CHOICES %} + <input type="radio" name="dns_view" value="{{ choice.0 }}" >{{ choice.1 }} + {% endfor %}<br> + <label for="mx">mx:</label><input name="mx" type="text" ><br> +<p> +<input type="submit" value="Search"> +</form> +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html new file mode 100644 index 000000000..ee355ee87 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html @@ -0,0 +1,81 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Zones</h2> + <p>Edit information for {{ zone }} + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +<ul> +<li><a 
href="/hostbase/zones/{{ zone_id }}/" class="sidebar">view zone</a><br> +</li> +</ul> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} + +<script language="JavaScript" type="text/Javascript"> +function toggleField(fieldname){ + if(document.getElementById){ + var style = document.getElementById(fieldname).style; + style.display = style.display? "":"block"; + } +} +</script> + +<style type=text/css> +div#nameserver{ + display: none; +} +div#mx{ + display: none; +} +div#address{ + display: none; +} +</style> + +<form name="zonedata" action="" method="post"> + <fieldset class="module aligned ()"> +<label for="id_zone">zone:</label></td> <td>{{ form.zone }}<br> +<label for="id_admin">admin:</label></td> <td>{{ form.admin }}<br> +<label for="id_primary_master">primary_master:</label></td> <td>{{ form.primary_master }}<br> +<label for="id_expire">expire:</label></td> <td>{{ form.expire }}<br> +<label for="id_retry">retry:</label></td> <td>{{ form.retry }}<br> +<label for="id_refresh">refresh:</label></td> <td>{{ form.refresh }}<br> +<label for="id_ttl">ttl:</label></td> <td>{{ form.ttl }}<br> +{% for ns in nsforms %} +<label for="id_name">nameserver:</label></td> <td>{{ ns.name }}<br> +{% endfor %} +</table> +<div id=nameserver> + <label for="id_name">nameserver:</label></td> <td>{{ nsadd.name }}<br> + <label for="id_name">nameserver:</label></td> <td>{{ nsadd.name }}<br> +</div> +<a style="font-size:75%" href=# onclick="toggleField('nameserver')">Add NS records</a><br> +{% for mx in mxforms %} +<label for="id_mx">mx:</label></td> <td>{{ mx.priority }} {{ mx.mx }}<br> +{% endfor %} +<div id=mx> + <label for="id_mx">mx:</label></td> <td>{{ mxadd.priority }} {{ mxadd.mx }}<br> + <label for="id_mx">mx:</label></td> <td>{{ mxadd.priority }} {{ mxadd.mx }}<br> +</div> +<a style="font-size:75%" href=# onclick="toggleField('mx')">Add MX records</a><br> +{% for a in aforms %} +<label for="id_address">ip address:</label></td> <td>{{ a.ip_addr }}<br> +{% endfor %} +<div id=address> + <label for="id_address">ip address:</label></td> <td>{{ addadd.ip_addr }}<br> + <label for="id_address">ip address:</label></td> <td>{{ addadd.ip_addr }}<br> +</div> +<a style="font-size:75%" href=# onclick="toggleField('address')">Add A records</a><br> +<label for="id_aux">aux:</label></td> <td>{{ form.aux }}<br> +<p><input type="submit" value="Submit"> +</form> + +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html new file mode 100644 index 000000000..b59fa9e3c --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html @@ -0,0 +1,43 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Zones</h2> + <p>Enter information for a new zone to be generated by Hostbase + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} +<form name="zonedata" action="" method="post"> + <fieldset class="module aligned ()"> + {{ form.as_p}} +<!-- + <label for="id_zone">zone:</label>{{ form.zone }}<br> + <label for="id_admin">admin:</label>{{ form.admin }}<br> + <label for="id_primary_master">primary_master:</label>{{ form.primary_master }}<br> + <label for="id_expire">expire:</label>{{ form.expire }}<br> + <label for="id_retry">retry:</label>{{ form.retry }}<br> + <label for="id_refresh">refresh:</label>{{ form.refresh }}<br> + <label for="id_ttl">ttl:</label>{{ form.ttl 
}}<br> + <label for="id_name">nameserver:</label>{{ nsform.name }}<br> + <label for="id_name">nameserver:</label>{{ nsform.name }}<br> + <label for="id_name">nameserver:</label>{{ nsform.name }}<br> + <label for="id_name">nameserver:</label>{{ nsform.name }}<br> + <label for="id_mx">mx:</label>{{ mxform.priority }} {{ mxform.mx }}<br> + <label for="id_mx">mx:</label>{{ mxform.priority }} {{ mxform.mx }}<br> + <label for="id_mx">ip address:</label>{{ aform.ip_addr }}<br> + <label for="id_mx">ip address:</label>{{ aform.ip_addr }}<br> + <label for="id_aux">aux: +(information not generated from Hostbase)</label>{{ form.aux }}<br> +--!> + <p><input type="submit" value="Submit"> + </fieldset> +</form> +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html new file mode 100644 index 000000000..c773e7922 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html @@ -0,0 +1,37 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Zones</h2> + <p>Hostbase generates DNS zone files for the following zones. + </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} +{% if zone_list %} +<table border="0" width="100%"> + <colgroup> + <col width="200"> + <col width="75"> + <col width="50"> + <col width="*"> + <tr> <td><b>zone</b></td> + </tr> + {% for zone in zone_list|dictsort:"zone" %} + <tr> <td> {{ zone.zone }}</td> + <td> <a href="{{ zone.id }}">view</a> </td> + <td> <a href="{{ zone.id }}/edit">edit</a> </td> + </tr> + {% endfor %} +</table> +{% else %} +There is no zone data currently in the database<br> +{% endif %} +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html new file mode 100644 index 000000000..fa12e3ec5 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html @@ -0,0 +1,71 @@ +{% extends "base.html" %} + +{% block pagebanner %} + <div class="header"> + <h2>Zones</h2> + <p>Hostbase generates DNS zone files for the following zones. 
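zones.html above iterates a zone_list whose items expose zone and id attributes and sorts it with |dictsort:"zone". As a reading aid, here is a minimal sketch of the kind of Django view that could supply that context; the Zone model import path and the view name are assumptions for illustration, not code from this commit:

    # Hypothetical sketch -- assumes the hostbase app defines a Zone model
    # with 'zone' and 'id' fields, as zones.html implies.
    from django.shortcuts import render_to_response
    from Bcfg2.Server.Hostbase.hostbase.models import Zone

    def zones(request):
        # zones.html applies |dictsort:"zone" itself, so no ordering is
        # needed on the queryset.
        return render_to_response('zones.html',
                                  {'zone_list': Zone.objects.all()})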
+ </div> + <br/> +{% endblock %} + +{% block sidebar %} +{% include "navbar.tmpl" %} +<ul class="sidebar"> +<li><a href="/hostbase/zones/{{ zone.id }}/edit/" class="sidebar">edit zone</a><br> +</li> +</ul> +{% include "logout.tmpl" %} +{% endblock %} + +{% block content %} +<table border="0" width="100%"> + <colgroup> + <col width="200"> + <col width="*"> + <tr> <td> <b>zone</b></td> + <td> {{ zone.zone }}</td></tr> + <tr> <td> <b>serial</b></td> + <td> {{ zone.serial }}</td></tr> + <tr> <td> <b>admin</b></td> + <td> {{ zone.admin }}</td></tr> + <tr> <td> <b>primary_master</b></td> + <td> {{ zone.primary_master }}</td></tr> + <tr> <td> <b>expire</b></td> + <td> {{ zone.expire }}</td></tr> + <tr> <td> <b>retry</b></td> + <td> {{ zone.retry }}</td></tr> + <tr> <td> <b>refresh</b></td> + <td> {{ zone.refresh }}</td></tr> + <tr> <td> <b>ttl</b></td> + <td> {{ zone.ttl }}</td></tr> + + <tr><td valign="top"> <b>nameservers</b></td> + <td> + {% for nameserver in zone.nameservers.all %} + {{ nameserver.name }}<br> + {% endfor %} + </td></tr> + <tr><td valign="top"> <b>mxs</b></td> + <td> + {% for mx in zone.mxs.all %} + {{ mx.priority }} {{ mx.mx }}<br> + {% endfor %} + </td></tr> + {% if addresses %} + <tr><td valign="top"> <b>A records</b></td> + <td> + {% for address in sof.addresses.all %} + {{ address.ip_addr }}<br> + {% endfor %} + </td></tr> + {% endif %} + + <tr> <td valign="top"> <b>aux</b></td> + <td> + {{ zone.aux|linebreaksbr }} + </td></tr> + +</table> +<br><br> +{% endblock %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py new file mode 100644 index 000000000..f3db26f67 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py @@ -0,0 +1,180 @@ +""" +Checks with LDAP (ActiveDirectory) to see if the current user is an LDAP(AD) +user, and returns a subset of the user's profile that is needed by Argonne/CIS +to set user level privleges in Django +""" + +import os +import ldap + + +class LDAPAUTHError(Exception): + """LDAPAUTHError is raised when somehting goes boom.""" + pass + + +class ldapauth(object): + group_test = False + check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP'] + securitylevel = 0 + distinguishedName = None + sAMAccountName = None + telephoneNumber = None + title = None + memberOf = None + department = None # this will be a list + mail = None + extensionAttribute1 = None # badgenumber + badge_no = None + + def __init__(self, login, passwd): + """get username (if using ldap as auth the + apache env var REMOTE_USER should be used) + from username get user profile from AD/LDAP + """ + #p = self.user_profile(login,passwd) + d = self.user_dn(login) # success, distname + print(d[1]) + if d[0] == 'success': + pass + p = self.user_bind(d[1], passwd) + if p[0] == 'success': + #parse results + parsed = self.parse_results(p[2]) + print(self.department) + self.group_test = self.member_of() + securitylevel = self.security_level() + print("ACCESS LEVEL: " + str(securitylevel)) + else: + raise LDAPAUTHError(p[2]) + else: + raise LDAPAUTHError(p[2]) + + def user_profile(self, login, passwd=None): + """NOT USED RIGHT NOW""" + ldap_login = "CN=%s" % login + svc_acct = os.environ['LDAP_SVC_ACCT_NAME'] + svc_pass = os.environ['LDAP_SVC_ACCT_PASS'] + #svc_acct = 'CN=%s,DC=anl,DC=gov' % login + #svc_pass = passwd + + search_pth = os.environ['LDAP_SEARCH_PTH'] + + try: + conn = ldap.initialize(os.environ['LDAP_URI']) + conn.bind(svc_acct, svc_pass, ldap.AUTH_SIMPLE) + result_id = conn.search(search_pth, + ldap.SCOPE_SUBTREE, + 
ldap_login, + None) + result_type, result_data = conn.result(result_id, 0) + return ('success', 'User profile found', result_data,) + except ldap.LDAPError: + e = sys.exc_info()[1] + #connection failed + return ('error', 'LDAP connect failed', e,) + + def user_bind(self, distinguishedName, passwd): + """Binds to LDAP Server""" + search_pth = os.environ['LDAP_SEARCH_PTH'] + try: + conn = ldap.initialize(os.environ['LDAP_URI']) + conn.bind(distinguishedName, passwd, ldap.AUTH_SIMPLE) + cn = distinguishedName.split(",") + result_id = conn.search(search_pth, + ldap.SCOPE_SUBTREE, + cn[0], + None) + result_type, result_data = conn.result(result_id, 0) + return ('success', 'User profile found', result_data,) + except ldap.LDAPError: + e = sys.exc_info()[1] + #connection failed + return ('error', 'LDAP connect failed', e,) + + def user_dn(self, cn): + """Uses Service Account to get distinguishedName""" + ldap_login = "CN=%s" % cn + svc_acct = os.environ['LDAP_SVC_ACCT_NAME'] + svc_pass = os.environ['LDAP_SVC_ACCT_PASS'] + search_pth = os.environ['LDAP_SEARCH_PTH'] + + try: + conn = ldap.initialize(os.environ['LDAP_URI']) + conn.bind(svc_acct, svc_pass, ldap.AUTH_SIMPLE) + result_id = conn.search(search_pth, + ldap.SCOPE_SUBTREE, + ldap_login, + None) + result_type, result_data = conn.result(result_id, 0) + raw_obj = result_data[0][1] + distinguishedName = raw_obj['distinguishedName'] + return ('success', distinguishedName[0],) + except ldap.LDAPError: + e = sys.exc_info()[1] + #connection failed + return ('error', 'LDAP connect failed', e,) + + def parse_results(self, user_obj): + """Clean up the huge ugly object handed to us in the LDAP query""" + #user_obj is a list formatted like this: + #[('LDAP_DN',{user_dict},),] + try: + raw_obj = user_obj[0][1] + self.memberOf = raw_obj['memberOf'] + self.sAMAccountName = raw_obj['sAMAccountName'][0] + self.distinguishedName = raw_obj['distinguishedName'][0] + self.telephoneNumber = raw_obj['telephoneNumber'][0] + self.title = raw_obj['title'][0] + self.department = raw_obj['department'][0] + self.mail = raw_obj['mail'][0] + self.badge_no = raw_obj['extensionAttribute1'][0] + self.email = raw_obj['extensionAttribute2'][0] + display_name = raw_obj['displayName'][0].split(",") + self.name_f = raw_obj['givenName'][0] + self.name_l = display_name[0] + self.is_staff = False + self.is_superuser = False + + return + except KeyError: + e = sys.exc_info()[1] + raise LDAPAUTHError("Portions of the LDAP User profile not present") + + def member_of(self): + """See if this user is in our group that is allowed to login""" + m = [g for g in self.memberOf if g == self.check_member_of] + #print m + if len(m) == 1: + return True + else: + return False + + def security_level(self): + level = self.securitylevel + + user = os.environ['LDAP_GROUP_USER'] + m = [g for g in self.memberOf if g == user] + if len(m) == 1: + if level < 1: + level = 1 + + cspr = os.environ['LDAP_GROUP_SECURITY_LOW'] + m = [g for g in self.memberOf if g == cspr] + if len(m) == 1: + if level < 2: + level = 2 + + cspo = os.environ['LDAP_GROUP_SECURITY_HIGH'] + m = [g for g in self.memberOf if g == cspo] + if len(m) == 1: + if level < 3: + level = 3 + + admin = os.environ['LDAP_GROUP_ADMIN'] + m = [g for g in self.memberOf if g == admin] + if len(m) == 1: + if level < 4: + level = 4 + + return level diff --git a/src/lib/Bcfg2/Server/Hostbase/manage.py b/src/lib/Bcfg2/Server/Hostbase/manage.py new file mode 100755 index 000000000..5e78ea979 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/manage.py @@ 
-0,0 +1,11 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +try: + import settings # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) + sys.exit(1) + +if __name__ == "__main__": + execute_manager(settings) diff --git a/src/lib/Bcfg2/Server/Hostbase/media/base.css b/src/lib/Bcfg2/Server/Hostbase/media/base.css new file mode 100644 index 000000000..ddbf02165 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/media/base.css @@ -0,0 +1,5 @@ + +/* Import other styles */ +@import url('global.css'); +@import url('layout.css'); +@import url('boxypastel.css'); diff --git a/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css b/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css new file mode 100644 index 000000000..7ae0684ef --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css @@ -0,0 +1,179 @@ +body { + background-color: #fff; + color: #000; + font: 12px 'Lucida Grande', Arial, Helvetica, sans-serif; + margin-left:0px; + margin-right:100px; +} +/* links */ +a:link { + color: #00f; + text-decoration: none; +} +a:visited { + color: #00a; + text-decoration: none; +} +a:hover { + color: #00a; + text-decoration: underline; +} +a:active { + color: #00a; + text-decoration: underline; +} +/* divs*/ +div.bad { + border: 1px solid #660000; + background: #FF6A6A; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.modified { + border: 1px solid #CC9900; + background: #FFEC8B; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.clean { + border: 1px solid #006600; + background: #9AFF9A; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.extra { + border: 1px solid #006600; + background: #6699CC; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.warning { + border: 1px + solid #CC3300; + background: #FF9933; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.all-warning { + border: 1px solid #DD5544; + background: #FFD9A2; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.down { + border: 1px + solid #999; + background-color: #DDD; + margin: 10px 0; + padding: 8px; + text-align: left; + margin-left:50px; + margin-right:50px; +} +div.items{ + display: none; +} +div.nodebox { + border: 1px solid #c7cfd5; + background: #f1f5f9; + margin: 20px 0; + padding: 8px 8px 16px 8px; + text-align: left; + position:relative; +} +div.header { + background-color: #DDD; + padding: 8px; + text-indent:50px; + position:relative; +} + +/*Spans*/ +.nodename { + font-style: italic; +} +.nodelisttitle { + font-size: 14px; +} + +h2{ + font-size: 16px; + color: #000; +} + +ul.plain { + list-style-type:none; + text-align: left; +} + +.notebox { + position: absolute; + top: 0px; + right: 0px; + padding: 1px; + text-indent:0px; + border: 1px solid #FFF; + background: #999; + color: #FFF; +} + +.configbox { + position: absolute; + bottom: 0px; + right: 0px; + padding: 1px; + text-indent:0px; + border: 1px solid #999; + background: #FFF; + color: #999; +} + +p.indented{ + 
text-indent: 50px +} + +/* + Sortable tables */ +table.sortable a.sortheader { + background-color:#dfd; + font-weight: bold; + text-decoration: none; + display: block; +} +table.sortable { + padding: 2px 4px 2px 4px; + border: 1px solid #000000; + border-spacing: 0px +} +td.sortable{ + padding: 2px 8px 2px 8px; +} + +th.sortable{ + background-color:#F3DD91; + border: 1px solid #FFFFFF; +} +tr.tablelist { + background-color:#EDF3FE; +} +tr.tablelist-alt{ + background-color:#FFFFFF; +} diff --git a/src/lib/Bcfg2/Server/Hostbase/media/global.css b/src/lib/Bcfg2/Server/Hostbase/media/global.css new file mode 100644 index 000000000..73451e1bc --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/media/global.css @@ -0,0 +1,8 @@ +body { + margin:0; + padding:0; + font-size:12px; + font-family:"Lucida Grande","Bitstream Vera Sans",Verdana,Arial,sans-serif; + color:#000; + background:#fff; + } diff --git a/src/lib/Bcfg2/Server/Hostbase/media/layout.css b/src/lib/Bcfg2/Server/Hostbase/media/layout.css new file mode 100644 index 000000000..9085cc220 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/media/layout.css @@ -0,0 +1,62 @@ +/* Page Structure */ +#container { position:absolute; top: 3em; margin-left:1em; margin-right:2em; padding:0; margin-top:1.5em; min-width: + 650px; } +#header { width:100%; } +#content-main { float:left; } + +/* HEADER */ +#header { +background:#000; +color:#ffc; +position:absolute; +} +#header a:link, #header a:visited { color:white; } +#header a:hover { text-decoration:underline; } +#branding h1 { padding:0 10px; font-size:18px; margin:8px 0; font-weight:normal; color:#f4f379; } +#branding h2 { padding:0 10px; font-size:14px; margin:-8px 0 8px 0; font-weight:normal; color:#ffc; } +#user-tools { position:absolute; top:0; right:0; padding:1.2em 10px; font-size:11px; text-align:right; } + +/*SIDEBAR*/ +#sidebar { + float:left; + position: relative; + width: auto; + height: 100%; + margin-top: 3em; + padding-right: 1.5em; + padding-left: 1.5em; + padding-top: 1em; + padding-bottom:3em; + background: #000; + color:ffc; +} + +a.sidebar:link {color: #fff;} +a.sidebar:active {color: #fff;} +a.sidebar:visited {color: #fff;} +a.sidebar:hover {color: #fff;} + +ul.sidebar { + color: #ffc; + text-decoration: none; + list-style-type: none; + text-indent: -1em; +} +ul.sidebar-level2 { + text-indent: -2em; + list-style-type: none; + font-size: 11px; +} + +/* ALIGNED FIELDSETS */ +.aligned label { display:block; padding:0 1em 3px 0; float:left; width:8em; } +.aligned label.inline { display:inline; float:none; } +.colMS .aligned .vLargeTextField, .colMS .aligned .vXMLLargeTextField { width:350px; } +form .aligned p, form .aligned ul { margin-left:7em; padding-left:30px; } +form .aligned table p { margin-left:0; padding-left:0; } +form .aligned p.help { padding-left:38px; } +.aligned .vCheckboxLabel { float:none !important; display:inline; padding-left:4px; } +.colM .aligned .vLargeTextField, colM .aligned .vXMLLargeTextField { width:610px; } +.checkbox-row p.help { margin-left:0; padding-left:0 !important; } + + diff --git a/src/lib/Bcfg2/Server/Hostbase/nisauth.py b/src/lib/Bcfg2/Server/Hostbase/nisauth.py new file mode 100644 index 000000000..ae4c6c021 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/nisauth.py @@ -0,0 +1,40 @@ +"""Checks with NIS to see if the current user is in the support group""" +import os +import crypt, nis +from Bcfg2.Server.Hostbase.settings import AUTHORIZED_GROUP + + +class NISAUTHError(Exception): + """NISAUTHError is raised when somehting goes boom.""" + pass 
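The nisauth class that follows is presumably what the 'Bcfg2.Server.Hostbase.backends.NISBackend' entry registered later in this diff (in settings.py) wraps; backends.py itself is not shown in this section. A minimal sketch of such a backend under Django's standard authentication-backend protocol, with everything besides nisauth/NISAUTHError assumed for illustration:

    # Hypothetical sketch of a Django auth backend wrapping nisauth.
    from django.contrib.auth.models import User
    from Bcfg2.Server.Hostbase.nisauth import nisauth, NISAUTHError

    class NISBackend(object):
        def authenticate(self, username=None, password=None):
            try:
                # nisauth raises NISAUTHError on an unknown user, a wrong
                # password, or missing group membership.
                nisauth(username, password)
            except NISAUTHError:
                return None
            # Map the NIS account onto a Django user, creating it on
            # first successful login.
            user, created = User.objects.get_or_create(username=username)
            return user

        def get_user(self, user_id):
            try:
                return User.objects.get(pk=user_id)
            except User.DoesNotExist:
                return None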
+ +class nisauth(object): + group_test = False +# check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP'] + samAcctName = None + distinguishedName = None + sAMAccountName = None + telephoneNumber = None + title = None + memberOf = None + department = None #this will be a list + mail = None + extensionAttribute1 = None #badgenumber + badge_no = None + uid = None + + def __init__(self,login,passwd=None): + """get user profile from NIS""" + try: + p = nis.match(login, 'passwd.byname').split(":") + except: + raise NISAUTHError('username') + # check user password using crypt and 2 character salt from passwd file + if p[1] == crypt.crypt(passwd, p[1][:2]): + # check to see if user is in valid support groups + # will have to include these groups in a settings file eventually + if not login in nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[-1].split(',') and p[3] != nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[2]: + raise NISAUTHError('group') + self.uid = p[2] + else: + raise NISAUTHError('password') diff --git a/src/lib/Bcfg2/Server/Hostbase/regex.py b/src/lib/Bcfg2/Server/Hostbase/regex.py new file mode 100644 index 000000000..41cc0f6f0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/regex.py @@ -0,0 +1,6 @@ +import re + +date = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}$') +host = re.compile('^[a-z0-9-_]+(\.[a-z0-9-_]+)+$') +macaddr = re.compile('^[0-9abcdefABCDEF]{2}(:[0-9abcdefABCDEF]{2}){5}$|virtual') +ipaddr = re.compile('^[0-9]{1,3}(\.[0-9]{1,3}){3}$') diff --git a/src/lib/Bcfg2/Server/Hostbase/settings.py b/src/lib/Bcfg2/Server/Hostbase/settings.py new file mode 100644 index 000000000..4e641f13c --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/settings.py @@ -0,0 +1,143 @@ +import os.path +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + +PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) + +c = ConfigParser.ConfigParser() +#This needs to be configurable one day somehow +c.read(['./bcfg2.conf']) + +defaults = {'database_engine':'sqlite3', + 'database_name':'./dev.db', + 'database_user':'', + 'database_password':'', + 'database_host':'', + 'database_port':3306, + 'default_mx':'localhost', + 'priority':10, + 'authorized_group':'admins', + } + +if c.has_section('hostbase'): + options = dict(c.items('hostbase')) +else: + options = defaults + +# Django settings for Hostbase project. +DEBUG = True +TEMPLATE_DEBUG = DEBUG +ADMINS = ( + ('Root', 'root'), +) +MANAGERS = ADMINS + +# 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'. +DATABASE_ENGINE = options['database_engine'] +# Or path to database file if using sqlite3. +DATABASE_NAME = options['database_name'] +# Not used with sqlite3. +DATABASE_USER = options['database_user'] +# Not used with sqlite3. +DATABASE_PASSWORD = options['database_password'] +# Set to empty string for localhost. Not used with sqlite3. +DATABASE_HOST = options['database_host'] +# Set to empty string for default. Not used with sqlite3. +DATABASE_PORT = int(options['database_port']) +# Local time zone for this installation. 
All choices can be found here:
+# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
+try:
+    TIME_ZONE = c.get('statistics', 'time_zone')
+except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+    TIME_ZONE = None
+
+# enter the default MX record machines will get in Hostbase
+# this setting may move elsewhere eventually
+DEFAULT_MX = options['default_mx']
+PRIORITY = int(options['priority'])
+
+SESSION_EXPIRE_AT_BROWSER_CLOSE = True
+
+# Uncomment a backend below if you would like to use it for authentication
+AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
+                           'Bcfg2.Server.Hostbase.backends.NISBackend',
+                           #'Bcfg2.Server.Hostbase.backends.LDAPBackend',
+                           )
+# enter an NIS group name you'd like to give access to edit hostbase records
+AUTHORIZED_GROUP = options['authorized_group']
+
+# create login url area:
+import django.contrib.auth
+django.contrib.auth.LOGIN_URL = '/login'
+# Absolute path to the directory that holds media.
+# Example: "/home/media/media.lawrence.com/"
+MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
+# Just for development
+SERVE_MEDIA = DEBUG
+
+# Language code for this installation. All choices can be found here:
+# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
+# http://blogs.law.harvard.edu/tech/stories/storyReader$15
+LANGUAGE_CODE = 'en-us'
+SITE_ID = 1
+# URL that handles the media served from MEDIA_ROOT.
+# Example: "http://media.lawrence.com"
+MEDIA_URL = '/site_media/'
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+# Examples: "http://foo.com/media/", "/media/".
+ADMIN_MEDIA_PREFIX = '/media/'
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = '*%=fv=yh9zur&gvt4&*d#84o(cy^-*$ox-v1e9%32pzf2*qu#s'
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+    'django.template.loaders.filesystem.load_template_source',
+    'django.template.loaders.app_directories.load_template_source',
+#     'django.template.loaders.eggs.load_template_source',
+)
+
+TEMPLATE_CONTEXT_PROCESSORS = (
+    "django.core.context_processors.auth",
+    "django.core.context_processors.debug",
+    "django.core.context_processors.i18n",
+    "django.core.context_processors.request",
+    "django.core.context_processors.media",
+# Django development version.
+#     "django.core.context_processors.csrf",
+)
+
+
+MIDDLEWARE_CLASSES = (
+    'django.middleware.common.CommonMiddleware',
+    'django.contrib.sessions.middleware.SessionMiddleware',
+    'django.middleware.locale.LocaleMiddleware',
+    'django.contrib.auth.middleware.AuthenticationMiddleware',
+    'django.middleware.doc.XViewMiddleware',
+)
+
+ROOT_URLCONF = 'Bcfg2.Server.Hostbase.urls'
+
+TEMPLATE_DIRS = (
+    # Put strings here, like "/home/html/django_templates".
+    # Always use forward slashes, even on Windows.
+ '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates', + '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates', + '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/templates', + '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/templates', + '/usr/share/bcfg2/Hostbase/templates', + os.path.join(PROJECT_ROOT, 'templates'), + os.path.join(PROJECT_ROOT, 'hostbase/webtemplates'), +) + +INSTALLED_APPS = ( + 'django.contrib.admin', + 'django.contrib.admindocs', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.humanize', + 'Bcfg2.Server.Hostbase.hostbase', +) + +LOGIN_URL = '/login/' diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl new file mode 100644 index 000000000..74ea3c047 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl @@ -0,0 +1,29 @@ +#mx -> +#priority -> + +hostname -> +whatami -> +netgroup -> +security_class -> +support -> +csi -> +printq -> +dhcp -> +outbound_smtp -> +primary_user -> +administrator -> +location -> +expiration_date -> YYYY-MM-DD +comments -> + +mac_addr -> +hdwr_type -> +ip_addr -> +#ip_addr -> +cname -> +#cname -> + +#mac_addr -> +#hdwr_type -> +#ip_addr -> +#cname -> diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head new file mode 100644 index 000000000..a3d19547e --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head @@ -0,0 +1,5 @@ +# +# dhcpd.conf +# +# Configuration file for ISC dhcpd +# diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl new file mode 100644 index 000000000..757b263cd --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl @@ -0,0 +1,17 @@ +# +# This file is automatically generated. +# DO NOT EDIT IT BY HAND! +# +# This file contains {{ numips }} IP addresses +# Generated on: {% now "r" %} +# + +{% include "dhcpd.conf.head" %} + +# Hosts which require special configuration options can be listed in +# host statements. If no address is specified, the address will be +# allocated dynamically (if possible), but the host-specific information +# will still come from the host declaration. + +{% for host in hosts %}host {{ host.0 }} {hardware ethernet {{ host.1 }};fixed-address {{ host.2 }};} +{% endfor %} diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl new file mode 100644 index 000000000..251cb5a79 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl @@ -0,0 +1,26 @@ +############################################################################## +# MCS hosts file +# +# This file is generated automatically - DO NOT EDIT IT. 
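dhcpd.tmpl above emits one host statement per entry and indexes each host positionally (host.0/host.1/host.2). A sketch of the context it appears to expect and how it could be rendered standalone; the template path and sample data are invented, and standalone rendering assumes Django settings are configured first:

    # Illustrative only: render dhcpd.tmpl outside the Hostbase views,
    # assuming each host is a (hostname, mac, ip) triple as the template's
    # positional indexing implies.
    from django.conf import settings
    settings.configure(TEMPLATE_DIRS=('/path/to/Hostbase/templates',))

    from django.template.loader import render_to_string

    hosts = [('node1.example.com', '00:11:22:33:44:55', '10.0.0.10'),
             ('node2.example.com', '00:11:22:33:44:66', '10.0.0.11')]
    print(render_to_string('dhcpd.tmpl',
                           {'hosts': hosts, 'numips': len(hosts)}))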
+# +# Generated on: {% now "r" %} +# + +127.0.0.1 localhost.mcs.anl.gov localhost + +# This file lists hosts in these domains: +{% for domain in domain_data %}# {{ domain.0 }}: {{ domain.1 }} +{% endfor %} +# +# This file lists hosts on these networks: +# +# Network Hosts +# --------------------------------------------------------------------- +{% for octet in two_octets_data %}# {{ octet.0 }} {{octet.1 }} +{% endfor %} +# +{% for octet in three_octets_data %}# {{ octet.0 }} {{ octet.1 }} +{% endfor %} +# +# Total host interfaces (ip addresses) in this file: {{ num_ips }} + diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl new file mode 100644 index 000000000..00e0d5d04 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl @@ -0,0 +1,5 @@ +########################################################################## +# Hosts on subnet: {{ subnet.0 }} +# total hosts: {{ subnet.1 }} +{% for ip in ips %}{{ ip.0 }} {{ ip.1 }}{% if ip.4 and not ip.3 %} # {{ ip.5 }}{% else %}{% for name in ip.2 %} {{ name }}{% endfor %}{% for cname in ip.3 %} {{ cname }}{% endfor %} # {{ ip.5 }}{% endif %} +{% endfor %} diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl new file mode 100644 index 000000000..03e054198 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl @@ -0,0 +1,69 @@ +// This is the primary configuration file for the BIND DNS server named. +// +// Please read /usr/share/doc/bind9/README.Debian.gz for information on the +// structure of BIND configuration files in Debian, *BEFORE* you customize +// this configuration file. +// + +include "/etc/bind/named.conf.options"; + +include "/etc/bind/rndc.key"; + +// prime the server with knowledge of the root servers +zone "." { + type hint; + file "/etc/bind/db.root"; +}; + +// be authoritative for the localhost forward and reverse zones, and for +// broadcast zones as per RFC 1912 +{% for zone in zones %} +zone "{{ zone.1 }}" { + type master; + file "/etc/bind/hostbase/{{ zone.1 }}"; + notify no; + also-notify { 140.221.9.6;140.221.8.10; }; +};{% endfor %} + +zone "localhost" { + type master; + file "/etc/bind/db.local"; +}; + +zone "127.in-addr.arpa" { + type master; + file "/etc/bind/db.127"; +}; + +zone "0.in-addr.arpa" { + type master; + file "/etc/bind/db.0"; +}; + +zone "255.in-addr.arpa" { + type master; + file "/etc/bind/db.255"; +}; +{% for reverse in reverses %} +zone "{{ reverse.0 }}.in-addr.arpa" { + type master; + file "/etc/bind/hostbase/{{ reverse.0 }}.rev"; + notify no; + also-notify { 140.221.9.6;140.221.8.10; }; +};{% endfor %} + +// zone "com" { type delegation-only; }; +// zone "net" { type delegation-only; }; + +// From the release notes: +// Because many of our users are uncomfortable receiving undelegated answers +// from root or top level domains, other than a few for whom that behaviour +// has been trusted and expected for quite some length of time, we have now +// introduced the "root-delegations-only" feature which applies delegation-only +// logic to all top level domains, and to the root domain. An exception list +// should be specified, including "MUSEUM" and "DE", and any other top level +// domains from whom undelegated responses are expected and trusted. 
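named.tmpl interpolates {{ reverse.0 }}.in-addr.arpa for each reverse zone, and the reverse-zone templates below use the same convention via inaddr: the value is the network prefix with its octets reversed, per in-addr.arpa naming. A one-function sketch of that mapping, with the helper name invented for illustration:

    def inaddr_prefix(network):
        """'140.221.9' -> '9.221.140', i.e. zone 9.221.140.in-addr.arpa."""
        return '.'.join(reversed(network.split('.')))

    assert inaddr_prefix('140.221.9') == '9.221.140'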
+// root-delegation-only exclude { "DE"; "MUSEUM"; }; + +include "/etc/bind/named.conf.local"; +include "/etc/bind/named.conf.static"; diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl new file mode 100644 index 000000000..52021620e --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl @@ -0,0 +1,92 @@ +// This is the primary configuration file for the BIND DNS server named. +// +// Please read /usr/share/doc/bind9/README.Debian.gz for information on the +// structure of BIND configuration files in Debian, *BEFORE* you customize +// this configuration file. +// + +include "/etc/bind/named.conf.options"; + +include "/etc/bind/rndc.key"; + +view "internal" { + match-clients { 140.221.9.6;140.221.8.10;140.221.8.88;140.221.8.15; }; + recursion yes; + // prime the server with knowledge of the root servers + zone "." { + type hint; + file "/etc/bind/db.root"; + }; + {% for zone in zones %} + zone "{{ zone.1 }}" { + type master; + file "/etc/bind/hostbase/{{ zone.1 }}"; + notify no; + also-notify { 140.221.9.6;140.221.8.10;140.221.8.88;140.221.8.15; }; + };{% endfor %} + // be authoritative for the localhost forward and reverse zones, and for + // broadcast zones as per RFC 1912 + + zone "localhost" { + type master; + file "/etc/bind/db.local"; + }; + + zone "127.in-addr.arpa" { + type master; + file "/etc/bind/db.127"; + }; + + zone "0.in-addr.arpa" { + type master; + file "/etc/bind/db.0"; + }; + + zone "255.in-addr.arpa" { + type master; + file "/etc/bind/db.255"; + }; + {% for reverse in reverses %} + zone "{{ reverse.0 }}.in-addr.arpa" { + type master; + file "/etc/bind/hostbase/{{ reverse.0 }}.rev"; + notify no; + also-notify { 140.221.9.6;140.221.8.10;140.221.8.88; }; + };{% endfor %} + include "/etc/bind/named.conf.static"; +}; + +view "external" { + match-clients { any; }; + recursion no; + {% for zone in zones %} + zone "{{ zone.1 }}" { + type master; + file "/etc/bind/hostbase/{{ zone.1 }}.external"; + notify no; + };{% endfor %} + + {% for reverse in reverses %} + zone "{{ reverse.0 }}.in-addr.arpa" { + type master; + file "/etc/bind/hostbase/{{ reverse.0 }}.rev.external"; + notify no; + };{% endfor %} + include "/etc/bind/named.conf.static"; +}; + + +// zone "com" { type delegation-only; }; +// zone "net" { type delegation-only; }; + +// From the release notes: +// Because many of our users are uncomfortable receiving undelegated answers +// from root or top level domains, other than a few for whom that behaviour +// has been trusted and expected for quite some length of time, we have now +// introduced the "root-delegations-only" feature which applies delegation-only +// logic to all top level domains, and to the root domain. An exception list +// should be specified, including "MUSEUM" and "DE", and any other top level +// domains from whom undelegated responses are expected and trusted. +// root-delegation-only exclude { "DE"; "MUSEUM"; }; + +include "/etc/bind/named.conf.local"; diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl new file mode 100644 index 000000000..6ed520c98 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl @@ -0,0 +1,4 @@ +{% if fileorigin %}$ORIGIN {{ fileorigin }}.in-addr.arpa.{% endif %} +$ORIGIN {{ inaddr }}.in-addr.arpa. +{% for host in hosts %}{{ host.0.3 }} PTR {{ host.1 }}. 
+{% endfor %} diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl new file mode 100644 index 000000000..d142eaf7f --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl @@ -0,0 +1,13 @@ +$ORIGIN . +$TTL {{ zone.8 }} +{{ inaddr }}.in-addr.arpa IN SOA {{ zone.4 }}. {{ zone.3 }} ( + {{ zone.2 }} ; serial + {{ zone.7 }} ; refresh interval + {{ zone.6 }} ; retry interval + {{ zone.5 }} ; expire interval + {{ zone.8 }} ; min ttl + ) + + {% for ns in nameservers %}NS {{ ns.0 }} + {% endfor %} + diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl new file mode 100644 index 000000000..aad48d179 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl @@ -0,0 +1,18 @@ +$ORIGIN . +$TTL {{ zone.8 }} +{{ zone.1 }}. IN SOA {{ zone.4 }}. {{ zone.3 }}. ( + {{ zone.2 }} ; serial + {{ zone.7 }} ; refresh interval + {{ zone.6 }} ; retry interval + {{ zone.5 }} ; expire interval + {{ zone.8 }} ; min ttl + ) + + {% for ns in nameservers %}NS {{ ns.0 }} + {% endfor %} + {% for a in addresses %}A {{ a.0 }} + {% endfor %} + {% for mx in mxs %}MX {{ mx.0 }} {{ mx.1 }} + {% endfor %} +$ORIGIN {{ zone.1 }}. +localhost A 127.0.0.1 diff --git a/src/lib/Bcfg2/Server/Hostbase/test/harness.py b/src/lib/Bcfg2/Server/Hostbase/test/harness.py new file mode 100644 index 000000000..befcff5c0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/test/harness.py @@ -0,0 +1,11 @@ +import os, sys + +here = os.path.realpath('harness') + +server_hostbase = os.path.realpath(here + '../../../../') + +sys.path.insert(0,server_hostbase) +sys.path.insert(0,server_hostbase + '../') +#commented this out, but might be needed for now until the harness is figured out +#if so, use your actual path to the Hostbase module +#sys.path.insert(0,'/home/dahl/Code/bcfg2/src/lib/Server/Hostbase') diff --git a/src/lib/Bcfg2/Server/Hostbase/test/test_environ_settings.py b/src/lib/Bcfg2/Server/Hostbase/test/test_environ_settings.py new file mode 100644 index 000000000..ad35c624e --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/test/test_environ_settings.py @@ -0,0 +1,35 @@ +import sys +import os + + +def env_setup(): + os.environ['bcfg_db_engine'] = 'foo' + os.environ['bcfg_db_name'] = 'bar' + os.environ['bcfg_db_user'] = 'baz' + os.environ['bcfg_db_password'] = 'pass' + os.environ['bcfg_db_host'] = 'biff' + os.environ['bcfg_db_port'] = '3306' + os.environ['bcfg_time_zone'] = 'CHI' + +def teardown(): + pass + +def test_environ_settings(): + + os.environ['bcfg_db_engine'] = 'foo' + os.environ['bcfg_db_name'] = 'bar' + os.environ['bcfg_db_user'] = 'baz' + os.environ['bcfg_db_password'] = 'pass' + os.environ['bcfg_db_host'] = 'biff' + os.environ['bcfg_db_port'] = '3306' + os.environ['bcfg_time_zone'] = 'CHI' + import Hostbase.settings + s = Hostbase.settings + s.CFG_TYPE = 'environ' + assert s.DATABASE_ENGINE == 'mysql' + assert s.DATABASE_PASSWORD == 'pass' + assert s.DATABASE_NAME == 'bar' + assert s.DATABASE_USER == 'baz' + assert s.DATABASE_HOST == 'biff' + assert s.DATABASE_PORT == '3306' + assert s.TIME_ZONE == 'CHI' diff --git a/src/lib/Bcfg2/Server/Hostbase/test/test_ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/test/test_ldapauth.py new file mode 100644 index 000000000..7fc009ad2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/test/test_ldapauth.py @@ -0,0 +1,10 @@ +import os,sys +import harness + +from Hostbase.ldapauth import * + +def test_it(): + l = 
ldapauth(os.environ['LDAP_SVC_ACCT_NAME'], + os.environ['LDAP_SVC_ACCT_PASS']) + + assert l.department == 'foo' diff --git a/src/lib/Bcfg2/Server/Hostbase/test/test_settings.py b/src/lib/Bcfg2/Server/Hostbase/test/test_settings.py new file mode 100644 index 000000000..0dfc30f38 --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/test/test_settings.py @@ -0,0 +1,12 @@ +import sys +import os +import Hostbase.settings + +def setup(): + pass + +def teardown(): + pass + +def test_mcs_settings(): + pass diff --git a/src/lib/Bcfg2/Server/Hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/urls.py new file mode 100644 index 000000000..01fe97d4f --- /dev/null +++ b/src/lib/Bcfg2/Server/Hostbase/urls.py @@ -0,0 +1,27 @@ +from django.conf.urls.defaults import * +from django.conf import settings +from django.views.generic.simple import direct_to_template +from django.contrib import admin + + +admin.autodiscover() + + +urlpatterns = patterns('', + # Uncomment the admin/doc line below and add 'django.contrib.admindocs' + # to INSTALLED_APPS to enable admin documentation: + (r'^admin/doc/', include('django.contrib.admindocs.urls')), + + # Uncomment the next line to enable the admin: + (r'^admin/', include(admin.site.urls)), + + (r'^$',direct_to_template, {'template':'index.html'}, 'index'), + (r'^hostbase/', include('hostbase.urls')), + (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), + (r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html'}) +) + +if settings.SERVE_MEDIA: + urlpatterns += patterns('', + (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', + dict(document_root=settings.MEDIA_ROOT)),) diff --git a/src/lib/Bcfg2/Server/Lint/Bundles.py b/src/lib/Bcfg2/Server/Lint/Bundles.py new file mode 100644 index 000000000..e6b6307f2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/Bundles.py @@ -0,0 +1,54 @@ +import lxml.etree +import Bcfg2.Server.Lint + +class Bundles(Bcfg2.Server.Lint.ServerPlugin): + """ Perform various bundle checks """ + def Run(self): + """ run plugin """ + if 'Bundler' in self.core.plugins: + self.missing_bundles() + for bundle in self.core.plugins['Bundler'].entries.values(): + if self.HandlesFile(bundle.name): + if (not Bcfg2.Server.Plugins.Bundler.have_genshi or + type(bundle) is not + Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile): + self.bundle_names(bundle) + + @classmethod + def Errors(cls): + return {"bundle-not-found":"error", + "inconsistent-bundle-name":"warning"} + + def missing_bundles(self): + """ find bundles listed in Metadata but not implemented in Bundler """ + if self.files is None: + # when given a list of files on stdin, this check is + # useless, so skip it + groupdata = self.metadata.groups_xml.xdata + ref_bundles = set([b.get("name") + for b in groupdata.findall("//Bundle")]) + + allbundles = self.core.plugins['Bundler'].entries.keys() + for bundle in ref_bundles: + xmlbundle = "%s.xml" % bundle + genshibundle = "%s.genshi" % bundle + if (xmlbundle not in allbundles and + genshibundle not in allbundles): + self.LintError("bundle-not-found", + "Bundle %s referenced, but does not exist" % + bundle) + + def bundle_names(self, bundle): + """ verify bundle name attribute matches filename """ + try: + xdata = lxml.etree.XML(bundle.data) + except AttributeError: + # genshi template + xdata = lxml.etree.parse(bundle.template.filepath).getroot() + + fname = bundle.name.split('Bundler/')[1].split('.')[0] + bname = xdata.get('name') + if fname != bname: + self.LintError("inconsistent-bundle-name", + 
"Inconsistent bundle name: filename is %s, bundle name is %s" % + (fname, bname)) diff --git a/src/lib/Bcfg2/Server/Lint/Comments.py b/src/lib/Bcfg2/Server/Lint/Comments.py new file mode 100644 index 000000000..3b9370c3b --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/Comments.py @@ -0,0 +1,194 @@ +import os.path +import lxml.etree +import Bcfg2.Server.Lint + +class Comments(Bcfg2.Server.Lint.ServerPlugin): + """ check files for various required headers """ + def __init__(self, *args, **kwargs): + Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs) + self.config_cache = {} + + def Run(self): + self.check_bundles() + self.check_properties() + self.check_metadata() + self.check_cfg() + self.check_infoxml() + self.check_probes() + + @classmethod + def Errors(cls): + return {"unexpanded-keywords":"warning", + "keywords-not-found":"warning", + "comments-not-found":"warning", + "broken-xinclude-chain":"warning"} + + def required_keywords(self, rtype): + """ given a file type, fetch the list of required VCS keywords + from the bcfg2-lint config """ + return self.required_items(rtype, "keyword") + + def required_comments(self, rtype): + """ given a file type, fetch the list of required comments + from the bcfg2-lint config """ + return self.required_items(rtype, "comment") + + def required_items(self, rtype, itype): + """ given a file type and item type (comment or keyword), + fetch the list of required items from the bcfg2-lint config """ + if itype not in self.config_cache: + self.config_cache[itype] = {} + + if rtype not in self.config_cache[itype]: + rv = [] + global_item = "global_%ss" % itype + if global_item in self.config: + rv.extend(self.config[global_item].split(",")) + + item = "%s_%ss" % (rtype.lower(), itype) + if item in self.config: + if self.config[item]: + rv.extend(self.config[item].split(",")) + else: + # config explicitly specifies nothing + rv = [] + self.config_cache[itype][rtype] = rv + return self.config_cache[itype][rtype] + + def check_bundles(self): + """ check bundle files for required headers """ + if 'Bundler' in self.core.plugins: + for bundle in self.core.plugins['Bundler'].entries.values(): + xdata = None + rtype = "" + try: + xdata = lxml.etree.XML(bundle.data) + rtype = "bundler" + except (lxml.etree.XMLSyntaxError, AttributeError): + xdata = lxml.etree.parse(bundle.template.filepath).getroot() + rtype = "sgenshi" + + self.check_xml(bundle.name, xdata, rtype) + + def check_properties(self): + """ check properties files for required headers """ + if 'Properties' in self.core.plugins: + props = self.core.plugins['Properties'] + for propfile, pdata in props.store.entries.items(): + if os.path.splitext(propfile)[1] == ".xml": + self.check_xml(pdata.name, pdata.xdata, 'properties') + + def check_metadata(self): + """ check metadata files for required headers """ + if self.has_all_xincludes("groups.xml"): + self.check_xml(os.path.join(self.metadata.data, "groups.xml"), + self.metadata.groups_xml.data, + "metadata") + if self.has_all_xincludes("clients.xml"): + self.check_xml(os.path.join(self.metadata.data, "clients.xml"), + self.metadata.clients_xml.data, + "metadata") + + def check_cfg(self): + """ check Cfg files for required headers """ + if 'Cfg' in self.core.plugins: + for entryset in self.core.plugins['Cfg'].entries.values(): + for entry in entryset.entries.values(): + if entry.name.endswith(".genshi"): + rtype = "tgenshi" + else: + rtype = "cfg" + self.check_plaintext(entry.name, entry.data, rtype) + + def check_infoxml(self): + """ check info.xml 
files for required headers """ + if 'Cfg' in self.core.plugins: + for entryset in self.core.plugins['Cfg'].entries.items(): + if (hasattr(entryset, "infoxml") and + entryset.infoxml is not None): + self.check_xml(entryset.infoxml.name, + entryset.infoxml.pnode.data, + "infoxml") + + def check_probes(self): + """ check probes for required headers """ + if 'Probes' in self.core.plugins: + for probe in self.core.plugins['Probes'].probes.entries.values(): + self.check_plaintext(probe.name, probe.data, "probes") + + def check_xml(self, filename, xdata, rtype): + """ check generic XML files for required headers """ + self.check_lines(filename, + [str(el) + for el in xdata.getiterator(lxml.etree.Comment)], + rtype) + + def check_plaintext(self, filename, data, rtype): + """ check generic plaintex files for required headers """ + self.check_lines(filename, data.splitlines(), rtype) + + def check_lines(self, filename, lines, rtype): + """ generic header check for a set of lines """ + if self.HandlesFile(filename): + # found is trivalent: + # False == not found + # None == found but not expanded + # True == found and expanded + found = dict((k, False) for k in self.required_keywords(rtype)) + + for line in lines: + # we check for both '$<keyword>:' and '$<keyword>$' to see + # if the keyword just hasn't been expanded + for (keyword, status) in found.items(): + if not status: + if '$%s:' % keyword in line: + found[keyword] = True + elif '$%s$' % keyword in line: + found[keyword] = None + + unexpanded = [keyword for (keyword, status) in found.items() + if status is None] + if unexpanded: + self.LintError("unexpanded-keywords", + "%s: Required keywords(s) found but not expanded: %s" % + (filename, ", ".join(unexpanded))) + missing = [keyword for (keyword, status) in found.items() + if status is False] + if missing: + self.LintError("keywords-not-found", + "%s: Required keywords(s) not found: $%s$" % + (filename, "$, $".join(missing))) + + # next, check for required comments. 
found is just + # boolean + found = dict((k, False) for k in self.required_comments(rtype)) + + for line in lines: + for (comment, status) in found.items(): + if not status: + found[comment] = comment in line + + missing = [comment for (comment, status) in found.items() + if status is False] + if missing: + self.LintError("comments-not-found", + "%s: Required comments(s) not found: %s" % + (filename, ", ".join(missing))) + + def has_all_xincludes(self, mfile): + """ return true if self.files includes all XIncludes listed in + the specified metadata type, false otherwise""" + if self.files is None: + return True + else: + path = os.path.join(self.metadata.data, mfile) + if path in self.files: + xdata = lxml.etree.parse(path) + for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'): + if not self.has_all_xincludes(el.get('href')): + self.LintError("broken-xinclude-chain", + "Broken XInclude chain: could not include %s" % path) + return False + + return True + diff --git a/src/lib/Bcfg2/Server/Lint/Deltas.py b/src/lib/Bcfg2/Server/Lint/Deltas.py index cf91d1d13..de2e0668f 100644 --- a/src/lib/Bcfg2/Server/Lint/Deltas.py +++ b/src/lib/Bcfg2/Server/Lint/Deltas.py @@ -10,6 +10,11 @@ class Deltas(Bcfg2.Server.Lint.ServerPlugin): for basename, entry in list(cfg.entries.items()): self.check_entry(basename, entry) + @classmethod + def Errors(cls): + return {"cat-file-used":"warning", + "diff-file-used":"warning"} + def check_entry(self, basename, entry): for fname in list(entry.entries.keys()): if self.HandlesFile(fname): diff --git a/src/lib/Bcfg2/Server/Lint/Duplicates.py b/src/lib/Bcfg2/Server/Lint/Duplicates.py new file mode 100644 index 000000000..ee6b7a2e6 --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/Duplicates.py @@ -0,0 +1,89 @@ +import os.path +import lxml.etree +import Bcfg2.Server.Lint + +class Duplicates(Bcfg2.Server.Lint.ServerPlugin): + """ Find duplicate clients, groups, etc. """ + def __init__(self, *args, **kwargs): + Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs) + self.groups_xdata = None + self.clients_xdata = None + self.load_xdata() + + def Run(self): + """ run plugin """ + # only run this plugin if we were not given a list of files. + # not only is it marginally silly to run this plugin with a + # partial list of files, it turns out to be really freaking + # hard to get only a fragment of group or client metadata + if self.groups_xdata is not None: + self.duplicate_groups() + self.duplicate_defaults() + if self.clients_xdata is not None: + self.duplicate_clients() + + @classmethod + def Errors(cls): + return {"broken-xinclude-chain":"warning", + "duplicate-client":"error", + "duplicate-group":"error", + "duplicate-package":"error", + "multiple-default-groups":"error"} + + def load_xdata(self): + """ attempt to load XML data for groups and clients. 
only
+        actually load data if all documents referenced in XIncludes can
+        be found in self.files"""
+        if self.has_all_xincludes("groups.xml"):
+            self.groups_xdata = self.metadata.groups_xml.xdata
+        if self.has_all_xincludes("clients.xml"):
+            self.clients_xdata = self.metadata.clients_xml.xdata
+
+    def duplicate_groups(self):
+        """ find duplicate groups """
+        self.duplicate_entries(self.groups_xdata.xpath('//Groups/Group'),
+                               'group')
+
+    def duplicate_clients(self):
+        """ find duplicate clients """
+        self.duplicate_entries(self.clients_xdata.xpath('//Clients/Client'),
+                               'client')
+
+    def duplicate_entries(self, data, etype):
+        """ generic duplicate entry finder """
+        seen = {}
+        for el in data:
+            if el.get('name') not in seen:
+                seen[el.get('name')] = el
+            else:
+                self.LintError("duplicate-%s" % etype,
+                               "Duplicate %s '%s':\n%s\n%s" %
+                               (etype, el.get('name'),
+                                self.RenderXML(seen[el.get('name')]),
+                                self.RenderXML(el)))
+
+    def duplicate_defaults(self):
+        """ check for multiple default group definitions """
+        default_groups = [g for g in self.groups_xdata.findall('.//Group')
+                          if g.get('default') == 'true']
+        if len(default_groups) > 1:
+            self.LintError("multiple-default-groups",
+                           "Multiple default groups defined: %s" %
+                           ",".join(g.get('name') for g in default_groups))
+
+    def has_all_xincludes(self, mfile):
+        """ return true if self.files includes all XIncludes listed in
+        the specified metadata type, false otherwise"""
+        if self.files is None:
+            return True
+        else:
+            path = os.path.join(self.metadata.data, mfile)
+            if path in self.files:
+                xdata = lxml.etree.parse(path)
+                for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'):
+                    if not self.has_all_xincludes(el.get('href')):
+                        self.LintError("broken-xinclude-chain",
+                                       "Broken XInclude chain: could not include %s" % path)
+                        return False
+
+            return True
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
new file mode 100755
index 000000000..b6007161e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -0,0 +1,33 @@
+import sys
+import genshi.template
+import Bcfg2.Server.Lint
+
+class Genshi(Bcfg2.Server.Lint.ServerPlugin):
+    """ Check Genshi templates for syntax errors """
+    def Run(self):
+        """ run plugin """
+        loader = genshi.template.TemplateLoader()
+        for plugin in ['Cfg', 'TGenshi']:
+            if plugin in self.core.plugins:
+                self.check_files(self.core.plugins[plugin].entries,
+                                 loader=loader)
+
+    @classmethod
+    def Errors(cls):
+        return {"genshi-syntax-error":"error"}
+
+    def check_files(self, entries, loader=None):
+        if loader is None:
+            loader = genshi.template.TemplateLoader()
+
+        for eset in entries.values():
+            for fname, sdata in list(eset.entries.items()):
+                if (self.HandlesFile(fname) and
+                    (fname.endswith(".genshi") or fname.endswith(".newtxt"))):
+                    try:
+                        loader.load(sdata.name,
+                                    cls=genshi.template.NewTextTemplate)
+                    except genshi.template.TemplateSyntaxError:
+                        err = sys.exc_info()[1]
+                        self.LintError("genshi-syntax-error",
+                                       "Genshi syntax error: %s" % err)
diff --git a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
new file mode 100644
index 000000000..431ba4056
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
@@ -0,0 +1,35 @@
+import sys
+import Bcfg2.Server.Lint
+from Bcfg2.Server.Plugins.GroupPatterns import PatternMap
+
+class GroupPatterns(Bcfg2.Server.Lint.ServerPlugin):
+    """ Check GroupPatterns for patterns that fail to initialize """
+
+    def Run(self):
+        """ run plugin """
+        if 'GroupPatterns' in self.core.plugins:
+            cfg =
self.core.plugins['GroupPatterns'].config + for entry in cfg.xdata.xpath('//GroupPattern'): + groups = [g.text for g in entry.findall('Group')] + self.check(entry, groups, ptype='NamePattern') + self.check(entry, groups, ptype='NameRange') + + @classmethod + def Errors(cls): + return {"pattern-fails-to-initialize":"error"} + + def check(self, entry, groups, ptype="NamePattern"): + if ptype == "NamePattern": + pmap = lambda p: PatternMap(p, None, groups) + else: + pmap = lambda p: PatternMap(None, p, groups) + + for el in entry.findall(ptype): + pat = el.text + try: + pmap(pat) + except: + err = sys.exc_info()[1] + self.LintError("pattern-fails-to-initialize", + "Failed to initialize %s %s for %s: %s" % + (ptype, pat, entry.get('pattern'), err)) diff --git a/src/lib/Bcfg2/Server/Lint/InfoXML.py b/src/lib/Bcfg2/Server/Lint/InfoXML.py new file mode 100644 index 000000000..7b89e86b2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/InfoXML.py @@ -0,0 +1,48 @@ +import os.path +import Bcfg2.Options +import Bcfg2.Server.Lint + +class InfoXML(Bcfg2.Server.Lint.ServerPlugin): + """ ensure that all config files have an info.xml file""" + def Run(self): + if 'Cfg' in self.core.plugins: + for filename, entryset in self.core.plugins['Cfg'].entries.items(): + infoxml_fname = os.path.join(entryset.path, "info.xml") + if self.HandlesFile(infoxml_fname): + if (hasattr(entryset, "infoxml") and + entryset.infoxml is not None): + self.check_infoxml(infoxml_fname, + entryset.infoxml.pnode.data) + else: + self.LintError("no-infoxml", + "No info.xml found for %s" % filename) + + @classmethod + def Errors(cls): + return {"no-infoxml":"warning", + "paranoid-false":"warning", + "broken-xinclude-chain":"warning", + "required-infoxml-attrs-missing":"error"} + + def check_infoxml(self, fname, xdata): + for info in xdata.getroottree().findall("//Info"): + required = [] + if "required_attrs" in self.config: + required = self.config["required_attrs"].split(",") + + missing = [attr for attr in required if info.get(attr) is None] + if missing: + self.LintError("required-infoxml-attrs-missing", + "Required attribute(s) %s not found in %s:%s" % + (",".join(missing), fname, self.RenderXML(info))) + + if ((Bcfg2.Options.MDATA_PARANOID.value and + info.get("paranoid") is not None and + info.get("paranoid").lower() == "false") or + (not Bcfg2.Options.MDATA_PARANOID.value and + (info.get("paranoid") is None or + info.get("paranoid").lower() != "true"))): + self.LintError("paranoid-false", + "Paranoid must be true in %s:%s" % + (fname, self.RenderXML(info))) + diff --git a/src/lib/Bcfg2/Server/Lint/MergeFiles.py b/src/lib/Bcfg2/Server/Lint/MergeFiles.py new file mode 100644 index 000000000..797de6ed9 --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/MergeFiles.py @@ -0,0 +1,74 @@ +import os +import copy +from difflib import SequenceMatcher +import Bcfg2.Server.Lint + +class MergeFiles(Bcfg2.Server.Lint.ServerPlugin): + """ find Probes or Cfg files with multiple similar files that + might be merged into one """ + def Run(self): + if 'Cfg' in self.core.plugins: + self.check_cfg() + if 'Probes' in self.core.plugins: + self.check_probes() + + @classmethod + def Errors(cls): + return {"merge-cfg":"warning", + "merge-probes":"warning"} + + + def check_cfg(self): + for filename, entryset in self.core.plugins['Cfg'].entries.items(): + for mset in self.get_similar(entryset.entries): + self.LintError("merge-cfg", + "The following files are similar: %s. " + "Consider merging them into a single Genshi " + "template." 
%
+                           ", ".join([os.path.join(filename, p)
+                                      for p in mset]))
+
+    def check_probes(self):
+        probes = self.core.plugins['Probes'].probes.entries
+        for mset in self.get_similar(probes):
+            self.LintError("merge-probes",
+                           "The following probes are similar: %s. "
+                           "Consider merging them into a single probe." %
+                           ", ".join([p for p in mset]))
+
+    def get_similar(self, entries):
+        if "threshold" in self.config:
+            # accept threshold either as a percent (e.g., "threshold=75") or
+            # as a ratio (e.g., "threshold=.75")
+            threshold = float(self.config['threshold'])
+            if threshold > 1:
+                threshold /= 100
+        else:
+            threshold = 0.75
+        rv = []
+        elist = list(entries.items())
+        while elist:
+            result = self._find_similar(elist.pop(0), copy.copy(elist),
+                                        threshold)
+            if len(result) > 1:
+                elist = [(fname, fdata)
+                         for fname, fdata in elist
+                         if fname not in result]
+                rv.append(result)
+        return rv
+
+    def _find_similar(self, ftuple, others, threshold):
+        fname, fdata = ftuple
+        rv = [fname]
+        while others:
+            cname, cdata = others.pop(0)
+            sm = SequenceMatcher(None, fdata.data, cdata.data)
+            # perform progressively more expensive comparisons
+            if (sm.real_quick_ratio() > threshold and
+                sm.quick_ratio() > threshold and
+                sm.ratio() > threshold):
+                rv.extend(self._find_similar((cname, cdata), copy.copy(others),
+                                             threshold))
+        return rv
diff --git a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
new file mode 100644
index 000000000..ceb46238a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
@@ -0,0 +1,38 @@
+import glob
+import lxml.etree
+import Bcfg2.Server.Lint
+
+class Pkgmgr(Bcfg2.Server.Lint.ServerlessPlugin):
+    """ find duplicate Pkgmgr entries with the same priority """
+    def Run(self):
+        pset = set()
+        for pfile in glob.glob("%s/Pkgmgr/*.xml" % self.config['repo']):
+            if self.HandlesFile(pfile):
+                xdata = lxml.etree.parse(pfile).getroot()
+                # get priority, type, group
+                priority = xdata.get('priority')
+                ptype = xdata.get('type')
+                for pkg in xdata.xpath("//Package"):
+                    if pkg.getparent().tag == 'Group':
+                        grp = pkg.getparent().get('name')
+                        if pkg.getparent().getparent().tag == 'Group':
+                            pgrp = pkg.getparent().getparent().get('name')
+                        else:
+                            pgrp = 'none'
+                    else:
+                        grp = 'none'
+                        pgrp = 'none'
+                    ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
+                    # check if package is already listed with same
+                    # priority, type, grp
+                    if ptuple in pset:
+                        self.LintError("duplicate-packages",
+                                       "Duplicate Package %s, priority:%s, type:%s" %
+                                       (pkg.get('name'), priority, ptype))
+                    else:
+                        pset.add(ptuple)
+
+    @classmethod
+    def Errors(cls):
+        return {"duplicate-packages":"error"}
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
new file mode 100644
index 000000000..6f76cf2db
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -0,0 +1,145 @@
+import os.path
+import lxml.etree
+import Bcfg2.Server.Lint
+from Bcfg2.Server.Plugins.Packages import Apt, Yum
+
+class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
+    """ verify attributes for configuration entries (as defined in
+    doc/server/configurationentries) """
+    def __init__(self, *args, **kwargs):
+        Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
+        self.required_attrs = {
+            'Path': {
+                'device': ['name', 'owner', 'group', 'dev_type'],
+                'directory': ['name', 'owner', 'group', 'perms'],
+                'file': ['name', 'owner', 'group', 'perms', '__text__'],
+                'hardlink': ['name', 'to'],
+                'symlink': ['name', 'to'],
+                'ignore': ['name'],
+                'nonexistent':
['name'],
+                'permissions': ['name', 'owner', 'group', 'perms'],
+                'vcs': ['vcstype', 'revision', 'sourceurl']},
+            'Service': {
+                'chkconfig': ['name'],
+                'deb': ['name'],
+                'rc-update': ['name'],
+                'smf': ['name', 'FMRI'],
+                'upstart': ['name']},
+            'Action': ['name', 'timing', 'when', 'status', 'command'],
+            'Package': ['name']}
+
+    def Run(self):
+        self.check_packages()
+        if "Defaults" in self.core.plugins:
+            self.logger.info("Defaults plugin enabled; skipping required "
+                             "attribute checks")
+        else:
+            self.check_rules()
+            self.check_bundles()
+
+    @classmethod
+    def Errors(cls):
+        return {"unknown-entry-type":"error",
+                "unknown-entry-tag":"error",
+                "required-attrs-missing":"error",
+                "extra-attrs":"warning"}
+
+    def check_packages(self):
+        """ check package sources for Source entries with missing attrs """
+        if 'Packages' in self.core.plugins:
+            for source in self.core.plugins['Packages'].sources:
+                if isinstance(source, Yum.YumSource):
+                    if (not source.pulp_id and not source.url and
+                        not source.rawurl):
+                        self.LintError("required-attrs-missing",
+                                       "A %s source must have either a url, "
+                                       "rawurl, or pulp_id attribute: %s" %
+                                       (source.ptype,
+                                        self.RenderXML(source.xsource)))
+                elif not source.url and not source.rawurl:
+                    self.LintError("required-attrs-missing",
+                                   "A %s source must have either a url or "
+                                   "rawurl attribute: %s" %
+                                   (source.ptype,
+                                    self.RenderXML(source.xsource)))
+
+                if (not isinstance(source, Apt.AptSource) and
+                    source.recommended):
+                    self.LintError("extra-attrs",
+                                   "The recommended attribute is not "
+                                   "supported on %s sources: %s" %
+                                   (source.ptype,
+                                    self.RenderXML(source.xsource)))
+
+    def check_rules(self):
+        """ check Rules for Path entries with missing attrs """
+        if 'Rules' in self.core.plugins:
+            for rules in self.core.plugins['Rules'].entries.values():
+                xdata = rules.pnode.data
+                for path in xdata.xpath("//Path"):
+                    self.check_entry(path, os.path.join(self.config['repo'],
+                                                        rules.name))
+
+    def check_bundles(self):
+        """ check bundles for BoundPath entries with missing attrs """
+        if 'Bundler' in self.core.plugins:
+            for bundle in self.core.plugins['Bundler'].entries.values():
+                try:
+                    xdata = lxml.etree.XML(bundle.data)
+                except (lxml.etree.XMLSyntaxError, AttributeError):
+                    xdata = lxml.etree.parse(bundle.template.filepath).getroot()
+
+                for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
+                    self.check_entry(path, bundle.name)
+
+    def check_entry(self, entry, filename):
+        """ generic entry check """
+        if self.HandlesFile(filename):
+            name = entry.get('name')
+            tag = entry.tag
+            if tag.startswith("Bound"):
+                tag = tag[5:]
+            if tag not in self.required_attrs:
+                self.LintError("unknown-entry-tag",
+                               "Unknown entry tag '%s': %s" %
+                               (entry.tag, self.RenderXML(entry)))
+                return
+
+            if isinstance(self.required_attrs[tag], dict):
+                etype = entry.get('type')
+                if etype in self.required_attrs[tag]:
+                    required_attrs = set(self.required_attrs[tag][etype] +
+                                         ['type'])
+                else:
+                    self.LintError("unknown-entry-type",
+                                   "Unknown %s type %s: %s" %
+                                   (tag, etype, self.RenderXML(entry)))
+                    return
+            else:
+                required_attrs = set(self.required_attrs[tag])
+            attrs = set(entry.attrib.keys())
+
+            if 'dev_type' in required_attrs:
+                dev_type = entry.get('dev_type')
+                if dev_type in ['block', 'char']:
+                    # check if major/minor are specified
+                    required_attrs |= set(['major', 'minor'])
+
+            if '__text__' in required_attrs:
+                required_attrs.remove('__text__')
+                if (not entry.text and
+                    not entry.get('empty', 'false').lower() == 'true'):
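+                    # a file-type Path must either carry literal content or
+                    # be explicitly marked as empty with empty='true'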
self.LintError("required-attrs-missing", + "Text missing for %s %s in %s: %s" % + (entry.tag, name, filename, + self.RenderXML(entry))) + + if not attrs.issuperset(required_attrs): + self.LintError("required-attrs-missing", + "The following required attribute(s) are " + "missing for %s %s in %s: %s\n%s" % + (entry.tag, name, filename, + ", ".join([attr + for attr in + required_attrs.difference(attrs)]), + self.RenderXML(entry))) diff --git a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py new file mode 100644 index 000000000..be270a59c --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py @@ -0,0 +1,64 @@ +import sys +import imp +import glob +import Bcfg2.Server.Lint +from Bcfg2.Server.Plugins.TemplateHelper import HelperModule + +class TemplateHelper(Bcfg2.Server.Lint.ServerlessPlugin): + """ find duplicate Pkgmgr entries with the same priority """ + def __init__(self, *args, **kwargs): + Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) + hm = HelperModule("foo.py", None, None) + self.reserved_keywords = dir(hm) + + def Run(self): + for helper in glob.glob("%s/TemplateHelper/*.py" % self.config['repo']): + if not self.HandlesFile(helper): + continue + + match = HelperModule._module_name_re.search(helper) + if match: + module_name = match.group(1) + else: + module_name = helper + + try: + module = imp.load_source(module_name, helper) + except: + err = sys.exc_info()[1] + self.LintError("templatehelper-import-error", + "Failed to import %s: %s" % + (helper, err)) + continue + + if not hasattr(module, "__export__"): + self.LintError("templatehelper-no-export", + "%s has no __export__ list" % helper) + continue + elif not isinstance(module.__export__, list): + self.LintError("templatehelper-nonlist-export", + "__export__ is not a list in %s" % helper) + continue + + for sym in module.__export__: + if not hasattr(module, sym): + self.LintError("templatehelper-nonexistent-export", + "%s: exported symbol %s does not exist" % + (helper, sym)) + elif sym in self.reserved_keywords: + self.LintError("templatehelper-reserved-export", + "%s: exported symbol %s is reserved" % + (helper, sym)) + elif sym.startswith("_"): + self.LintError("templatehelper-underscore-export", + "%s: exported symbol %s starts with underscore" % + (helper, sym)) + + @classmethod + def Errors(cls): + return {"templatehelper-import-error":"error", + "templatehelper-no-export":"error", + "templatehelper-nonlist-export":"error", + "templatehelper-nonexistent-export":"error", + "templatehelper-reserved-export":"error", + "templatehelper-underscore-export":"warning"} diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py new file mode 100644 index 000000000..05fedc313 --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/Validate.py @@ -0,0 +1,210 @@ +import fnmatch +import glob +import lxml.etree +import os +from subprocess import Popen, PIPE, STDOUT +import sys + +import Bcfg2.Server.Lint + +class Validate(Bcfg2.Server.Lint.ServerlessPlugin): + """ Ensure that the repo validates """ + + def __init__(self, *args, **kwargs): + Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) + self.filesets = {"metadata:groups":"%s/metadata.xsd", + "metadata:clients":"%s/clients.xsd", + "info":"%s/info.xsd", + "%s/Bundler/*.xml":"%s/bundle.xsd", + "%s/Bundler/*.genshi":"%s/bundle.xsd", + "%s/Pkgmgr/*.xml":"%s/pkglist.xsd", + "%s/Base/*.xml":"%s/base.xsd", + "%s/Rules/*.xml":"%s/rules.xsd", + "%s/Defaults/*.xml":"%s/defaults.xsd", + 
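+                          # in each pair, the first %s is filled in with the
+                          # repo path and the second with the schema directory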
"%s/etc/report-configuration.xml":"%s/report-configuration.xsd", + "%s/Svcmgr/*.xml":"%s/services.xsd", + "%s/Deps/*.xml":"%s/deps.xsd", + "%s/Decisions/*.xml":"%s/decisions.xsd", + "%s/Packages/sources.xml":"%s/packages.xsd", + "%s/GroupPatterns/config.xml":"%s/grouppatterns.xsd", + "%s/NagiosGen/config.xml":"%s/nagiosgen.xsd", + "%s/FileProbes/config.xml":"%s/fileprobes.xsd", + } + + self.filelists = {} + self.get_filelists() + + def Run(self): + schemadir = self.config['schema'] + + for path, schemaname in self.filesets.items(): + try: + filelist = self.filelists[path] + except KeyError: + filelist = [] + + if filelist: + # avoid loading schemas for empty file lists + schemafile = schemaname % schemadir + try: + schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile)) + except IOError: + e = sys.exc_info()[1] + self.LintError("input-output-error", str(e)) + continue + except lxml.etree.XMLSchemaParseError: + e = sys.exc_info()[1] + self.LintError("schema-failed-to-parse", + "Failed to process schema %s: %s" % + (schemafile, e)) + continue + for filename in filelist: + self.validate(filename, schemafile, schema=schema) + + self.check_properties() + + @classmethod + def Errors(cls): + return {"broken-xinclude-chain":"warning", + "schema-failed-to-parse":"warning", + "properties-schema-not-found":"warning", + "xml-failed-to-parse":"error", + "xml-failed-to-read":"error", + "xml-failed-to-verify":"error", + "input-output-error":"error"} + + def check_properties(self): + """ check Properties files against their schemas """ + for filename in self.filelists['props']: + schemafile = "%s.xsd" % os.path.splitext(filename)[0] + if os.path.exists(schemafile): + self.validate(filename, schemafile) + else: + self.LintError("properties-schema-not-found", + "No schema found for %s" % filename) + + def validate(self, filename, schemafile, schema=None): + """validate a file against the given lxml.etree.Schema. 
+ return True on success, False on failure """ + if schema is None: + # if no schema object was provided, instantiate one + try: + schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile)) + except: + self.LintError("schema-failed-to-parse", + "Failed to process schema %s" % schemafile) + return False + + try: + datafile = lxml.etree.parse(filename) + except SyntaxError: + lint = Popen(["xmllint", filename], stdout=PIPE, stderr=STDOUT) + self.LintError("xml-failed-to-parse", + "%s fails to parse:\n%s" % (filename, + lint.communicate()[0])) + lint.wait() + return False + except IOError: + self.LintError("xml-failed-to-read", + "Failed to open file %s" % filename) + return False + + if not schema.validate(datafile): + cmd = ["xmllint"] + if self.files is None: + cmd.append("--xinclude") + cmd.extend(["--noout", "--schema", schemafile, filename]) + lint = Popen(cmd, stdout=PIPE, stderr=STDOUT) + output = lint.communicate()[0] + if lint.wait(): + self.LintError("xml-failed-to-verify", + "%s fails to verify:\n%s" % (filename, output)) + return False + return True + + def get_filelists(self): + """ get lists of different kinds of files to validate """ + if self.files is not None: + listfiles = lambda p: fnmatch.filter(self.files, p % "*") + else: + listfiles = lambda p: glob.glob(p % self.config['repo']) + + for path in self.filesets.keys(): + if path.startswith("metadata:"): + mtype = path.split(":")[1] + self.filelists[path] = self.get_metadata_list(mtype) + elif path == "info": + if self.files is not None: + self.filelists[path] = \ + [f for f in self.files + if os.path.basename(f) == 'info.xml'] + else: # self.files is None + self.filelists[path] = [] + for infodir in ['Cfg', 'TGenshi', 'TCheetah']: + for root, dirs, files in os.walk('%s/%s' % + (self.config['repo'], + infodir)): + self.filelists[path].extend([os.path.join(root, f) + for f in files + if f == 'info.xml']) + else: + self.filelists[path] = listfiles(path) + + self.filelists['props'] = listfiles("%s/Properties/*.xml") + all_metadata = listfiles("%s/Metadata/*.xml") + + # if there are other files in Metadata that aren't xincluded + # from clients.xml or groups.xml, we can't verify them. warn + # about those. + for fname in all_metadata: + if (fname not in self.filelists['metadata:groups'] and + fname not in self.filelists['metadata:clients']): + self.LintError("broken-xinclude-chain", + "Broken XInclude chain: Could not determine file type of %s" % fname) + + def get_metadata_list(self, mtype): + """ get all metadata files for the specified type (clients or + group) """ + if self.files is not None: + rv = fnmatch.filter(self.files, "*/Metadata/%s.xml" % mtype) + else: + rv = glob.glob("%s/Metadata/%s.xml" % (self.config['repo'], mtype)) + + # attempt to follow XIncludes. 
if the top-level files aren't + # listed in self.files, though, there's really nothing we can + # do to guess what a file in Metadata is + if rv: + try: + rv.extend(self.follow_xinclude(rv[0])) + except lxml.etree.XMLSyntaxError: + e = sys.exc_info()[1] + self.LintError("xml-failed-to-parse", + "%s fails to parse:\n%s" % (rv[0], e)) + + + return rv + + def follow_xinclude(self, xfile): + """ follow xincludes in the given file """ + xdata = lxml.etree.parse(xfile) + included = set([ent.get('href') for ent in + xdata.findall('./{http://www.w3.org/2001/XInclude}include')]) + rv = [] + + while included: + try: + filename = included.pop() + except KeyError: + continue + + path = os.path.join(os.path.dirname(xfile), filename) + if self.HandlesFile(path): + rv.append(path) + groupdata = lxml.etree.parse(path) + [included.add(el.get('href')) + for el in + groupdata.findall('./{http://www.w3.org/2001/XInclude}include')] + included.discard(filename) + + return rv + diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py new file mode 100644 index 000000000..5d7dd707b --- /dev/null +++ b/src/lib/Bcfg2/Server/Lint/__init__.py @@ -0,0 +1,192 @@ +__all__ = ['Bundles', + 'Comments', + 'Duplicates', + 'InfoXML', + 'MergeFiles', + 'Pkgmgr', + 'RequiredAttrs', + 'Validate', + 'Genshi', + 'Deltas'] + +import logging +import os +import sys +from copy import copy +import textwrap +import lxml.etree +import Bcfg2.Logger +import fcntl +import termios +import struct + +def _ioctl_GWINSZ(fd): + try: + cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) + except: + return None + return cr + +def get_termsize(): + """ get a tuple of (width, height) giving the size of the terminal """ + if not sys.stdout.isatty(): + return None + cr = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2) + if not cr: + try: + fd = os.open(os.ctermid(), os.O_RDONLY) + cr = _ioctl_GWINSZ(fd) + os.close(fd) + except: + pass + if not cr: + try: + cr = (os.environ['LINES'], os.environ['COLUMNS']) + except KeyError: + return None + return int(cr[1]), int(cr[0]) + +class Plugin (object): + """ base class for ServerlessPlugin and ServerPlugin """ + + def __init__(self, config, errorhandler=None, files=None): + self.files = files + self.config = config + self.logger = logging.getLogger('bcfg2-lint') + if errorhandler is None: + self.errorhandler = ErrorHandler() + else: + self.errorhandler = errorhandler + self.errorhandler.RegisterErrors(self.Errors()) + + def Run(self): + """ run the plugin. must be overloaded by child classes """ + pass + + @classmethod + def Errors(cls): + """ returns a dict of errors the plugin supplies. must be + overloaded by child classes """ + + def HandlesFile(self, fname): + """ returns true if the given file should be handled by the + plugin according to the files list, false otherwise """ + return (self.files is None or + fname in self.files or + os.path.join(self.config['repo'], fname) in self.files or + os.path.abspath(fname) in self.files or + os.path.abspath(os.path.join(self.config['repo'], + fname)) in self.files) + + def LintError(self, err, msg): + self.errorhandler.dispatch(err, msg) + + def RenderXML(self, element): + """render an XML element for error output -- line number + prefixed, no children""" + xml = None + if len(element) or element.text: + el = copy(element) + if el.text: + el.text = '...' 
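+            # strip all children so that only the offending element itself,
+            # with its line number, shows up in the error output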
+ [el.remove(c) for c in el.iterchildren()] + xml = lxml.etree.tostring(el).strip() + else: + xml = lxml.etree.tostring(element).strip() + return " line %s: %s" % (element.sourceline, xml) + + +class ErrorHandler (object): + def __init__(self, config=None): + self.errors = 0 + self.warnings = 0 + + self.logger = logging.getLogger('bcfg2-lint') + + termsize = get_termsize() + if termsize is not None: + twrap = textwrap.TextWrapper(initial_indent=" ", + subsequent_indent=" ", + width=termsize[0]) + self._wrapper = twrap.wrap + else: + self._wrapper = lambda s: [s] + + self._handlers = {} + if config is not None: + for err, action in config.items(): + if "warn" in action: + self._handlers[err] = self.warn + elif "err" in action: + self._handlers[err] = self.error + else: + self._handlers[err] = self.debug + + def RegisterErrors(self, errors): + for err, action in errors.items(): + if err not in self._handlers: + if "warn" in action: + self._handlers[err] = self.warn + elif "err" in action: + self._handlers[err] = self.error + else: + self._handlers[err] = self.debug + + def dispatch(self, err, msg): + if err in self._handlers: + self._handlers[err](msg) + self.logger.debug(" (%s)" % err) + else: + # assume that it's an error, but complain + self.error(msg) + self.logger.warning("Unknown error %s" % err) + + def error(self, msg): + """ log an error condition """ + self.errors += 1 + self._log(msg, self.logger.error, prefix="ERROR: ") + + def warn(self, msg): + """ log a warning condition """ + self.warnings += 1 + self._log(msg, self.logger.warning, prefix="WARNING: ") + + def debug(self, msg): + """ log a silent/debug condition """ + self._log(msg, self.logger.debug) + + def _log(self, msg, logfunc, prefix=""): + # a message may itself consist of multiple lines. wrap() will + # elide them all into a single paragraph, which we don't want. + # so we split the message into its paragraphs and wrap each + # paragraph individually. this means, unfortunately, that we + # lose textwrap's built-in initial indent functionality, + # because we want to only treat the very first line of the + # first paragraph specially. so we do some silliness. 
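+        # e.g., the "ERROR: " prefix is prepended only to the first wrapped
+        # line of the first paragraph; later lines keep the wrapper's indent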
+ rawlines = msg.splitlines() + firstline = True + for rawline in rawlines: + lines = self._wrapper(rawline) + for line in lines: + if firstline: + logfunc(prefix + line.lstrip()) + firstline = False + else: + logfunc(line) + + +class ServerlessPlugin (Plugin): + """ base class for plugins that are run before the server starts + up (i.e., plugins that check things that may prevent the server + from starting up) """ + pass + + +class ServerPlugin (Plugin): + """ base class for plugins that check things that require the + running Bcfg2 server """ + def __init__(self, lintCore, config, **kwargs): + Plugin.__init__(self, config, **kwargs) + self.core = lintCore + self.logger = self.core.logger + self.metadata = self.core.metadata diff --git a/src/lib/Bcfg2/Server/Plugin.py b/src/lib/Bcfg2/Server/Plugin.py new file mode 100644 index 000000000..41314bbea --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugin.py @@ -0,0 +1,1255 @@ +"""This module provides the baseclass for Bcfg2 Server Plugins.""" + +import copy +import logging +import lxml.etree +import os +import pickle +import posixpath +import re +import sys +import threading +from Bcfg2.Bcfg2Py3k import ConfigParser + +from lxml.etree import XML, XMLSyntaxError + +import Bcfg2.Options + +# py3k compatibility +if sys.hexversion >= 0x03000000: + from functools import reduce + from io import FileIO as BUILTIN_FILE_TYPE +else: + BUILTIN_FILE_TYPE = file +from Bcfg2.Bcfg2Py3k import Queue +from Bcfg2.Bcfg2Py3k import Empty +from Bcfg2.Bcfg2Py3k import Full + +# grab default metadata info from bcfg2.conf +opts = {'owner': Bcfg2.Options.MDATA_OWNER, + 'group': Bcfg2.Options.MDATA_GROUP, + 'important': Bcfg2.Options.MDATA_IMPORTANT, + 'perms': Bcfg2.Options.MDATA_PERMS, + 'paranoid': Bcfg2.Options.MDATA_PARANOID, + 'sensitive': Bcfg2.Options.MDATA_SENSITIVE} +mdata_setup = Bcfg2.Options.OptionParser(opts) +mdata_setup.parse([]) +del mdata_setup['args'] + +logger = logging.getLogger('Bcfg2.Server.Plugin') + +default_file_metadata = mdata_setup + +info_regex = re.compile( \ + 'encoding:(\s)*(?P<encoding>\w+)|' + + 'group:(\s)*(?P<group>\S+)|' + + 'important:(\s)*(?P<important>\S+)|' + + 'mtime:(\s)*(?P<mtime>\w+)|' + + 'owner:(\s)*(?P<owner>\S+)|' + + 'paranoid:(\s)*(?P<paranoid>\S+)|' + + 'perms:(\s)*(?P<perms>\w+)|' + + 'sensitive:(\s)*(?P<sensitive>\S+)|') + + +class PluginInitError(Exception): + """Error raised in cases of Plugin initialization errors.""" + pass + + +class PluginExecutionError(Exception): + """Error raised in case of Plugin execution errors.""" + pass + + +class Debuggable(object): + __rmi__ = ['toggle_debug'] + + def __init__(self, name=None): + if name is None: + name = "%s.%s" % (self.__class__.__module__, + self.__class__.__name__) + self.debug_flag = False + self.logger = logging.getLogger(name) + + def toggle_debug(self): + self.debug_flag = not self.debug_flag + + def debug_log(self, message, flag=None): + if (flag is None and self.debug_flag) or flag: + self.logger.error(message) + + +class Plugin(Debuggable): + """This is the base class for all Bcfg2 Server plugins. 
+ Several attributes must be defined in the subclass: + name : the name of the plugin + __author__ : the author/contact for the plugin + + Plugins can provide three basic types of functionality: + - Structure creation (overloading BuildStructures) + - Configuration entry binding (overloading HandlesEntry, or loads the Entries table) + - Data collection (overloading GetProbes/ReceiveData) + """ + name = 'Plugin' + __author__ = 'bcfg-dev@mcs.anl.gov' + experimental = False + deprecated = False + conflicts = [] + + # Default sort_order to 500. Plugins of the same type are + # processed in order of ascending sort_order value. Plugins with + # the same sort_order are sorted alphabetically by their name. + sort_order = 500 + + def __init__(self, core, datastore): + """Initialize the plugin. + + :param core: the Bcfg2.Server.Core initializing the plugin + :param datastore: the filesystem path of Bcfg2's repository + """ + object.__init__(self) + self.Entries = {} + self.core = core + self.data = os.path.join(datastore, self.name) + self.running = True + Debuggable.__init__(self, name=self.name) + + @classmethod + def init_repo(cls, repo): + path = "%s/%s" % (repo, cls.name) + os.makedirs(path) + + def shutdown(self): + self.running = False + + +class Generator(object): + """Generator plugins contribute to literal client configurations.""" + def HandlesEntry(self, entry, metadata): + """This is the slow path method for routing configuration binding requests.""" + return False + + def HandleEntry(self, entry, metadata): + """This is the slow-path handler for configuration entry binding.""" + raise PluginExecutionError + + +class Structure(object): + """Structure Plugins contribute to abstract client configurations.""" + def BuildStructures(self, metadata): + """Return a list of abstract goal structures for client.""" + raise PluginExecutionError + + +class Metadata(object): + """Signal metadata capabilities for this plugin""" + def add_client(self, client_name, attribs): + """Add client.""" + pass + + def remove_client(self, client_name): + """Remove client.""" + pass + + def viz(self, hosts, bundles, key, colors): + """Create viz str for viz admin mode.""" + pass + + def get_initial_metadata(self, client_name): + raise PluginExecutionError + + def merge_additional_data(self, imd, source, groups, data): + raise PluginExecutionError + + +class Connector(object): + """Connector Plugins augment client metadata instances.""" + def get_additional_groups(self, metadata): + """Determine additional groups for metadata.""" + return list() + + def get_additional_data(self, metadata): + """Determine additional data for metadata instances.""" + return dict() + + +class Probing(object): + """Signal probe capability for this plugin.""" + def GetProbes(self, _): + """Return a set of probes for execution on client.""" + return [] + + def ReceiveData(self, _, dummy): + """Receive probe results pertaining to client.""" + pass + + +class Statistics(object): + """Signal statistics handling capability.""" + def process_statistics(self, client, xdata): + pass + + +class ThreadedStatistics(Statistics, + threading.Thread): + """Threaded statistics handling capability.""" + def __init__(self, core, datastore): + Statistics.__init__(self) + threading.Thread.__init__(self) + # Event from the core signaling an exit + self.terminate = core.terminate + self.work_queue = Queue(100000) + self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__) + self.daemon = True + self.start() + + def save(self): + """Save 
any pending data to a file."""
+        pending_data = []
+        try:
+            while not self.work_queue.empty():
+                (metadata, data) = self.work_queue.get_nowait()
+                try:
+                    pending_data.append((metadata.hostname,
+                                         lxml.etree.tostring(data)))
+                except:
+                    self.logger.warning("Dropping interaction for %s" %
+                                        metadata.hostname)
+        except Empty:
+            pass
+
+        try:
+            savefile = open(self.pending_file, 'w')
+            pickle.dump(pending_data, savefile)
+            savefile.close()
+            self.logger.info("Saved pending %s data" % self.__class__.__name__)
+        except:
+            self.logger.warning("Failed to save pending data")
+
+    def load(self):
+        """Load any pending data from a file."""
+        if not os.path.exists(self.pending_file):
+            return True
+        pending_data = []
+        try:
+            savefile = open(self.pending_file, 'r')
+            pending_data = pickle.load(savefile)
+            savefile.close()
+        except Exception:
+            e = sys.exc_info()[1]
+            self.logger.warning("Failed to load pending data: %s" % e)
+        for (pmetadata, pdata) in pending_data:
+            # check that shutdown wasn't called early
+            if self.terminate.isSet():
+                return False
+
+            try:
+                while True:
+                    try:
+                        metadata = self.core.build_metadata(pmetadata)
+                        break
+                    except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
+                        pass
+
+                    self.terminate.wait(5)
+                    if self.terminate.isSet():
+                        return False
+
+                self.work_queue.put_nowait((metadata,
+                                            lxml.etree.fromstring(pdata)))
+            except Full:
+                self.logger.warning("Queue.Full: Failed to load queue data")
+                break
+            except lxml.etree.LxmlError:
+                lxml_error = sys.exc_info()[1]
+                self.logger.error("Unable to load saved interaction: %s" %
+                                  lxml_error)
+            except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
+                self.logger.error("Unable to load metadata for saved "
+                                  "interaction: %s" % pmetadata)
+        try:
+            os.unlink(self.pending_file)
+        except:
+            self.logger.error("Failed to unlink save file: %s" %
+                              self.pending_file)
+        self.logger.info("Loaded pending %s data" % self.__class__.__name__)
+        return True
+
+    def run(self):
+        if not self.load():
+            return
+        while not self.terminate.isSet():
+            try:
+                (metadata, data) = self.work_queue.get(block=True, timeout=2)
+            except Empty:
+                continue
+            except Exception:
+                e = sys.exc_info()[1]
+                self.logger.error("ThreadedStatistics: %s" % e)
+                continue
+            self.handle_statistics(metadata, data)
+        if not self.work_queue.empty():
+            self.save()
+
+    def process_statistics(self, metadata, data):
+        warned = False
+        try:
+            self.work_queue.put_nowait((metadata, copy.copy(data)))
+            warned = False
+        except Full:
+            if not warned:
+                self.logger.warning("%s: Queue is full. Dropping interactions."
% self.__class__.__name__) + warned = True + + def handle_statistics(self, metadata, data): + """Handle stats here.""" + pass + + +class PullSource(object): + def GetExtra(self, client): + return [] + + def GetCurrentEntry(self, client, e_type, e_name): + raise PluginExecutionError + + +class PullTarget(object): + def AcceptChoices(self, entry, metadata): + raise PluginExecutionError + + def AcceptPullData(self, specific, new_entry, verbose): + """This is the null per-plugin implementation + of bcfg2-admin pull.""" + raise PluginExecutionError + + +class Decision(object): + """Signal decision handling capability.""" + def GetDecisions(self, metadata, mode): + return [] + + +class ValidationError(Exception): + pass + + +class StructureValidator(object): + """Validate/modify goal structures.""" + def validate_structures(self, metadata, structures): + raise ValidationError("not implemented") + + +class GoalValidator(object): + """Validate/modify configuration goals.""" + def validate_goals(self, metadata, goals): + raise ValidationError("not implemented") + + +class Version(object): + """Interact with various version control systems.""" + def get_revision(self): + return [] + + def commit_data(self, file_list, comment=None): + pass + + +# the rest of the file contains classes for coherent file caching + +class FileBacked(object): + """This object caches file data in memory. + HandleEvent is called whenever fam registers an event. + Index can parse the data into member data as required. + This object is meant to be used as a part of DirectoryBacked. + """ + + def __init__(self, name): + object.__init__(self) + self.data = '' + self.name = name + + def HandleEvent(self, event=None): + """Read file upon update.""" + if event and event.code2str() not in ['exists', 'changed', 'created']: + return + try: + self.data = BUILTIN_FILE_TYPE(self.name).read() + self.Index() + except IOError: + err = sys.exc_info()[1] + logger.error("Failed to read file %s: %s" % (self.name, err)) + + def Index(self): + """Update local data structures based on current file state""" + pass + + def __repr__(self): + return "%s: %s" % (self.__class__.__name__, str(self)) + + def __str__(self): + return "%s: %s" % (self.name, self.data) + + +class DirectoryBacked(object): + """This object is a coherent cache for a filesystem hierarchy of files.""" + __child__ = FileBacked + patterns = re.compile('.*') + + def __init__(self, data, fam): + """Initialize the DirectoryBacked object. + + :param self: the object being initialized. + :param data: the path to the data directory that will be + monitored. + :param fam: The FileMonitor object used to receive + notifications of changes. + """ + object.__init__(self) + + self.data = os.path.normpath(data) + self.fam = fam + + # self.entries contains information about the files monitored + # by this object.... The keys of the dict are the relative + # paths to the files. The values are the objects (of type + # __child__) that handle their contents. + self.entries = {} + + # self.handles contains information about the directories + # monitored by this object. The keys of the dict are the + # values returned by the initial fam.AddMonitor() call (which + # appear to be integers). The values are the relative paths of + # the directories. 
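+        # note that handles are only ever added, never removed; see the
+        # 'deleted' branch of HandleEvent below for why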
+        self.handles = {}
+
+        # Monitor everything in the plugin's directory
+        self.add_directory_monitor('')
+
+    def __getitem__(self, key):
+        return self.entries[key]
+
+    def __iter__(self):
+        return iter(list(self.entries.items()))
+
+    def add_directory_monitor(self, relative):
+        """Add a new directory to FAM structures for monitoring.
+
+        :param relative: Path name to monitor. This must be relative
+        to the plugin's directory. An empty string value ("") will
+        cause the plugin directory itself to be monitored.
+        """
+        dirpathname = os.path.join(self.data, relative)
+        if relative not in self.handles.values():
+            if not posixpath.isdir(dirpathname):
+                logger.error("Failed to open directory %s" % (dirpathname))
+                return
+            reqid = self.fam.AddMonitor(dirpathname, self)
+            self.handles[reqid] = relative
+
+    def add_entry(self, relative, event):
+        """Add a new file to our structures for monitoring.
+
+        :param relative: Path name to monitor. This must be relative
+        to the plugin's directory.
+        :param event: File Monitor event that caused this entry to be
+        added.
+        """
+        self.entries[relative] = self.__child__(os.path.join(self.data,
+                                                             relative))
+        self.entries[relative].HandleEvent(event)
+
+    def HandleEvent(self, event):
+        """Handle FAM/Gamin events.
+
+        This method is invoked by FAM/Gamin when it detects a change
+        to a filesystem object we have requested to be monitored.
+
+        This method manages the lifecycle of events related to the
+        monitored objects, adding them to our indices and creating
+        objects of type __child__ that actually do the domain-specific
+        processing. When appropriate, it propagates events to those
+        objects by invoking their HandleEvent in turn.
+        """
+        action = event.code2str()
+
+        # Clean up the absolute path names passed in
+        event.filename = os.path.normpath(event.filename)
+        if event.filename.startswith(self.data):
+            event.filename = event.filename[len(self.data)+1:]
+
+        # Exclude events for actions we don't care about
+        if action == 'endExist':
+            return
+
+        if event.requestID not in self.handles:
+            logger.warn("Got %s event with unknown handle (%s) for %s"
+                        % (action, event.requestID, event.filename))
+            return
+
+        # Calculate the absolute and relative paths this event refers to
+        abspath = os.path.join(self.data, self.handles[event.requestID],
+                               event.filename)
+        relpath = os.path.join(self.handles[event.requestID], event.filename)
+
+        if action == 'deleted':
+            for key in list(self.entries.keys()):
+                if key.startswith(relpath):
+                    del self.entries[key]
+            # We remove values from self.entries, but not
+            # self.handles, because the FileMonitor doesn't stop
+            # watching a directory just because it gets deleted. If it
+            # is recreated, we will start getting notifications for it
+            # again without having to add a new monitor.
+        elif posixpath.isdir(abspath):
+            # Deal with events for directories
+            if action in ['exists', 'created']:
+                self.add_directory_monitor(relpath)
+            elif action == 'changed':
+                if relpath in self.entries:
+                    # Ownerships, permissions or timestamps changed on
+                    # the directory. None of these should affect the
+                    # contents of the files, though it could change
+                    # our ability to access them.
+                    #
+                    # It seems like the right thing to do is to cancel
+                    # monitoring the directory and then begin
+                    # monitoring it again. But the current FileMonitor
+                    # class doesn't support canceling, so at least let
+                    # the user know that a restart might be a good
+                    # idea.
+ logger.warn("Directory properties for %s changed, please " + + " consider restarting the server" % (abspath)) + else: + # Got a "changed" event for a directory that we + # didn't know about. Go ahead and treat it like a + # "created" event, but log a warning, because this + # is unexpected. + logger.warn("Got %s event for unexpected dir %s" % (action, + abspath)) + self.add_directory_monitor(relpath) + else: + logger.warn("Got unknown dir event %s %s %s" % (event.requestID, + event.code2str(), + abspath)) + else: + # Deal with events for non-directories + if ((event.filename[-1] == '~') or + (event.filename[:2] == '.#') or + (event.filename[-4:] == '.swp') or + (event.filename in ['SCCS', '.svn', '4913']) or + (not self.patterns.match(event.filename))): + return + if action in ['exists', 'created']: + self.add_entry(relpath, event) + elif action == 'changed': + if relpath in self.entries: + self.entries[relpath].HandleEvent(event) + else: + # Got a "changed" event for a file that we didn't + # know about. Go ahead and treat it like a + # "created" event, but log a warning, because this + # is unexpected. + logger.warn("Got %s event for unexpected file %s" % (action, + abspath)) + self.add_entry(relpath, event) + else: + logger.warn("Got unknown file event %s %s %s" % (event.requestID, + event.code2str(), + abspath)) + + +class XMLFileBacked(FileBacked): + """ + This object is a coherent cache for an XML file to be used as a + part of DirectoryBacked. + """ + __identifier__ = 'name' + + def __init__(self, filename): + self.label = "dummy" + self.entries = [] + FileBacked.__init__(self, filename) + + def Index(self): + """Build local data structures.""" + try: + self.xdata = XML(self.data) + except XMLSyntaxError: + logger.error("Failed to parse %s" % (self.name)) + return + self.entries = self.xdata.getchildren() + if self.__identifier__ is not None: + self.label = self.xdata.attrib[self.__identifier__] + + def __iter__(self): + return iter(self.entries) + + def __str__(self): + return "%s: %s" % (self.name, lxml.etree.tostring(self.xdata)) + + +class SingleXMLFileBacked(XMLFileBacked): + """This object is a coherent cache for an independent XML file.""" + def __init__(self, filename, fam): + XMLFileBacked.__init__(self, filename) + self.extras = [] + self.fam = fam + self.fam.AddMonitor(filename, self) + + def _follow_xincludes(self, fname=None, xdata=None): + ''' follow xincludes, adding included files to fam and to + self.extras ''' + if xdata is None: + if fname is None: + xdata = self.xdata.getroottree() + else: + xdata = lxml.etree.parse(fname) + included = [ent.get('href') + for ent in xdata.findall('//{http://www.w3.org/2001/XInclude}include')] + for name in included: + if name not in self.extras: + if name.startswith("/"): + fpath = name + else: + fpath = os.path.join(os.path.dirname(self.name), name) + self.add_monitor(fpath, name) + self._follow_xincludes(fname=fpath) + + def add_monitor(self, fpath, fname): + self.fam.AddMonitor(fpath, self) + self.extras.append(fname) + + def Index(self): + """Build local data structures.""" + try: + self.xdata = lxml.etree.XML(self.data, base_url=self.name) + except lxml.etree.XMLSyntaxError: + err = sys.exc_info()[1] + logger.error("Failed to parse %s: %s" % (self.name, err)) + raise Bcfg2.Server.Plugin.PluginInitError + + self._follow_xincludes() + if self.extras: + try: + self.xdata.getroottree().xinclude() + except lxml.etree.XIncludeError: + err = sys.exc_info()[1] + logger.error("XInclude failed on %s: %s" % (self.name, err)) + + 
self.entries = self.xdata.getchildren() + if self.__identifier__ is not None: + self.label = self.xdata.attrib[self.__identifier__] + + +class StructFile(XMLFileBacked): + """This file contains a set of structure file formatting logic.""" + __identifier__ = None + + def __init__(self, name): + XMLFileBacked.__init__(self, name) + + def _match(self, item, metadata): + """ recursive helper for Match() """ + if isinstance(item, lxml.etree._Comment): + return [] + elif item.tag == 'Group': + rv = [] + if ((item.get('negate', 'false').lower() == 'true' and + item.get('name') not in metadata.groups) or + (item.get('negate', 'false').lower() == 'false' and + item.get('name') in metadata.groups)): + for child in item.iterchildren(): + rv.extend(self._match(child, metadata)) + return rv + elif item.tag == 'Client': + rv = [] + if ((item.get('negate', 'false').lower() == 'true' and + item.get('name') != metadata.hostname) or + (item.get('negate', 'false').lower() == 'false' and + item.get('name') == metadata.hostname)): + for child in item.iterchildren(): + rv.extend(self._match(child, metadata)) + return rv + else: + rv = copy.copy(item) + for child in rv.iterchildren(): + rv.remove(child) + for child in item.iterchildren(): + rv.extend(self._match(child, metadata)) + return [rv] + + def Match(self, metadata): + """Return matching fragments of independent.""" + rv = [] + for child in self.entries: + rv.extend(self._match(child, metadata)) + return rv + + +class INode: + """ + LNodes provide lists of things available at a particular + group intersection. + """ + raw = {'Client': "lambda m, e:'%(name)s' == m.hostname and predicate(m, e)", + 'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"} + nraw = {'Client': "lambda m, e:'%(name)s' != m.hostname and predicate(m, e)", + 'Group': "lambda m, e:'%(name)s' not in m.groups and predicate(m, e)"} + containers = ['Group', 'Client'] + ignore = [] + + def __init__(self, data, idict, parent=None): + self.data = data + self.contents = {} + if parent == None: + self.predicate = lambda m, d: True + else: + predicate = parent.predicate + if data.get('negate', 'false') in ['true', 'True']: + psrc = self.nraw + else: + psrc = self.raw + if data.tag in list(psrc.keys()): + self.predicate = eval(psrc[data.tag] % + {'name': data.get('name')}, + {'predicate': predicate}) + else: + raise Exception + mytype = self.__class__ + self.children = [] + for item in data.getchildren(): + if item.tag in self.ignore: + continue + elif item.tag in self.containers: + self.children.append(mytype(item, idict, self)) + else: + try: + self.contents[item.tag][item.get('name')] = item.attrib + except KeyError: + self.contents[item.tag] = {item.get('name'): item.attrib} + if item.text: + self.contents[item.tag]['__text__'] = item.text + try: + idict[item.tag].append(item.get('name')) + except KeyError: + idict[item.tag] = [item.get('name')] + + def Match(self, metadata, data, entry=lxml.etree.Element("None")): + """Return a dictionary of package mappings.""" + if self.predicate(metadata, entry): + for key in self.contents: + try: + data[key].update(self.contents[key]) + except: + data[key] = {} + data[key].update(self.contents[key]) + for child in self.children: + child.Match(metadata, data, entry=entry) + + +class InfoNode (INode): + """ INode implementation that includes <Path> tags """ + raw = {'Client': "lambda m, e:'%(name)s' == m.hostname and predicate(m, e)", + 'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)", + 'Path': "lambda m, e:('%(name)s' == 
e.get('name') or '%(name)s' == e.get('realname')) and predicate(m, e)"} + nraw = {'Client': "lambda m, e:'%(name)s' != m.hostname and predicate(m, e)", + 'Group': "lambda m, e:'%(name)s' not in m.groups and predicate(m, e)", + 'Path': "lambda m, e:('%(name)s' != e.get('name') and '%(name)s' != e.get('realname')) and predicate(m, e)"} + containers = ['Group', 'Client', 'Path'] + + +class XMLSrc(XMLFileBacked): + """XMLSrc files contain a LNode hierarchy that returns matching entries.""" + __node__ = INode + __cacheobj__ = dict + + def __init__(self, filename, noprio=False): + XMLFileBacked.__init__(self, filename) + self.items = {} + self.cache = None + self.pnode = None + self.priority = -1 + self.noprio = noprio + + def HandleEvent(self, _=None): + """Read file upon update.""" + try: + data = BUILTIN_FILE_TYPE(self.name).read() + except IOError: + logger.error("Failed to read file %s" % (self.name)) + return + self.items = {} + try: + xdata = lxml.etree.XML(data) + except lxml.etree.XMLSyntaxError: + logger.error("Failed to parse file %s" % (self.name)) + return + self.pnode = self.__node__(xdata, self.items) + self.cache = None + try: + self.priority = int(xdata.get('priority')) + except (ValueError, TypeError): + if not self.noprio: + logger.error("Got bogus priority %s for file %s" % + (xdata.get('priority'), self.name)) + del xdata, data + + def Cache(self, metadata): + """Build a package dict for a given host.""" + if self.cache == None or self.cache[0] != metadata: + cache = (metadata, self.__cacheobj__()) + if self.pnode == None: + logger.error("Cache method called early for %s; forcing data load" % (self.name)) + self.HandleEvent() + return + self.pnode.Match(metadata, cache[1]) + self.cache = cache + + def __str__(self): + return str(self.items) + + +class InfoXML (XMLSrc): + __node__ = InfoNode + + +class XMLDirectoryBacked(DirectoryBacked): + """Directorybacked for *.xml.""" + patterns = re.compile('.*\.xml') + + +class PrioDir(Plugin, Generator, XMLDirectoryBacked): + """This is a generator that handles package assignments.""" + name = 'PrioDir' + __child__ = XMLSrc + + def __init__(self, core, datastore): + Plugin.__init__(self, core, datastore) + Generator.__init__(self) + try: + XMLDirectoryBacked.__init__(self, self.data, self.core.fam) + except OSError: + self.logger.error("Failed to load %s indices" % (self.name)) + raise PluginInitError + + def HandleEvent(self, event): + """Handle events and update dispatch table.""" + XMLDirectoryBacked.HandleEvent(self, event) + self.Entries = {} + for src in list(self.entries.values()): + for itype, children in list(src.items.items()): + for child in children: + try: + self.Entries[itype][child] = self.BindEntry + except KeyError: + self.Entries[itype] = {child: self.BindEntry} + + def _matches(self, entry, metadata, rules): + return entry.get('name') in rules + + def BindEntry(self, entry, metadata): + attrs = self.get_attrs(entry, metadata) + for key, val in list(attrs.items()): + entry.attrib[key] = val + + def get_attrs(self, entry, metadata): + """ get a list of attributes to add to the entry during the bind """ + for src in self.entries.values(): + src.Cache(metadata) + + matching = [src for src in list(self.entries.values()) + if (src.cache and + entry.tag in src.cache[1] and + self._matches(entry, metadata, + src.cache[1][entry.tag]))] + if len(matching) == 0: + raise PluginExecutionError('No matching source for entry when retrieving attributes for %s(%s)' % (entry.tag, entry.attrib.get('name'))) + elif len(matching) == 1: + 
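+            # exactly one source matched; use it without any priority
+            # tie-breaking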
index = 0 + else: + prio = [int(src.priority) for src in matching] + if prio.count(max(prio)) > 1: + self.logger.error("Found conflicting sources with " + "same priority for %s, %s %s" % + (metadata.hostname, + entry.tag.lower(), entry.get('name'))) + self.logger.error([item.name for item in matching]) + self.logger.error("Priority was %s" % max(prio)) + raise PluginExecutionError + index = prio.index(max(prio)) + + for rname in list(matching[index].cache[1][entry.tag].keys()): + if self._matches(entry, metadata, [rname]): + data = matching[index].cache[1][entry.tag][rname] + break + if '__text__' in data: + entry.text = data['__text__'] + if '__children__' in data: + [entry.append(copy.copy(item)) for item in data['__children__']] + + return dict([(key, data[key]) + for key in list(data.keys()) + if not key.startswith('__')]) + + +# new unified EntrySet backend +class SpecificityError(Exception): + """Thrown in case of filename parse failure.""" + pass + + +class Specificity: + + def __init__(self, all=False, group=False, hostname=False, prio=0, delta=False): + self.hostname = hostname + self.all = all + self.group = group + self.prio = prio + self.delta = delta + + def __lt__(self, other): + return self.__cmp__(other) < 0 + + def matches(self, metadata): + return self.all or \ + self.hostname == metadata.hostname or \ + self.group in metadata.groups + + def __cmp__(self, other): + """Sort most to least specific.""" + if self.all: + return 1 + if self.group: + if other.hostname: + return 1 + if other.group and other.prio > self.prio: + return 1 + if other.group and other.prio == self.prio: + return 0 + return -1 + + def more_specific(self, other): + """Test if self is more specific than other.""" + if self.all: + True + elif self.group: + if other.hostname: + return True + elif other.group and other.prio > self.prio: + return True + return False + + +class SpecificData(object): + def __init__(self, name, specific, encoding): + self.name = name + self.specific = specific + + def handle_event(self, event): + if event.code2str() == 'deleted': + return + try: + self.data = open(self.name).read() + except: + logger.error("Failed to read file %s" % self.name) + + +class EntrySet(object): + """Entry sets deal with the host- and group-specific entries.""" + ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\\.genshi_include)$") + + def __init__(self, basename, path, entry_type, encoding): + self.path = path + self.entry_type = entry_type + self.entries = {} + self.metadata = default_file_metadata.copy() + self.infoxml = None + self.encoding = encoding + pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % basename + pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$' + self.specific = re.compile(pattern) + + def get_matching(self, metadata): + return [item for item in list(self.entries.values()) + if item.specific.matches(metadata)] + + def best_matching(self, metadata): + """ Return the appropriate interpreted template from the set of + available templates. 
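+        Host-specific entries win over group-specific entries (with ties
+        broken by group priority), which in turn win over all-host entries.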
""" + matching = self.get_matching(metadata) + + hspec = [ent for ent in matching if ent.specific.hostname] + if hspec: + return hspec[0] + + gspec = [ent for ent in matching if ent.specific.group] + if gspec: + gspec.sort(self.group_sortfunc) + return gspec[-1] + + aspec = [ent for ent in matching if ent.specific.all] + if aspec: + return aspec[0] + + raise PluginExecutionError + + def handle_event(self, event): + """Handle FAM events for the TemplateSet.""" + action = event.code2str() + + if event.filename in ['info', 'info.xml', ':info']: + if action in ['exists', 'created', 'changed']: + self.update_metadata(event) + elif action == 'deleted': + self.reset_metadata(event) + return + + if action in ['exists', 'created']: + self.entry_init(event) + else: + if event.filename not in self.entries: + logger.warning("Got %s event for unknown file %s" % + (action, event.filename)) + if action == 'changed': + # received a bogus changed event; warn, but treat + # it like a created event + self.entry_init(event) + return + if action == 'changed': + self.entries[event.filename].handle_event(event) + elif action == 'deleted': + del self.entries[event.filename] + + def entry_init(self, event): + """Handle template and info file creation.""" + if event.filename in self.entries: + logger.warn("Got duplicate add for %s" % event.filename) + else: + fpath = "%s/%s" % (self.path, event.filename) + try: + spec = self.specificity_from_filename(event.filename) + except SpecificityError: + if not self.ignore.match(event.filename): + logger.error("Could not process filename %s; ignoring" % + fpath) + return + self.entries[event.filename] = self.entry_type(fpath, + spec, self.encoding) + self.entries[event.filename].handle_event(event) + + def specificity_from_filename(self, fname): + """Construct a specificity instance from a filename and regex.""" + data = self.specific.match(fname) + if not data: + raise SpecificityError(fname) + kwargs = {} + if data.group('hostname'): + kwargs['hostname'] = data.group('hostname') + elif data.group('group'): + kwargs['group'] = data.group('group') + kwargs['prio'] = int(data.group('prio')) + else: + kwargs['all'] = True + if 'delta' in data.groupdict(): + kwargs['delta'] = data.group('delta') + return Specificity(**kwargs) + + def update_metadata(self, event): + """Process info and info.xml files for the templates.""" + fpath = "%s/%s" % (self.path, event.filename) + if event.filename == 'info.xml': + if not self.infoxml: + self.infoxml = InfoXML(fpath, True) + self.infoxml.HandleEvent(event) + elif event.filename in [':info', 'info']: + for line in open(fpath).readlines(): + match = info_regex.match(line) + if not match: + logger.warning("Failed to match line in %s: %s" % (fpath, + line)) + continue + else: + mgd = match.groupdict() + for key, value in list(mgd.items()): + if value: + self.metadata[key] = value + if len(self.metadata['perms']) == 3: + self.metadata['perms'] = "0%s" % \ + (self.metadata['perms']) + + def reset_metadata(self, event): + """Reset metadata to defaults if info or info.xml removed.""" + if event.filename == 'info.xml': + self.infoxml = None + elif event.filename in [':info', 'info']: + self.metadata = default_file_metadata.copy() + + def group_sortfunc(self, x, y): + """sort groups by their priority""" + return cmp(x.specific.prio, y.specific.prio) + + def bind_info_to_entry(self, entry, metadata): + # first set defaults from global metadata/:info + for key in self.metadata: + entry.set(key, self.metadata[key]) + if self.infoxml: + mdata = {} + 
self.infoxml.pnode.Match(metadata, mdata, entry=entry) + if 'Info' not in mdata: + logger.error("Failed to set metadata for file %s" % \ + (entry.get('name'))) + raise PluginExecutionError + [entry.attrib.__setitem__(key, value) \ + for (key, value) in list(mdata['Info'][None].items())] + + def bind_entry(self, entry, metadata): + """Return the appropriate interpreted template from the set of available templates.""" + self.bind_info_to_entry(entry, metadata) + return self.best_matching(metadata).bind_entry(entry, metadata) + + +class GroupSpool(Plugin, Generator): + """Unified interface for handling group-specific data (e.g. .G## files).""" + name = 'GroupSpool' + __author__ = 'bcfg-dev@mcs.anl.gov' + filename_pattern = "" + es_child_cls = object + es_cls = EntrySet + + def __init__(self, core, datastore): + Plugin.__init__(self, core, datastore) + Generator.__init__(self) + if self.data[-1] == '/': + self.data = self.data[:-1] + self.Entries['Path'] = {} + self.entries = {} + self.handles = {} + self.AddDirectoryMonitor('') + self.encoding = core.encoding + + def add_entry(self, event): + epath = self.event_path(event) + ident = self.event_id(event) + if posixpath.isdir(epath): + self.AddDirectoryMonitor(epath[len(self.data):]) + if ident not in self.entries and posixpath.isfile(epath): + dirpath = "".join([self.data, ident]) + self.entries[ident] = self.es_cls(self.filename_pattern, + dirpath, + self.es_child_cls, + self.encoding) + self.Entries['Path'][ident] = self.entries[ident].bind_entry + if not posixpath.isdir(epath): + # do not pass through directory events + self.entries[ident].handle_event(event) + + def event_path(self, event): + return "".join([self.data, self.handles[event.requestID], + event.filename]) + + def event_id(self, event): + epath = self.event_path(event) + if posixpath.isdir(epath): + return self.handles[event.requestID] + event.filename + else: + return self.handles[event.requestID][:-1] + + def HandleEvent(self, event): + """Unified FAM event handler for GroupSpool.""" + action = event.code2str() + if event.filename[0] == '/': + return + ident = self.event_id(event) + + if action in ['exists', 'created']: + self.add_entry(event) + if action == 'changed': + if ident in self.entries: + self.entries[ident].handle_event(event) + else: + # got a changed event for a file we didn't know + # about. 
go ahead and process this as a 'created', but + # warn + self.logger.warning("Got changed event for unknown file %s" % + ident) + self.add_entry(event) + elif action == 'deleted': + fbase = self.handles[event.requestID] + event.filename + if fbase in self.entries: + # a directory was deleted + del self.entries[fbase] + del self.Entries['Path'][fbase] + elif ident in self.entries: + self.entries[ident].handle_event(event) + elif ident not in self.entries: + self.logger.warning("Got deleted event for unknown file %s" % + ident) + + def AddDirectoryMonitor(self, relative): + """Add new directory to FAM structures.""" + if not relative.endswith('/'): + relative += '/' + name = self.data + relative + if relative not in list(self.handles.values()): + if not posixpath.isdir(name): + print("Failed to open directory %s" % (name)) + return + reqid = self.core.fam.AddMonitor(name, self) + self.handles[reqid] = relative + +class SimpleConfig(FileBacked, + ConfigParser.SafeConfigParser): + ''' a simple plugin config using ConfigParser ''' + _required = True + + def __init__(self, plugin): + filename = os.path.join(plugin.data, plugin.name.lower() + ".conf") + self.plugin = plugin + self.fam = self.plugin.core.fam + self.read_files = set() + Bcfg2.Server.Plugin.FileBacked.__init__(self, filename) + ConfigParser.SafeConfigParser.__init__(self) + + if (self._required or + (not self._required and os.path.exists(self.name))): + self.fam.AddMonitor(self.name, self) + + def Index(self): + """ Build local data structures """ + for section in self.sections(): + self.remove_section(section) + self.read_files.update(self.read(self.name)) + + def get(self, section, option, **kwargs): + """ convenience method for getting config items """ + default = None + if 'default' in kwargs: + default = kwargs['default'] + del kwargs['default'] + try: + return ConfigParser.SafeConfigParser.get(self, section, option, + **kwargs) + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + if default is not None: + return default + else: + raise + + def getboolean(self, section, option, **kwargs): + """ convenience method for getting boolean config items """ + default = None + if 'default' in kwargs: + default = kwargs['default'] + del kwargs['default'] + try: + return ConfigParser.SafeConfigParser.getboolean(self, section, + option, **kwargs) + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, + ValueError): + if default is not None: + return default + else: + raise + + @property + def loaded(self): + if os.path.exists(self.name): + return self.name in self.read_files + else: + return True + diff --git a/src/lib/Bcfg2/Server/Plugins/Account.py b/src/lib/Bcfg2/Server/Plugins/Account.py new file mode 100644 index 000000000..f2703dccb --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Account.py @@ -0,0 +1,101 @@ +"""This handles authentication setup.""" + +import Bcfg2.Server.Plugin + + +class Account(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Generator): + """This module generates account config files, + based on an internal data repo: + static.(passwd|group|limits.conf) -> static entries + dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch) + useraccess -> users to be granted login access on some hosts + superusers -> users to be granted root privs on all hosts + rootlike -> users to be granted root privs on some hosts + + """ + name = 'Account' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, 
datastore) + Bcfg2.Server.Plugin.Generator.__init__(self) + self.Entries = {'ConfigFile': {'/etc/passwd': self.from_yp_cb, + '/etc/group': self.from_yp_cb, + '/etc/security/limits.conf': self.gen_limits_cb, + '/root/.ssh/authorized_keys': self.gen_root_keys_cb, + '/etc/sudoers': self.gen_sudoers}} + try: + self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data, + self.core.fam) + except: + self.logger.error("Failed to load repos: %s, %s" % \ + (self.data, "%s/ssh" % (self.data))) + raise Bcfg2.Server.Plugin.PluginInitError + + def from_yp_cb(self, entry, metadata): + """Build password file from cached yp data.""" + fname = entry.attrib['name'].split('/')[-1] + entry.text = self.repository.entries["static.%s" % (fname)].data + entry.text += self.repository.entries["dyn.%s" % (fname)].data + perms = {'owner': 'root', + 'group': 'root', + 'perms': '0644'} + [entry.attrib.__setitem__(key, value) for (key, value) in \ + list(perms.items())] + + def gen_limits_cb(self, entry, metadata): + """Build limits entries based on current ACLs.""" + entry.text = self.repository.entries["static.limits.conf"].data + superusers = self.repository.entries["superusers"].data.split() + useraccess = [line.split(':') for line in \ + self.repository.entries["useraccess"].data.split()] + users = [user for (user, host) in \ + useraccess if host == metadata.hostname.split('.')[0]] + perms = {'owner': 'root', + 'group': 'root', + 'perms': '0600'} + [entry.attrib.__setitem__(key, value) for (key, value) in \ + list(perms.items())] + entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users]) + if "*" not in users: + entry.text += "* hard maxlogins 0\n" + + def gen_root_keys_cb(self, entry, metadata): + """Build root authorized keys file based on current ACLs.""" + superusers = self.repository.entries['superusers'].data.split() + try: + rootlike = [line.split(':', 1) for line in \ + self.repository.entries['rootlike'].data.split()] + superusers += [user for (user, host) in rootlike \ + if host == metadata.hostname.split('.')[0]] + except: + pass + rdata = self.repository.entries + entry.text = "".join([rdata["%s.key" % user].data for user \ + in superusers if \ + ("%s.key" % user) in rdata]) + perms = {'owner': 'root', + 'group': 'root', + 'perms': '0600'} + [entry.attrib.__setitem__(key, value) for (key, value) \ + in list(perms.items())] + + def gen_sudoers(self, entry, metadata): + """Build root authorized keys file based on current ACLs.""" + superusers = self.repository.entries['superusers'].data.split() + try: + rootlike = [line.split(':', 1) for line in \ + self.repository.entries['rootlike'].data.split()] + superusers += [user for (user, host) in rootlike \ + if host == metadata.hostname.split('.')[0]] + except: + pass + entry.text = self.repository.entries['static.sudoers'].data + entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \ + for uname in superusers]) + perms = {'owner': 'root', + 'group': 'root', + 'perms': '0440'} + [entry.attrib.__setitem__(key, value) for (key, value) \ + in list(perms.items())] diff --git a/src/lib/Bcfg2/Server/Plugins/BB.py b/src/lib/Bcfg2/Server/Plugins/BB.py new file mode 100644 index 000000000..c015ec47c --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/BB.py @@ -0,0 +1,83 @@ +import lxml.etree +import Bcfg2.Server.Plugin +import glob +import os +import socket + +#manage boot symlinks + #add statistics check to do build->boot mods + +#map profiles: first array is not empty we replace the -p with a determined profile. 
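+# A bb.xml sketch matching what BBfile.Index() expects below (values are
+# hypothetical; the root 'tftp' attribute and the per-Node 'user' list are
+# optional, and the root tag itself is not checked by the parser):
+#
+#   <BB tftp="/tftpboot">
+#     <Node name="node1" action="install" user="alice:bob">
+#       <Interface mac="00:11:22:33:44:55"/>
+#     </Node>
+#   </BB>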
+logger = Bcfg2.Server.Plugin.logger
+
+class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
+    """Class for bb files."""
+    def Index(self):
+        """Build data into an xml object."""
+        try:
+            self.data = lxml.etree.XML(self.data)
+        except lxml.etree.XMLSyntaxError:
+            Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
+            return
+        self.tftppath = self.data.get('tftp', '/tftpboot')
+        self.macs = {}
+        self.users = {}
+        self.actions = {}
+        self.bootlinks = []
+
+        for node in self.data.findall('Node'):
+            iface = node.find('Interface')
+            if iface is not None:
+                mac = "01-%s" % iface.get('mac').replace(':', '-').lower()
+                self.actions[node.get('name')] = node.get('action')
+                self.bootlinks.append((mac, node.get('action')))
+                try:
+                    ip = socket.gethostbyname(node.get('name'))
+                except socket.error:
+                    logger.error("Failed host resolution for %s" % node.get('name'))
+                    # without an address there is nothing to record for this node
+                    continue
+                self.macs[node.get('name')] = (iface.get('mac'), ip)
+            else:
+                logger.error("%s" % lxml.etree.tostring(node))
+            self.users[node.get('name')] = node.get('user', "").split(':')
+
+    def enforce_bootlinks(self):
+        for mac, target in self.bootlinks:
+            path = self.tftppath + '/' + mac
+            if not os.path.islink(path):
+                logger.error("Boot file %s not a link" % path)
+            if target != os.readlink(path):
+                try:
+                    os.unlink(path)
+                    os.symlink(target, path)
+                except OSError:
+                    logger.error("Failed to modify link %s" % path)
+
+class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
+    __child__ = BBfile
+
+
+class BB(Bcfg2.Server.Plugin.Plugin,
+         Bcfg2.Server.Plugin.Connector):
+    """The BB plugin maps users to machines and metadata to machines."""
+    name = 'BB'
+    deprecated = True
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        Bcfg2.Server.Plugin.Connector.__init__(self)
+        self.store = BBDirectoryBacked(self.data, core.fam)
+
+    def get_additional_data(self, metadata):
+        users = {}
+        for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
+            pubkeys = []
+            for fname in glob.glob('/home/%s/.ssh/*.pub' % user):
+                pubkeys.append(open(fname).read())
+            users[user] = pubkeys
+
+        return dict([('users', users),
+                     ('macs', self.store.entries['bb.xml'].macs)])
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
new file mode 100644
index 000000000..389ca7a95
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Base.py
@@ -0,0 +1,43 @@
+"""This module sets up a base list of configuration entries."""
+
+import copy
+import lxml.etree
+import sys
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+    from functools import reduce
+
+import Bcfg2.Server.Plugin
+
+
+class Base(Bcfg2.Server.Plugin.Plugin,
+           Bcfg2.Server.Plugin.Structure,
+           Bcfg2.Server.Plugin.XMLDirectoryBacked):
+    """This Structure is good for the pile of independent configs
+    needed for most actual systems.
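+
+    Each file in this plugin's directory is parsed as a StructFile;
+    BuildStructures() gathers the entries from every file that match the
+    client's metadata and flattens them into one <Independent> structure.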
+ """ + name = 'Base' + __author__ = 'bcfg-dev@mcs.anl.gov' + __child__ = Bcfg2.Server.Plugin.StructFile + deprecated = True + + """Base creates independent clauses based on client metadata.""" + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Structure.__init__(self) + try: + Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, + self.data, + self.core.fam) + except OSError: + self.logger.error("Failed to load Base repository") + raise Bcfg2.Server.Plugin.PluginInitError + + def BuildStructures(self, metadata): + """Build structures for client described by metadata.""" + ret = lxml.etree.Element("Independent", version='2.0') + fragments = reduce(lambda x, y: x + y, + [base.Match(metadata) for base + in list(self.entries.values())], []) + [ret.append(copy.copy(frag)) for frag in fragments] + return [ret] diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py new file mode 100644 index 000000000..ccb99481e --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py @@ -0,0 +1,99 @@ +"""This provides bundle clauses with translation functionality.""" + +import copy +import lxml.etree +import os +import os.path +import re +import sys + +import Bcfg2.Server.Plugin + +try: + import genshi.template + import genshi.template.base + import Bcfg2.Server.Plugins.SGenshi + have_genshi = True +except: + have_genshi = False + + +class BundleFile(Bcfg2.Server.Plugin.StructFile): + + def get_xml_value(self, metadata): + bundlename = os.path.splitext(os.path.basename(self.name))[0] + bundle = lxml.etree.Element('Bundle', name=bundlename) + [bundle.append(copy.copy(item)) for item in self.Match(metadata)] + return bundle + + +class Bundler(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Structure, + Bcfg2.Server.Plugin.XMLDirectoryBacked): + """The bundler creates dependent clauses based on the + bundle/translation scheme from Bcfg1. 
+ """ + name = 'Bundler' + __author__ = 'bcfg-dev@mcs.anl.gov' + patterns = re.compile('^(?P<name>.*)\.(xml|genshi)$') + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Structure.__init__(self) + self.encoding = core.encoding + self.__child__ = self.template_dispatch + try: + Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, + self.data, + self.core.fam) + except OSError: + self.logger.error("Failed to load Bundle repository") + raise Bcfg2.Server.Plugin.PluginInitError + + def template_dispatch(self, name): + bundle = lxml.etree.parse(name) + nsmap = bundle.getroot().nsmap + if name.endswith('.xml'): + if have_genshi and \ + (nsmap == {'py': 'http://genshi.edgewall.org/'}): + # allow for genshi bundles with .xml extensions + spec = Bcfg2.Server.Plugin.Specificity() + return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name, + spec, + self.encoding) + else: + return BundleFile(name) + elif name.endswith('.genshi'): + if have_genshi: + spec = Bcfg2.Server.Plugin.Specificity() + return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name, + spec, + self.encoding) + + def BuildStructures(self, metadata): + """Build all structures for client (metadata).""" + bundleset = [] + + bundle_entries = {} + for key, item in self.entries.items(): + bundle_entries.setdefault(self.patterns.match(os.path.basename(key)).group('name'), + []).append(item) + + for bundlename in metadata.bundles: + try: + entries = bundle_entries[bundlename] + except KeyError: + self.logger.error("Bundler: Bundle %s does not exist" % + bundlename) + continue + try: + bundleset.append(entries[0].get_xml_value(metadata)) + except genshi.template.base.TemplateError: + t = sys.exc_info()[1] + self.logger.error("Bundler: Failed to template genshi bundle %s" + % bundlename) + self.logger.error(t) + except: + self.logger.error("Bundler: Unexpected bundler error for %s" % + bundlename, exc_info=1) + return bundleset diff --git a/src/lib/Bcfg2/Server/Plugins/Bzr.py b/src/lib/Bcfg2/Server/Plugins/Bzr.py new file mode 100644 index 000000000..a71021cb5 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Bzr.py @@ -0,0 +1,35 @@ +import Bcfg2.Server.Plugin +from bzrlib.workingtree import WorkingTree +from bzrlib import errors + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Bzr') + +class Bzr(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Bzr is a version plugin for dealing with Bcfg2 repos.""" + name = 'Bzr' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + self.core = core + self.datastore = datastore + + # Read revision from bcfg2 repo + revision = self.get_revision() + + logger.debug("Initialized Bazaar plugin with directory = %(dir)s at revision = %(rev)s" % {'dir': datastore, 'rev': revision}) + + def get_revision(self): + """Read Bazaar revision information for the Bcfg2 repository.""" + try: + working_tree = WorkingTree.open(self.datastore) + revision = str(working_tree.branch.revno()) + if working_tree.has_changes(working_tree.basis_tree()) or working_tree.unknowns(): + revision += "+" + except errors.NotBranchError: + logger.error("Failed to read Bazaar branch; disabling Bazaar support") + raise Bcfg2.Server.Plugin.PluginInitError + return revision diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg.py b/src/lib/Bcfg2/Server/Plugins/Cfg.py new file mode 100644 index 000000000..81904d082 --- /dev/null +++ 
b/src/lib/Bcfg2/Server/Plugins/Cfg.py @@ -0,0 +1,293 @@ +"""This module implements a config file repository.""" + +import binascii +import logging +import lxml +import operator +import os +import os.path +import re +import stat +import sys +import tempfile +from subprocess import Popen, PIPE +from Bcfg2.Bcfg2Py3k import u_str + +import Bcfg2.Server.Plugin + +try: + import genshi.core + import genshi.input + from genshi.template import TemplateLoader, NewTextTemplate + have_genshi = True +except: + have_genshi = False + +try: + import Cheetah.Template + import Cheetah.Parser + have_cheetah = True +except: + have_cheetah = False + +# setup logging +logger = logging.getLogger('Bcfg2.Plugins.Cfg') + + +# snipped from TGenshi +def removecomment(stream): + """A genshi filter that removes comments from the stream.""" + for kind, data, pos in stream: + if kind is genshi.core.COMMENT: + continue + yield kind, data, pos + + +def process_delta(data, delta): + if not delta.specific.delta: + return data + if delta.specific.delta == 'cat': + datalines = data.strip().split('\n') + for line in delta.data.split('\n'): + if not line: + continue + if line[0] == '+': + datalines.append(line[1:]) + elif line[0] == '-': + if line[1:] in datalines: + datalines.remove(line[1:]) + return "\n".join(datalines) + "\n" + elif delta.specific.delta == 'diff': + basehandle, basename = tempfile.mkstemp() + basefile = open(basename, 'w') + basefile.write(data) + basefile.close() + os.close(basehandle) + + cmd = ["patch", "-u", "-f", basefile.name] + patch = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + stderr = patch.communicate(input=delta.data)[1] + ret = patch.wait() + output = open(basefile.name, 'r').read() + os.unlink(basefile.name) + if ret >> 8 != 0: + logger.error("Error applying diff %s: %s" % (delta.name, stderr)) + raise Bcfg2.Server.Plugin.PluginExecutionError('delta', delta) + return output + + +class CfgMatcher: + + def __init__(self, fname): + name = re.escape(fname) + self.basefile_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+?)|.G(?P<prio>\d+)_(?P<group>\S+?))((?P<genshi>\\.genshi)|(?P<cheetah>\\.cheetah))?$' % name) + self.delta_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|\\.G(?P<prio>\d+)_(?P<group>\S+))\\.(?P<delta>(cat|diff))$' % name) + self.cat_count = fname.count(".cat") + self.diff_count = fname.count(".diff") + + def match(self, fname): + if fname.count(".cat") > self.cat_count \ + or fname.count('.diff') > self.diff_count: + return self.delta_reg.match(fname) + return self.basefile_reg.match(fname) + + +class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): + + def __init__(self, basename, path, entry_type, encoding): + Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, + entry_type, encoding) + self.specific = CfgMatcher(path.split('/')[-1]) + path = path + + def debug_log(self, message, flag=None): + if (flag is None and self.debug_flag) or flag: + logger.error(message) + + def sort_by_specific(self, one, other): + return cmp(one.specific, other.specific) + + def get_pertinent_entries(self, entry, metadata): + """return a list of all entries pertinent + to a client => [base, delta1, delta2] + """ + matching = [ent for ent in list(self.entries.values()) if \ + ent.specific.matches(metadata)] + matching.sort(key=operator.attrgetter('specific')) + # base entries which apply to a client + # (e.g. 
foo, foo.G##_groupname, foo.H_hostname) + base_files = [matching.index(m) for m in matching + if not m.specific.delta] + if not base_files: + msg = "No base file found for %s" % entry.get('name') + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + base = min(base_files) + used = matching[:base + 1] + used.reverse() + return used + + def bind_entry(self, entry, metadata): + self.bind_info_to_entry(entry, metadata) + used = self.get_pertinent_entries(entry, metadata) + basefile = used.pop(0) + if entry.get('perms').lower() == 'inherit': + # use on-disk permissions + fname = os.path.join(self.path, entry.get('name')) + entry.set('perms', + str(oct(stat.S_IMODE(os.stat(fname).st_mode)))) + if entry.tag == 'Path': + entry.set('type', 'file') + if basefile.name.endswith(".genshi"): + if not have_genshi: + msg = "Cfg: Genshi is not available: %s" % entry.get("name") + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + try: + template_cls = NewTextTemplate + loader = TemplateLoader() + template = loader.load(basefile.name, cls=template_cls, + encoding=self.encoding) + fname = entry.get('realname', entry.get('name')) + stream = template.generate(name=fname, + metadata=metadata, + path=basefile.name).filter(removecomment) + try: + data = stream.render('text', encoding=self.encoding, + strip_whitespace=False) + except TypeError: + data = stream.render('text', encoding=self.encoding) + if data == '': + entry.set('empty', 'true') + except Exception: + msg = "Cfg: genshi exception (%s): %s" % (entry.get("name"), + sys.exc_info()[1]) + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + elif basefile.name.endswith(".cheetah"): + if not have_cheetah: + msg = "Cfg: Cheetah is not available: %s" % entry.get("name") + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + try: + fname = entry.get('realname', entry.get('name')) + s = {'useStackFrames': False} + template = Cheetah.Template.Template(open(basefile.name).read(), + compilerSettings=s) + template.metadata = metadata + template.path = fname + template.source_path = basefile.name + data = template.respond() + if data == '': + entry.set('empty', 'true') + except Exception: + msg = "Cfg: cheetah exception (%s): %s" % (entry.get("name"), + sys.exc_info()[1]) + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + else: + data = basefile.data + for delta in used: + data = process_delta(data, delta) + if entry.get('encoding') == 'base64': + entry.text = binascii.b2a_base64(data) + else: + try: + entry.text = u_str(data, self.encoding) + except UnicodeDecodeError: + msg = "Failed to decode %s: %s" % (entry.get('name'), + sys.exc_info()[1]) + logger.error(msg) + logger.error("Please verify you are using the proper encoding.") + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + except ValueError: + msg = "Error in specification for %s: %s" % (entry.get('name'), + sys.exc_info()[1]) + logger.error(msg) + logger.error("You need to specify base64 encoding for %s." 
% + entry.get('name')) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + if entry.text in ['', None]: + entry.set('empty', 'true') + + def list_accept_choices(self, entry, metadata): + '''return a list of candidate pull locations''' + used = self.get_pertinent_entries(entry, metadata) + ret = [] + if used: + ret.append(used[0].specific) + if not ret[0].hostname: + ret.append(Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)) + return ret + + def build_filename(self, specific): + bfname = self.path + '/' + self.path.split('/')[-1] + if specific.all: + return bfname + elif specific.group: + return "%s.G%02d_%s" % (bfname, specific.prio, specific.group) + elif specific.hostname: + return "%s.H_%s" % (bfname, specific.hostname) + + def write_update(self, specific, new_entry, log): + if 'text' in new_entry: + name = self.build_filename(specific) + if os.path.exists("%s.genshi" % name): + msg = "Cfg: Unable to pull data for genshi types" + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + elif os.path.exists("%s.cheetah" % name): + msg = "Cfg: Unable to pull data for cheetah types" + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + try: + etext = new_entry['text'].encode(self.encoding) + except: + msg = "Cfg: Cannot encode content of %s as %s" % (name, + self.encoding) + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginExecutionError(msg) + open(name, 'w').write(etext) + self.debug_log("Wrote file %s" % name, flag=log) + badattr = [attr for attr in ['owner', 'group', 'perms'] + if attr in new_entry] + if badattr: + # check for info files and inform user of their removal + if os.path.exists(self.path + "/:info"): + logger.info("Removing :info file and replacing with " + "info.xml") + os.remove(self.path + "/:info") + if os.path.exists(self.path + "/info"): + logger.info("Removing info file and replacing with " + "info.xml") + os.remove(self.path + "/info") + metadata_updates = {} + metadata_updates.update(self.metadata) + for attr in badattr: + metadata_updates[attr] = new_entry.get(attr) + infoxml = lxml.etree.Element('FileInfo') + infotag = lxml.etree.SubElement(infoxml, 'Info') + [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \ + for attr in metadata_updates] + ofile = open(self.path + "/info.xml", "w") + ofile.write(lxml.etree.tostring(infoxml, pretty_print=True)) + ofile.close() + self.debug_log("Wrote file %s" % (self.path + "/info.xml"), + flag=log) + + +class Cfg(Bcfg2.Server.Plugin.GroupSpool, + Bcfg2.Server.Plugin.PullTarget): + """This generator in the configuration file repository for Bcfg2.""" + name = 'Cfg' + __author__ = 'bcfg-dev@mcs.anl.gov' + es_cls = CfgEntrySet + es_child_cls = Bcfg2.Server.Plugin.SpecificData + + def AcceptChoices(self, entry, metadata): + return self.entries[entry.get('name')].list_accept_choices(entry, metadata) + + def AcceptPullData(self, specific, new_entry, log): + return self.entries[new_entry.get('name')].write_update(specific, + new_entry, + log) diff --git a/src/lib/Bcfg2/Server/Plugins/Cvs.py b/src/lib/Bcfg2/Server/Plugins/Cvs.py new file mode 100644 index 000000000..6ce72acd2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Cvs.py @@ -0,0 +1,46 @@ +import os +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Cvs') + +class Cvs(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """CVS is a version plugin for dealing with Bcfg2 repository.""" + name = 'Cvs' + 
__author__ = 'bcfg-dev@mcs.anl.gov'
+    experimental = True
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        self.core = core
+        self.datastore = datastore
+
+        # path to cvs directory for Bcfg2 repo
+        cvs_dir = "%s/CVSROOT" % datastore
+
+        # Read revision from Bcfg2 repo
+        if os.path.isdir(cvs_dir):
+            self.get_revision()
+        else:
+            logger.error("%s is not a directory" % cvs_dir)
+            raise Bcfg2.Server.Plugin.PluginInitError
+
+        logger.debug("Initialized cvs plugin with cvs directory = %s" % cvs_dir)
+
+    def get_revision(self):
+        """Read cvs revision information for the Bcfg2 repository."""
+        try:
+            data = Popen("env LC_ALL=C cvs log",
+                         shell=True,
+                         cwd=self.datastore,
+                         stdout=PIPE).stdout.readlines()
+            revision = data[3].strip('\n')
+        except IndexError:
+            logger.error("Failed to read cvs log; disabling cvs support")
+            logger.error('''Ran command "cvs log %s"''' % (self.datastore))
+            logger.error("Got output: %s" % data)
+            raise Bcfg2.Server.Plugin.PluginInitError
+        return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py
new file mode 100644
index 000000000..999e078b9
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -0,0 +1,115 @@
+import binascii
+import difflib
+import logging
+import lxml.etree
+import platform
+import sys
+import time
+
+try:
+    from django.core.exceptions import MultipleObjectsReturned
+except ImportError:
+    pass
+
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Reports.importscript
+from Bcfg2.Server.Reports.reports.models import Client
+import Bcfg2.Server.Reports.settings
+from Bcfg2.Server.Reports.updatefix import update_database
+# for debugging output only
+logger = logging.getLogger('Bcfg2.Plugins.DBStats')
+
+class DBStats(Bcfg2.Server.Plugin.Plugin,
+              Bcfg2.Server.Plugin.ThreadedStatistics,
+              Bcfg2.Server.Plugin.PullSource):
+    name = 'DBStats'
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
+        Bcfg2.Server.Plugin.PullSource.__init__(self)
+        self.cpath = "%s/Metadata/clients.xml" % datastore
+        self.core = core
+        logger.debug("Searching for new models to add to the statistics database")
+        try:
+            update_database()
+        except Exception:
+            inst = sys.exc_info()[1]
+            logger.debug(str(inst))
+            logger.debug(str(type(inst)))
+
+    def handle_statistic(self, metadata, data):
+        newstats = data.find("Statistics")
+        newstats.set('time', time.asctime(time.localtime()))
+        # ick
+        data = lxml.etree.tostring(newstats)
+        ndx = lxml.etree.XML(data)
+        e = lxml.etree.Element('Node', name=metadata.hostname)
+        e.append(ndx)
+        container = lxml.etree.Element("ConfigStatistics")
+        container.append(e)
+
+        # FIXME need to build a metadata interface to expose a list of clients
+        start = time.time()
+        for i in [1, 2, 3]:
+            try:
+                Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clients_xml.xdata,
+                                                             container,
+                                                             self.core.encoding,
+                                                             0,
+                                                             logger,
+                                                             True,
+                                                             platform.node())
+                logger.info("Imported data for %s in %s seconds" \
+                            % (metadata.hostname, time.time() - start))
+                return
+            except MultipleObjectsReturned:
+                e = sys.exc_info()[1]
+                logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
+                             (metadata.hostname, e))
+                logger.error("DBStats: Data is inconsistent")
+                break
+            except:
+                logger.error("DBStats: Failed to write to db (lock); retrying",
+                             exc_info=1)
+        logger.error("DBStats: Retry limit failed for %s; aborting operation" \
+                     %
metadata.hostname) + + def GetExtra(self, client): + c_inst = Client.objects.filter(name=client)[0] + return [(a.entry.kind, a.entry.name) for a in + c_inst.current_interaction.extra()] + + def GetCurrentEntry(self, client, e_type, e_name): + try: + c_inst = Client.objects.filter(name=client)[0] + except IndexError: + self.logger.error("Unknown client: %s" % client) + raise Bcfg2.Server.Plugin.PluginExecutionError + result = c_inst.current_interaction.bad().filter(entry__kind=e_type, + entry__name=e_name) + if not result: + raise Bcfg2.Server.Plugin.PluginExecutionError + entry = result[0] + ret = [] + data = ('owner', 'group', 'perms') + for t in data: + if getattr(entry.reason, "current_%s" % t) == '': + ret.append(getattr(entry.reason, t)) + else: + ret.append(getattr(entry.reason, "current_%s" % t)) + if entry.reason.is_sensitive: + raise Bcfg2.Server.Plugin.PluginExecutionError + elif len(entry.reason.unpruned) != 0: + ret.append('\n'.join(entry.reason.unpruned)) + elif entry.reason.current_diff != '': + if entry.reason.is_binary: + ret.append(binascii.a2b_base64(entry.reason.current_diff)) + else: + ret.append('\n'.join(difflib.restore(\ + entry.reason.current_diff.split('\n'), 1))) + elif entry.reason.is_binary: + # If len is zero the object was too large to store + raise Bcfg2.Server.Plugin.PluginExecutionError + else: + ret.append(None) + return ret diff --git a/src/lib/Bcfg2/Server/Plugins/Darcs.py b/src/lib/Bcfg2/Server/Plugins/Darcs.py new file mode 100644 index 000000000..9fb9ff4f1 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Darcs.py @@ -0,0 +1,48 @@ +import os +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Darcs') + +class Darcs(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Darcs is a version plugin for dealing with Bcfg2 repos.""" + name = 'Darcs' + __author__ = 'bcfg-dev@mcs.anl.gov' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + # path to darcs directory for bcfg2 repo + darcs_dir = "%s/_darcs" % datastore + + # Read changeset from bcfg2 repo + if os.path.isdir(darcs_dir): + self.get_revision() + else: + logger.error("%s is not present." 
% darcs_dir) + raise Bcfg2.Server.Plugin.PluginInitError + + logger.debug("Initialized Darcs plugin with darcs directory = %s" % darcs_dir) + + def get_revision(self): + """Read Darcs changeset information for the Bcfg2 repository.""" + try: + data = Popen("env LC_ALL=C darcs changes", + shell=True, + cwd=self.datastore, + stdout=PIPE).stdout.readlines() + revision = data[0].strip('\n') + except: + logger.error("Failed to read darcs repository; disabling Darcs support") + logger.error('''Ran command "darcs changes" from directory "%s"''' % (self.datastore)) + logger.error("Got output: %s" % data) + raise Bcfg2.Server.Plugin.PluginInitError + return revision + diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py new file mode 100644 index 000000000..b432474f2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py @@ -0,0 +1,66 @@ +import logging +import lxml.etree +import sys + +import Bcfg2.Server.Plugin +logger = logging.getLogger('Bcfg2.Plugins.Decisions') + +class DecisionFile(Bcfg2.Server.Plugin.SpecificData): + def handle_event(self, event): + Bcfg2.Server.Plugin.SpecificData.handle_event(self, event) + self.contents = lxml.etree.XML(self.data) + + def get_decisions(self): + return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')] + +class DecisionSet(Bcfg2.Server.Plugin.EntrySet): + def __init__(self, path, fam, encoding): + """Container for decision specification files. + + Arguments: + - `path`: repository path + - `fam`: reference to the file monitor + - `encoding`: XML character encoding + + """ + pattern = '(white|black)list' + Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \ + DecisionFile, encoding) + try: + fam.AddMonitor(path, self) + except OSError: + e = sys.exc_info()[1] + logger.error('Adding filemonitor for %s failed. ' + 'Make sure directory exists' % path) + raise Bcfg2.Server.Plugin.PluginInitError(e) + + def HandleEvent(self, event): + if event.filename != self.path: + return self.handle_event(event) + + def GetDecisions(self, metadata, mode): + ret = [] + candidates = [c for c in self.get_matching(metadata) + if c.name.split('/')[-1].startswith(mode)] + for c in candidates: + ret += c.get_decisions() + return ret + +class Decisions(DecisionSet, + Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Decision): + name = 'Decisions' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + """Decisions plugins + + Arguments: + - `core`: Bcfg2.Core instance + - `datastore`: File repository location + + """ + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Decision.__init__(self) + DecisionSet.__init__(self, self.data, core.fam, core.encoding) + diff --git a/src/lib/Bcfg2/Server/Plugins/Defaults.py b/src/lib/Bcfg2/Server/Plugins/Defaults.py new file mode 100644 index 000000000..718192e2a --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Defaults.py @@ -0,0 +1,53 @@ +"""This generator provides rule-based entry mappings.""" + +import re +import Bcfg2.Server.Plugin +import Bcfg2.Server.Plugins.Rules + +class Defaults(Bcfg2.Server.Plugins.Rules.Rules, + Bcfg2.Server.Plugin.StructureValidator): + """Set default attributes on bound entries""" + name = 'Defaults' + __author__ = 'bcfg-dev@mcs.anl.gov' + + # Rules is a Generator that happens to implement all of the + # functionality we want, so we overload it, but Defaults should + # _not_ handle any entries; it does its stuff in the structure + # validation phase. 
so we overload Handle(s)Entry and HandleEvent + # to ensure that Defaults handles no entries, even though it's a + # Generator. + + def HandlesEntry(self, entry, metadata): + return False + + def HandleEntry(self, entry, metadata): + raise PluginExecutionError + + def HandleEvent(self, event): + Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event) + + def validate_structures(self, metadata, structures): + """ Apply defaults """ + for struct in structures: + for entry in struct.iter(): + if entry.tag.startswith("Bound"): + is_bound = True + entry.tag = entry.tag[5:] + else: + is_bound = False + try: + try: + self.BindEntry(entry, metadata) + except Bcfg2.Server.Plugin.PluginExecutionError: + # either no matching defaults (which is okay), + # or multiple matching defaults (which is not + # okay, but is logged). either way, we don't + # care about the error. + pass + finally: + if is_bound: + entry.tag = "Bound" + entry.tag + + def _regex_enabled(self): + """ Defaults depends on regex matching, so force it enabled """ + return True diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py new file mode 100644 index 000000000..9b848baae --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Deps.py @@ -0,0 +1,134 @@ +"""This plugin provides automatic dependency handling.""" + +import lxml.etree + +import Bcfg2.Server.Plugin + + +class DNode(Bcfg2.Server.Plugin.INode): + """DNode provides supports for single predicate types for dependencies.""" + raw = {'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"} + containers = ['Group'] + + def __init__(self, data, idict, parent=None): + self.data = data + self.contents = {} + if parent == None: + self.predicate = lambda x, d: True + else: + predicate = parent.predicate + if data.tag in list(self.raw.keys()): + self.predicate = eval(self.raw[data.tag] % + {'name': data.get('name')}, + {'predicate': predicate}) + else: + raise Exception + mytype = self.__class__ + self.children = [] + for item in data.getchildren(): + if item.tag in self.containers: + self.children.append(mytype(item, idict, self)) + else: + data = [(child.tag, child.get('name')) + for child in item.getchildren()] + try: + self.contents[item.tag][item.get('name')] = data + except KeyError: + self.contents[item.tag] = {item.get('name'): data} + + +class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc): + __node__ = DNode + + +class Deps(Bcfg2.Server.Plugin.PrioDir, + Bcfg2.Server.Plugin.StructureValidator): + name = 'Deps' + __author__ = 'bcfg-dev@mcs.anl.gov' + __child__ = DepXMLSrc + + # Override the default sort_order (of 500) so that this plugin + # gets handled after others running at the default. In particular, + # we want to run after Packages, so we can see the final set of + # packages that will be installed on the client. + sort_order = 750 + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore) + Bcfg2.Server.Plugin.StructureValidator.__init__(self) + self.cache = {} + + def HandleEvent(self, event): + self.cache = {} + Bcfg2.Server.Plugin.PrioDir.HandleEvent(self, event) + + def validate_structures(self, metadata, structures): + """Examine the passed structures and append any additional + prerequisite entries as defined by the files in Deps. 
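+
+        Results are cached per (entries, groups) pair, since the computed
+        prerequisites depend only on those inputs. A minimal Deps file
+        sketch (root tag and entry names are hypothetical) declaring that
+        a package drags in a service:
+
+            <Deps priority='0'>
+                <Package name='ssh'>
+                    <Service name='sshd'/>
+                </Package>
+            </Deps>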
+ """ + entries = [] + for structure in structures: + for entry in structure.getchildren(): + tag = entry.tag + if tag.startswith('Bound'): + tag = tag[5:] + if (tag, entry.get('name')) not in entries \ + and not isinstance(entry, lxml.etree._Comment): + entries.append((tag, entry.get('name'))) + entries.sort() + entries = tuple(entries) + gdata = list(metadata.groups) + gdata.sort() + gdata = tuple(gdata) + + # Check to see if we have cached the prereqs already + if (entries, gdata) in self.cache: + prereqs = self.cache[(entries, gdata)] + else: + prereqs = self.calculate_prereqs(metadata, entries) + self.cache[(entries, gdata)] = prereqs + + newstruct = lxml.etree.Element("Independent") + for tag, name in prereqs: + try: + lxml.etree.SubElement(newstruct, tag, name=name) + except: + self.logger.error("Failed to add dep entry for %s:%s" % (tag, name)) + structures.append(newstruct) + + + def calculate_prereqs(self, metadata, entries): + """Calculate the prerequisites defined in Deps for the passed + set of entries. + """ + prereqs = [] + [src.Cache(metadata) for src in self.entries.values()] + + toexamine = list(entries[:]) + while toexamine: + entry = toexamine.pop() + matching = [src for src in list(self.entries.values()) + if src.cache and entry[0] in src.cache[1] + and entry[1] in src.cache[1][entry[0]]] + if len(matching) > 1: + prio = [int(src.priority) for src in matching] + if prio.count(max(prio)) > 1: + self.logger.error("Found conflicting %s sources with same priority for %s, pkg %s" % + (entry[0].lower(), metadata.hostname, entry[1])) + raise Bcfg2.Server.Plugin.PluginExecutionError + index = prio.index(max(prio)) + matching = [matching[index]] + elif len(matching) == 1: + for prq in matching[0].cache[1][entry[0]][entry[1]]: + # XML comments seem to show up in the cache as a + # tuple with item 0 being callable. The logic + # below filters them out. Would be better to + # exclude them when we load the cache in the first + # place. 
+                    if prq not in prereqs and prq not in entries and not callable(prq[0]):
+                        toexamine.append(prq)
+                        prereqs.append(prq)
+            else:
+                continue
+
+        return prereqs
diff --git a/src/lib/Bcfg2/Server/Plugins/Editor.py b/src/lib/Bcfg2/Server/Plugins/Editor.py
new file mode 100644
index 000000000..c0d2cfbad
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Editor.py
@@ -0,0 +1,80 @@
+import Bcfg2.Server.Plugin
+import re
+import lxml.etree
+
+
+def linesub(pattern, repl, filestring):
+    """Substitutes instances of pattern with repl in filestring."""
+    if filestring is None:
+        filestring = ''
+    output = list()
+    fileread = filestring.split('\n')
+    for line in fileread:
+        output.append(re.sub(pattern, repl, line))
+    return '\n'.join(output)
+
+
+class EditDirectives(Bcfg2.Server.Plugin.SpecificData):
+    """This object handles the editing directives."""
+    def ProcessDirectives(self, input):
+        """Processes a list of edit directives on input."""
+        temp = input
+        for directive in self.data.split('\n'):
+            directive = directive.split(',')
+            temp = linesub(directive[0], directive[1], temp)
+        return temp
+
+
+class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
+    def __init__(self, basename, path, entry_type, encoding):
+        self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" % path.split('/')[-1])
+        Bcfg2.Server.Plugin.EntrySet.__init__(self,
+                                              basename,
+                                              path,
+                                              entry_type,
+                                              encoding)
+        self.inputs = dict()
+
+    def bind_entry(self, entry, metadata):
+        client = metadata.hostname
+        filename = entry.get('name')
+        permdata = {'owner': 'root',
+                    'group': 'root',
+                    'perms': '0644'}
+        [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+        entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
+        if not entry.text:
+            entry.set('empty', 'true')
+        try:
+            f = open('%s/%s.H_%s' % (self.path, filename.split('/')[-1], client), 'w')
+            f.write(entry.text)
+            f.close()
+        except IOError:
+            pass
+
+    def get_client_data(self, client):
+        return self.inputs[client]
+
+
+class Editor(Bcfg2.Server.Plugin.GroupSpool,
+             Bcfg2.Server.Plugin.Probing):
+    name = 'Editor'
+    __author__ = 'bcfg2-dev@mcs.anl.gov'
+    filename_pattern = 'edits'
+    es_child_cls = EditDirectives
+    es_cls = EditEntrySet
+
+    def GetProbes(self, _):
+        '''Return a set of probes for execution on client'''
+        probelist = list()
+        for name in list(self.entries.keys()):
+            probe = lxml.etree.Element('probe')
+            probe.set('name', name)
+            probe.set('source', "Editor")
+            probe.text = "cat %s" % name
+            probelist.append(probe)
+        return probelist
+
+    def ReceiveData(self, client, datalist):
+        for data in datalist:
+            self.entries[data.get('name')].inputs[client.hostname] = data.text
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
new file mode 100644
index 000000000..5beec7be0
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -0,0 +1,230 @@
+""" This module allows you to probe a client for a file, which is then
+added to the specification.
On subsequent runs, the file will be +replaced on the client if it is missing; if it has changed on the +client, it can either be updated in the specification or replaced on +the client """ + +import os +import sys +import errno +import binascii +import lxml.etree +import Bcfg2.Options +import Bcfg2.Server.Plugin + +probecode = """#!/usr/bin/env python + +import os +import pwd +import grp +import binascii +import lxml.etree + +path = "%s" + +if not os.path.exists(path): + print "%%s does not exist" %% path + raise SystemExit(1) + +stat = os.stat(path) +data = lxml.etree.Element("ProbedFileData", + name=path, + owner=pwd.getpwuid(stat[4])[0], + group=grp.getgrgid(stat[5])[0], + perms=oct(stat[0] & 07777)) +data.text = binascii.b2a_base64(open(path).read()) +print lxml.etree.tostring(data) +""" + +class FileProbesConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked, + Bcfg2.Server.Plugin.StructFile): + """ Config file handler for FileProbes """ + def __init__(self, filename, fam): + Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam) + Bcfg2.Server.Plugin.StructFile.__init__(self, filename) + + +class FileProbes(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Probing): + """ This module allows you to probe a client for a file, which is then + added to the specification. On subsequent runs, the file will be + replaced on the client if it is missing; if it has changed on the + client, it can either be updated in the specification or replaced on + the client """ + + name = 'FileProbes' + experimental = True + __author__ = 'chris.a.st.pierre@gmail.com' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Probing.__init__(self) + self.config = FileProbesConfig(os.path.join(self.data, 'config.xml'), + core.fam) + self.entries = dict() + self.probes = dict() + + def GetProbes(self, metadata): + """Return a set of probes for execution on client.""" + if metadata.hostname not in self.probes: + cfg = self.core.plugins['Cfg'] + self.entries[metadata.hostname] = dict() + self.probes[metadata.hostname] = [] + for entry in self.config.Match(metadata): + path = entry.get("name") + # do not probe for files that are already in Cfg and + # for which update is false; we can't possibly do + # anything with the data we get from such a probe + try: + if (entry.get('update', 'false').lower() == "false" and + cfg.entries[path].get_pertinent_entries(entry, + metadata)): + continue + except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError): + pass + self.entries[metadata.hostname][path] = entry + probe = lxml.etree.Element('probe', name=path, + source=self.name, + interpreter="/usr/bin/env python") + probe.text = probecode % path + self.probes[metadata.hostname].append(probe) + self.debug_log("Adding file probe for %s to %s" % + (path, metadata.hostname)) + return self.probes[metadata.hostname] + + def ReceiveData(self, metadata, datalist): + """Receive data from probe.""" + self.debug_log("Receiving file probe data from %s" % metadata.hostname) + + for data in datalist: + if data.text is None: + self.logger.error("Got null response to %s file probe from %s" % + (data.get('name'), metadata.hostname)) + else: + try: + self.write_data(lxml.etree.XML(data.text), metadata) + except lxml.etree.XMLSyntaxError: + # if we didn't get XML back from the probe, assume + # it's an error message + self.logger.error(data.text) + + def write_data(self, data, metadata): + """Write the probed file data to the bcfg2 specification.""" + filename = 
data.get("name") + contents = binascii.a2b_base64(data.text) + entry = self.entries[metadata.hostname][filename] + cfg = self.core.plugins['Cfg'] + specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname) + # we can't use os.path.join() for this because specific + # already has a leading /, which confuses os.path.join() + fileloc = "%s%s" % (cfg.data, os.path.join(filename, specific)) + + create = False + try: + cfg.entries[filename].bind_entry(entry, metadata) + except Bcfg2.Server.Plugin.PluginExecutionError: + create = True + + # get current entry data + if entry.text and entry.get("encoding") == "base64": + entrydata = binascii.a2b_base64(entry.text) + else: + entrydata = entry.text + + if create: + self.logger.info("Writing new probed file %s" % fileloc) + self.write_file(fileloc, contents) + self.verify_file(filename, contents, metadata) + infoxml = os.path.join("%s%s" % (cfg.data, filename), "info.xml") + self.write_infoxml(infoxml, entry, data) + elif entrydata == contents: + self.debug_log("Existing %s contents match probed contents" % + filename) + return + elif (entry.get('update', 'false').lower() == "true"): + self.logger.info("Writing updated probed file %s" % fileloc) + self.write_file(fileloc, contents) + self.verify_file(filename, contents, metadata) + else: + self.logger.info("Skipping updated probed file %s" % fileloc) + return + + def write_file(self, fileloc, contents): + try: + os.makedirs(os.path.dirname(fileloc)) + except OSError: + err = sys.exc_info()[1] + if err.errno == errno.EEXIST: + pass + else: + self.logger.error("Could not create parent directories for %s: " + "%s" % (fileloc, err)) + return + + try: + open(fileloc, 'wb').write(contents) + except IOError: + err = sys.exc_info()[1] + self.logger.error("Could not write %s: %s" % (fileloc, err)) + return + + def verify_file(self, filename, contents, metadata): + # Service the FAM events queued up by the key generation so + # the data structure entries will be available for binding. + # + # NOTE: We wait for up to ten seconds. There is some potential + # for race condition, because if the file monitor doesn't get + # notified about the new key files in time, those entries + # won't be available for binding. In practice, this seems + # "good enough". 
+        entry = self.entries[metadata.hostname][filename]
+        cfg = self.core.plugins['Cfg']
+        tries = 0
+        updated = False
+        while not updated:
+            if tries >= 10:
+                self.logger.error("%s still not registered" % filename)
+                return
+            self.core.fam.handle_events_in_interval(1)
+            try:
+                cfg.entries[filename].bind_entry(entry, metadata)
+            except Bcfg2.Server.Plugin.PluginExecutionError:
+                tries += 1
+                continue
+
+            # get current entry data
+            if entry.get("encoding") == "base64":
+                entrydata = binascii.a2b_base64(entry.text)
+            else:
+                entrydata = entry.text
+            if entrydata == contents:
+                updated = True
+            tries += 1
+
+    def write_infoxml(self, infoxml, entry, data):
+        """ write an info.xml for the file """
+        if os.path.exists(infoxml):
+            return
+
+        self.logger.info("Writing info.xml at %s for %s" %
+                         (infoxml, data.get("name")))
+        info = \
+            lxml.etree.Element("Info",
+                               owner=data.get("owner",
+                                              Bcfg2.Options.MDATA_OWNER.value),
+                               group=data.get("group",
+                                              Bcfg2.Options.MDATA_GROUP.value),
+                               perms=data.get("perms",
+                                              Bcfg2.Options.MDATA_PERMS.value),
+                               encoding=entry.get("encoding",
+                                                  Bcfg2.Options.ENCODING.value))
+
+        root = lxml.etree.Element("FileInfo")
+        root.append(info)
+        try:
+            open(infoxml, "w").write(lxml.etree.tostring(root,
+                                                         pretty_print=True))
+        except IOError:
+            err = sys.exc_info()[1]
+            self.logger.error("Could not write %s: %s" % (infoxml, err))
+            return
diff --git a/src/lib/Bcfg2/Server/Plugins/Fossil.py b/src/lib/Bcfg2/Server/Plugins/Fossil.py
new file mode 100644
index 000000000..1b1627688
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Fossil.py
@@ -0,0 +1,51 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Fossil')
+
+class Fossil(Bcfg2.Server.Plugin.Plugin,
+             Bcfg2.Server.Plugin.Version):
+    """Fossil is a version plugin for dealing with Bcfg2 repos."""
+    name = 'Fossil'
+    __author__ = 'bcfg-dev@mcs.anl.gov'
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        self.core = core
+        self.datastore = datastore
+
+        # path to fossil file for bcfg2 repo
+        fossil_file = "%s/_FOSSIL_" % datastore
+
+        # Read revision from bcfg2 repo
+        if os.path.isfile(fossil_file):
+            revision = self.get_revision()
+        elif not os.path.isdir(datastore):
+            logger.error("%s is not a directory" % datastore)
+            raise Bcfg2.Server.Plugin.PluginInitError
+        else:
+            logger.error("%s is not a file" % fossil_file)
+            raise Bcfg2.Server.Plugin.PluginInitError
+
+        logger.debug("Initialized Fossil.py plugin with %(ffile)s at revision %(frev)s" \
+                     % {'ffile': fossil_file, 'frev': revision})
+
+    def get_revision(self):
+        """Read fossil revision information for the Bcfg2 repository."""
+        try:
+            data = Popen("env LC_ALL=C fossil info",
+                         shell=True,
+                         cwd=self.datastore,
+                         stdout=PIPE).stdout.readlines()
+            revline = [line.split(': ')[1].strip() for line in data if \
+                       line.split(': ')[0].strip() == 'checkout'][-1]
+            revision = revline.split(' ')[0]
+        except IndexError:
+            logger.error("Failed to read fossil info; disabling fossil support")
+            logger.error('''Ran command "fossil info" from directory "%s"''' % (self.datastore))
+            logger.error("Got output: %s" % data)
+            raise Bcfg2.Server.Plugin.PluginInitError
+        return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/Git.py b/src/lib/Bcfg2/Server/Plugins/Git.py
new file mode 100644
index 000000000..8f8ea87f1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Git.py
@@ -0,0 +1,44 @@
+"""The Git plugin provides a revision
interface for Bcfg2 repos using git.""" + +import os +from dulwich.repo import Repo +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Git') + + +class Git(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Git is a version plugin for dealing with Bcfg2 repos.""" + name = 'Git' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + # path to git directory for bcfg2 repo + git_dir = "%s/.git" % datastore + + # Read revision from bcfg2 repo + if os.path.isdir(git_dir): + self.get_revision() + else: + logger.error("%s is not a directory" % git_dir) + raise Bcfg2.Server.Plugin.PluginInitError + + logger.debug("Initialized git plugin with git directory %s" % git_dir) + + def get_revision(self): + """Read git revision information for the Bcfg2 repository.""" + try: + repo = Repo(self.datastore) + revision = repo.head() + except: + logger.error("Failed to read git repository; disabling git support") + raise Bcfg2.Server.Plugin.PluginInitError + return revision diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py new file mode 100644 index 000000000..58b4d4afb --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py @@ -0,0 +1,124 @@ +import re +import logging +import lxml.etree +import Bcfg2.Server.Plugin + +class PackedDigitRange(object): + def __init__(self, digit_range): + self.sparse = list() + self.ranges = list() + for item in digit_range.split(','): + if '-' in item: + self.ranges.append(tuple([int(x) for x in item.split('-')])) + else: + self.sparse.append(int(item)) + + def includes(self, other): + iother = int(other) + if iother in self.sparse: + return True + for (start, end) in self.ranges: + if iother in range(start, end + 1): + return True + return False + + +class PatternMap(object): + range_finder = r'\[\[[\d\-,]+\]\]' + + def __init__(self, pattern, rangestr, groups): + self.pattern = pattern + self.rangestr = rangestr + self.groups = groups + if pattern != None: + self.re = re.compile(pattern) + self.process = self.process_re + elif rangestr != None: + if '\\' in rangestr: + raise Exception("Backslashes are not allowed in NameRanges") + self.process = self.process_range + self.re = re.compile('^' + re.sub(self.range_finder, '(\d+)', + rangestr)) + dmatcher = re.compile(re.sub(self.range_finder, + r'\[\[([\d\-,]+)\]\]', + rangestr)) + self.dranges = [PackedDigitRange(x) + for x in dmatcher.match(rangestr).groups()] + else: + raise Exception("No pattern or range given") + + def process_range(self, name): + match = self.re.match(name) + if not match: + return None + digits = match.groups() + for i in range(len(digits)): + if not self.dranges[i].includes(digits[i]): + return None + return self.groups + + def process_re(self, name): + match = self.re.match(name) + if not match: + return None + ret = list() + sub = match.groups() + for group in self.groups: + newg = group + for idx in range(len(sub)): + newg = newg.replace('$%s' % (idx + 1), sub[idx]) + ret.append(newg) + return ret + + +class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked): + __identifier__ = None + + def __init__(self, filename, fam): + Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam) + self.patterns = [] + self.logger = logging.getLogger(self.__class__.__name__) + + def 
Index(self): + Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self) + self.patterns = [] + for entry in self.xdata.xpath('//GroupPattern'): + try: + groups = [g.text for g in entry.findall('Group')] + for pat_ent in entry.findall('NamePattern'): + pat = pat_ent.text + self.patterns.append(PatternMap(pat, None, groups)) + for range_ent in entry.findall('NameRange'): + rng = range_ent.text + self.patterns.append(PatternMap(None, rng, groups)) + except: + self.logger.error("GroupPatterns: Failed to initialize pattern " + "%s" % entry.get('pattern')) + + def process_patterns(self, hostname): + ret = [] + for pattern in self.patterns: + try: + gn = pattern.process(hostname) + if gn is not None: + ret.extend(gn) + except: + self.logger.error("GroupPatterns: Failed to process pattern %s " + "for %s" % (pattern.pattern, hostname), + exc_info=1) + return ret + + +class GroupPatterns(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector): + name = "GroupPatterns" + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + self.config = PatternFile(self.data + '/config.xml', + core.fam) + + def get_additional_groups(self, metadata): + return self.config.process_patterns(metadata.hostname) diff --git a/src/lib/Bcfg2/Server/Plugins/Guppy.py b/src/lib/Bcfg2/Server/Plugins/Guppy.py new file mode 100644 index 000000000..eea92f30f --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Guppy.py @@ -0,0 +1,62 @@ +""" +This plugin is used to trace memory leaks within the bcfg2-server +process using Guppy. By default the remote debugger is started +when this plugin is enabled. The debugger can be shutoff in a running +process using "bcfg2-admin xcmd Guppy.Disable" and reenabled using +"bcfg2-admin xcmd Guppy.Enable". + +To attach the console run: + +python -c "from guppy import hpy;hpy().monitor()" + +For example: + +# python -c "from guppy import hpy;hpy().monitor()" +<Monitor> +*** Connection 1 opened *** +<Monitor> lc +CID PID ARGV + 1 25063 ['/usr/sbin/bcfg2-server', '-D', '/var/run/bcfg2-server.pid'] +<Monitor> sc 1 +Remote connection 1. To return to Monitor, type <Ctrl-C> or .<RETURN> +<Annex> int +Remote interactive console. To return to Annex, type '-'. +>>> hp.heap() +... 
+ + +""" +import re +import Bcfg2.Server.Plugin + +class Guppy(Bcfg2.Server.Plugin.Plugin): + """Guppy is a debugging plugin to help trace memory leaks""" + name = 'Guppy' + __author__ = 'bcfg-dev@mcs.anl.gov' + + experimental = True + __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable','Disable'] + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + + self.Enable() + + def Enable(self): + """Enable remote debugging""" + try: + from guppy.heapy import Remote + Remote.on() + except: + self.logger.error("Failed to create Heapy context") + raise Bcfg2.Server.Plugin.PluginInitError + + def Disable(self): + """Disable remote debugging""" + try: + from guppy.heapy import Remote + Remote.off() + except: + self.logger.error("Failed to disable Heapy") + raise Bcfg2.Server.Plugin.PluginInitError + diff --git a/src/lib/Bcfg2/Server/Plugins/Hg.py b/src/lib/Bcfg2/Server/Plugins/Hg.py new file mode 100644 index 000000000..0c3537613 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Hg.py @@ -0,0 +1,45 @@ +import os +from mercurial import ui, hg +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Mercurial') + +class Hg(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Mercurial is a version plugin for dealing with Bcfg2 repository.""" + name = 'Mercurial' + __author__ = 'bcfg-dev@mcs.anl.gov' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + # path to hg directory for Bcfg2 repo + hg_dir = "%s/.hg" % datastore + + # Read changeset from bcfg2 repo + if os.path.isdir(hg_dir): + self.get_revision() + else: + logger.error("%s is not present." % hg_dir) + raise Bcfg2.Server.Plugin.PluginInitError + + logger.debug("Initialized hg plugin with hg directory = %s" % hg_dir) + + def get_revision(self): + """Read hg revision information for the Bcfg2 repository.""" + try: + repo_path = "%s/" % self.datastore + repo = hg.repository(ui.ui(), repo_path) + tip = repo.changelog.tip() + revision = repo.changelog.rev(tip) + except: + logger.error("Failed to read hg repository; disabling mercurial support") + raise Bcfg2.Server.Plugin.PluginInitError + return revision + diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py new file mode 100644 index 000000000..e9c1c1cff --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Hostbase.py @@ -0,0 +1,593 @@ +""" +This file provides the Hostbase plugin. 
+It manages DNS/DHCP/NIS host information.
+"""
+
+import os
+os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
+from lxml.etree import Element, SubElement
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
+from time import strftime
+from sets import Set
+from django.template import Context, loader
+from django.db import connection
+import re
+# Compatibility imports
+from Bcfg2.Bcfg2Py3k import StringIO
+
+
+class Hostbase(Bcfg2.Server.Plugin.Plugin,
+               Bcfg2.Server.Plugin.Structure,
+               Bcfg2.Server.Plugin.Generator):
+    """The Hostbase plugin handles host/network info."""
+    name = 'Hostbase'
+    __author__ = 'bcfg-dev@mcs.anl.gov'
+    filepath = '/my/adm/hostbase/files/bind'
+
+    def __init__(self, core, datastore):
+
+        self.ready = False
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        Bcfg2.Server.Plugin.Structure.__init__(self)
+        Bcfg2.Server.Plugin.Generator.__init__(self)
+        files = ['zone.tmpl',
+                 'reversesoa.tmpl',
+                 'named.tmpl',
+                 'reverseappend.tmpl',
+                 'dhcpd.tmpl',
+                 'hosts.tmpl',
+                 'hostsappend.tmpl']
+        self.filedata = {}
+        self.dnsservers = []
+        self.dhcpservers = []
+        self.templates = {'zone': loader.get_template('zone.tmpl'),
+                          'reversesoa': loader.get_template('reversesoa.tmpl'),
+                          'named': loader.get_template('named.tmpl'),
+                          'namedviews': loader.get_template('namedviews.tmpl'),
+                          'reverseapp': loader.get_template('reverseappend.tmpl'),
+                          'dhcp': loader.get_template('dhcpd.tmpl'),
+                          'hosts': loader.get_template('hosts.tmpl'),
+                          'hostsapp': loader.get_template('hostsappend.tmpl'),
+                          }
+        self.Entries['ConfigFile'] = {}
+        self.__rmi__ = ['rebuildState']
+        try:
+            self.rebuildState(None)
+        except:
+            raise PluginInitError
+
+    def FetchFile(self, entry, metadata):
+        """Return prebuilt file data."""
+        fname = entry.get('name').split('/')[-1]
+        if fname not in self.filedata:
+            raise PluginExecutionError
+        perms = {'owner': 'root',
+                 'group': 'root',
+                 'perms': '644'}
+        [entry.attrib.__setitem__(key, value)
+         for (key, value) in list(perms.items())]
+        entry.text = self.filedata[fname]
+
+    def BuildStructures(self, metadata):
+        """Build hostbase bundle."""
+        if (metadata.hostname not in self.dnsservers and
+            metadata.hostname not in self.dhcpservers):
+            return []
+        output = Element("Bundle", name='hostbase')
+        if metadata.hostname in self.dnsservers:
+            for configfile in self.Entries['ConfigFile']:
+                if re.search('/etc/bind/', configfile):
+                    SubElement(output, "ConfigFile", name=configfile)
+        if metadata.hostname in self.dhcpservers:
+            SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf")
+        return [output]
+
+    def rebuildState(self, _):
+        """Pre-cache all state information for hostbase config files;
+        callable as an XMLRPC function.
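+
+        For example, the rebuild can be triggered remotely via the xcmd
+        admin mode (hypothetical invocation, following the pattern the
+        Guppy plugin documents above):
+
+            bcfg2-admin xcmd Hostbase.rebuildState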
+ + """ + self.buildZones() + self.buildDHCP() + self.buildHosts() + self.buildHostsLPD() + self.buildPrinters() + self.buildNetgroups() + return True + + def buildZones(self): + """Pre-build and stash zone files.""" + cursor = connection.cursor() + + cursor.execute("SELECT id, serial FROM hostbase_zone") + zones = cursor.fetchall() + + for zone in zones: + # update the serial number for all zone files + todaydate = (strftime('%Y%m%d')) + try: + if todaydate == str(zone[1])[:8]: + serial = zone[1] + 1 + else: + serial = int(todaydate) * 100 + except (KeyError): + serial = int(todaydate) * 100 + cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0])) + + cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'") + zones = cursor.fetchall() + + iplist = [] + hosts = {} + + for zone in zones: + zonefile = StringIO() + externalzonefile = StringIO() + cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z + INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id + WHERE z.zone_id = \'%s\'""" % zone[0]) + nameservers = cursor.fetchall() + cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z + INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id + WHERE z.zone_id = \'%s\'""" % zone[0]) + addresses = cursor.fetchall() + cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z + INNER JOIN hostbase_mx m ON z.mx_id = m.id + WHERE z.zone_id = \'%s\'""" % zone[0]) + mxs = cursor.fetchall() + context = Context({ + 'zone': zone, + 'nameservers': nameservers, + 'addresses': addresses, + 'mxs': mxs + }) + zonefile.write(self.templates['zone'].render(context)) + externalzonefile.write(self.templates['zone'].render(context)) + + querystring = """SELECT h.hostname, p.ip_addr, + n.name, c.cname, m.priority, m.mx, n.dns_view + FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON p.id = n.ip_id) + INNER JOIN hostbase_name_mxs x ON n.id = x.name_id) + INNER JOIN hostbase_mx m ON m.id = x.mx_id) + LEFT JOIN hostbase_cname c ON n.id = c.name_id + WHERE n.name LIKE '%%%%%s' + AND h.status = 'active' + ORDER BY h.hostname, n.name, p.ip_addr + """ % zone[1] + cursor.execute(querystring) + zonehosts = cursor.fetchall() + prevhost = (None, None, None, None) + cnames = StringIO() + cnamesexternal = StringIO() + for host in zonehosts: + if not host[2].split(".", 1)[1] == zone[1]: + zonefile.write(cnames.getvalue()) + externalzonefile.write(cnamesexternal.getvalue()) + cnames = StringIO() + cnamesexternal = StringIO() + continue + if not prevhost[1] == host[1] or not prevhost[2] == host[2]: + zonefile.write(cnames.getvalue()) + externalzonefile.write(cnamesexternal.getvalue()) + cnames = StringIO() + cnamesexternal = StringIO() + zonefile.write("%-32s%-10s%-32s\n" % + (host[2].split(".", 1)[0], 'A', host[1])) + zonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + if host[6] == 'global': + externalzonefile.write("%-32s%-10s%-32s\n" % + (host[2].split(".", 1)[0], 'A', host[1])) + externalzonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + elif not prevhost[5] == host[5]: + zonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + if host[6] == 'global': + externalzonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + + if host[3]: + try: + if host[3].split(".", 1)[1] == zone[1]: + cnames.write("%-32s%-10s%-32s\n" % + 
(host[3].split(".", 1)[0], + 'CNAME', host[2].split(".", 1)[0])) + if host[6] == 'global': + cnamesexternal.write("%-32s%-10s%-32s\n" % + (host[3].split(".", 1)[0], + 'CNAME', host[2].split(".", 1)[0])) + else: + cnames.write("%-32s%-10s%-32s\n" % + (host[3] + ".", + 'CNAME', + host[2].split(".", 1)[0])) + if host[6] == 'global': + cnamesexternal.write("%-32s%-10s%-32s\n" % + (host[3] + ".", + 'CNAME', + host[2].split(".", 1)[0])) + + except: + pass + prevhost = host + zonefile.write(cnames.getvalue()) + externalzonefile.write(cnamesexternal.getvalue()) + zonefile.write("\n\n%s" % zone[9]) + externalzonefile.write("\n\n%s" % zone[9]) + self.filedata[zone[1]] = zonefile.getvalue() + self.filedata[zone[1] + ".external"] = externalzonefile.getvalue() + zonefile.close() + externalzonefile.close() + self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile + self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile + + cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'") + reversezones = cursor.fetchall() + + reversenames = [] + for reversezone in reversezones: + cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z + INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id + WHERE z.zone_id = \'%s\'""" % reversezone[0]) + reverse_nameservers = cursor.fetchall() + + context = Context({ + 'inaddr': reversezone[1].rstrip('.rev'), + 'zone': reversezone, + 'nameservers': reverse_nameservers, + }) + + self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context) + self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context) + self.filedata[reversezone[1]] += reversezone[9] + self.filedata[reversezone[1] + '.external'] += reversezone[9] + + subnet = reversezone[1].split(".") + subnet.reverse() + reversenames.append((reversezone[1].rstrip('.rev'), ".".join(subnet[1:]))) + + for filename in reversenames: + cursor.execute(""" + SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h + INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON n.ip_id = p.id + WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr + """ % filename[1]) + reversehosts = cursor.fetchall() + zonefile = StringIO() + externalzonefile = StringIO() + if len(filename[0].split(".")) == 2: + originlist = [] + [originlist.append((".".join([ip[1].split(".")[2], filename[0]]), + ".".join([filename[1], ip[1].split(".")[2]]))) + for ip in reversehosts + if (".".join([ip[1].split(".")[2], filename[0]]), + ".".join([filename[1], ip[1].split(".")[2]])) not in originlist] + for origin in originlist: + hosts = [(host[1].split("."), host[0]) + for host in reversehosts + if host[1].rstrip('0123456789').rstrip('.') == origin[1]] + hosts_external = [(host[1].split("."), host[0]) + for host in reversehosts + if (host[1].rstrip('0123456789').rstrip('.') == origin[1] + and host[2] == 'global')] + context = Context({ + 'hosts': hosts, + 'inaddr': origin[0], + 'fileorigin': filename[0], + }) + zonefile.write(self.templates['reverseapp'].render(context)) + context = Context({ + 'hosts': hosts_external, + 'inaddr': origin[0], + 'fileorigin': filename[0], + }) + externalzonefile.write(self.templates['reverseapp'].render(context)) + else: + originlist = [filename[0]] + hosts = [(host[1].split("."), host[0]) + for host in reversehosts + if (host[1].split("."), host[0]) not in hosts] + 
hosts_external = [(host[1].split("."), host[0]) + for host in reversehosts + if ((host[1].split("."), host[0]) not in hosts_external + and host[2] == 'global')] + context = Context({ + 'hosts': hosts, + 'inaddr': filename[0], + 'fileorigin': None, + }) + zonefile.write(self.templates['reverseapp'].render(context)) + context = Context({ + 'hosts': hosts_external, + 'inaddr': filename[0], + 'fileorigin': None, + }) + externalzonefile.write(self.templates['reverseapp'].render(context)) + self.filedata['%s.rev' % filename[0]] += zonefile.getvalue() + self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue() + zonefile.close() + externalzonefile.close() + self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile + self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile + + ## here's where the named.conf file gets written + context = Context({ + 'zones': zones, + 'reverses': reversenames, + }) + self.filedata['named.conf'] = self.templates['named'].render(context) + self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile + self.filedata['named.conf.views'] = self.templates['namedviews'].render(context) + self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile + + def buildDHCP(self): + """Pre-build dhcpd.conf and stash in the filedata table.""" + + # fetches all the hosts with DHCP == True + cursor = connection.cursor() + cursor.execute(""" + SELECT hostname, mac_addr, ip_addr + FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip ip ON i.id = ip.interface_id + WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> '' + AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown' + ORDER BY h.hostname, i.mac_addr + """) + + dhcphosts = cursor.fetchall() + count = 0 + hosts = [] + hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]] + if len(dhcphosts) > 1: + for x in range(1, len(dhcphosts)): + # if an interface has 2 or more ip addresses + # adds the ip to the current interface + if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]: + hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]]) + # if a host has 2 or more interfaces + # writes the current one and grabs the next + elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]: + hosts.append(hostdata) + count += 1 + hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]] + # new host found, writes current data to the template + else: + hosts.append(hostdata) + count = 0 + hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]] + #makes sure the last of the data gets written out + if hostdata not in hosts: + hosts.append(hostdata) + + context = Context({ + 'hosts': hosts, + 'numips': len(hosts), + }) + + self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context) + self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile + + def buildHosts(self): + """Pre-build and stash /etc/hosts file.""" + + append_data = [] + + cursor = connection.cursor() + cursor.execute(""" + SELECT hostname FROM hostbase_host ORDER BY hostname + """) + hostbase = cursor.fetchall() + domains = [host[0].split(".", 1)[1] for host in hostbase] + domains_set = Set(domains) + domain_data = [(domain, domains.count(domain)) for domain in domains_set] + domain_data.sort() + + cursor.execute(""" + SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr + """) + ips = 
cursor.fetchall() + three_octets = [ip[0].rstrip('0123456789').rstrip('.') \ + for ip in ips] + three_octets_set = Set(three_octets) + three_octets_data = [(octet, three_octets.count(octet)) \ + for octet in three_octets_set] + three_octets_data.sort() + + for three_octet in three_octets_data: + querystring = """SELECT h.hostname, h.primary_user, + p.ip_addr, n.name, c.cname + FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON p.id = n.ip_id) + LEFT JOIN hostbase_cname c ON n.id = c.name_id + WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0] + cursor.execute(querystring) + tosort = list(cursor.fetchall()) + tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1]))) + append_data.append((three_octet, tuple(tosort))) + + two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets] + two_octets_set = Set(two_octets) + two_octets_data = [(octet, two_octets.count(octet)) + for octet in two_octets_set] + two_octets_data.sort() + + context = Context({ + 'domain_data': domain_data, + 'three_octets_data': three_octets_data, + 'two_octets_data': two_octets_data, + 'three_octets': three_octets, + 'num_ips': len(three_octets), + }) + + self.filedata['hosts'] = self.templates['hosts'].render(context) + + for subnet in append_data: + ips = [] + simple = True + namelist = [name.split('.', 1)[0] for name in [subnet[1][0][3]]] + cnamelist = [] + if subnet[1][0][4]: + cnamelist.append(subnet[1][0][4].split('.', 1)[0]) + simple = False + appenddata = subnet[1][0] + for ip in subnet[1][1:]: + if appenddata[2] == ip[2]: + namelist.append(ip[3].split('.', 1)[0]) + if ip[4]: + cnamelist.append(ip[4].split('.', 1)[0]) + simple = False + appenddata = ip + else: + if appenddata[0] == ip[0]: + simple = False + ips.append((appenddata[2], appenddata[0], Set(namelist), + cnamelist, simple, appenddata[1])) + appenddata = ip + simple = True + namelist = [ip[3].split('.', 1)[0]] + cnamelist = [] + if ip[4]: + cnamelist.append(ip[4].split('.', 1)[0]) + simple = False + ips.append((appenddata[2], appenddata[0], Set(namelist), + cnamelist, simple, appenddata[1])) + context = Context({ + 'subnet': subnet[0], + 'ips': ips, + }) + self.filedata['hosts'] += self.templates['hostsapp'].render(context) + self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile + + def buildPrinters(self): + """The /mcs/etc/printers.data file""" + header = """# This file is automatically generated. DO NOT EDIT IT! 
+#
+Name            Room        User                            Type                      Notes
+============== ========== ============================== ======================== ====================
+"""
+
+        cursor = connection.cursor()
+        # fetches all the printers from the database
+        cursor.execute("""
+        SELECT printq, location, primary_user, comments
+        FROM hostbase_host
+        WHERE whatami='printer' AND printq <> '' AND status = 'active'
+        ORDER BY printq
+        """)
+        printers = cursor.fetchall()
+
+        printersfile = header
+        for printer in printers:
+            # splits up the printq line and gets the
+            # correct description out of the comments section
+            temp = printer[3].split('\n')
+            for printq in re.split(',[ ]*', printer[0]):
+                if len(temp) > 1:
+                    printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
+                                     (printq, printer[1], printer[2], temp[1], temp[0]))
+                else:
+                    printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
+                                     (printq, printer[1], printer[2], '', printer[3]))
+        self.filedata['printers.data'] = printersfile
+        self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
+
+    def buildHostsLPD(self):
+        """Creates the /mcs/etc/hosts.lpd file"""
+
+        # this header needs to be changed to be more generic
+        header = """+@machines
++@all-machines
+achilles.ctd.anl.gov
+raven.ops.anl.gov
+seagull.hr.anl.gov
+parrot.ops.anl.gov
+condor.ops.anl.gov
+delphi.esh.anl.gov
+anlcv1.ctd.anl.gov
+anlvms.ctd.anl.gov
+olivia.ctd.anl.gov\n\n"""
+
+        cursor = connection.cursor()
+        cursor.execute("""
+        SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active'
+        ORDER BY hostname""")
+        redmachines = list(cursor.fetchall())
+        cursor.execute("""
+        SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+        INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
+        WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active'
+        """)
+        redmachines.extend(list(cursor.fetchall()))
+        cursor.execute("""
+        SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active'
+        ORDER BY hostname""")
+        winmachines = list(cursor.fetchall())
+        cursor.execute("""
+        SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+        INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
+        WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active'
+        """)
+        winmachines.extend(list(cursor.fetchall()))
+        hostslpdfile = header
+        for machine in redmachines:
+            hostslpdfile += machine[0] + "\n"
+        hostslpdfile += "\n"
+        for machine in winmachines:
+            hostslpdfile += machine[0] + "\n"
+        self.filedata['hosts.lpd'] = hostslpdfile
+        self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
+
+    def buildNetgroups(self):
+        """Makes the *-machine files"""
+        header = """###################################################################
+# This file lists hosts in the '%s' machine netgroup; it is
+# automatically generated. DO NOT EDIT THIS FILE!
+#
+# Number of hosts in '%s' machine netgroup: %i
+#\n\n"""
+
+        cursor = connection.cursor()
+        # fetches all the hosts with valid netgroup entries
+        cursor.execute("""
+        SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h
+        INNER JOIN hostbase_interface i ON h.id = i.host_id)
+        INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+        INNER JOIN hostbase_name n ON p.id = n.ip_id
+        WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active'
+        ORDER BY h.netgroup, h.hostname
+        """)
+        nameslist = cursor.fetchall()
+        # gets the first host and initializes the hash
+        hostdata = nameslist[0]
+        netgroups = {hostdata[2]: [hostdata[0]]}
+        for row in nameslist:
+            # if new netgroup, create it
+            if row[2] not in netgroups:
+                netgroups.update({row[2]: []})
+            # if it belongs in the netgroup and has multiple interfaces, put them in
+            if hostdata[0] == row[0] and row[3]:
+                netgroups[row[2]].append(row[1])
+                hostdata = row
+            # if it's a new host, write the old one to the hash
+            elif hostdata[0] != row[0]:
+                netgroups[row[2]].append(row[0])
+                hostdata = row
+
+        for netgroup in netgroups:
+            fileoutput = StringIO()
+            fileoutput.write(header % (netgroup, netgroup, len(netgroups[netgroup])))
+            for each in netgroups[netgroup]:
+                fileoutput.write(each + "\n")
+            self.filedata['%s-machines' % netgroup] = fileoutput.getvalue()
+            fileoutput.close()
+            self.Entries['ConfigFile']['/my/adm/hostbase/makenets/machines/%s-machines' % netgroup] = self.FetchFile
+
+        cursor.execute("""
+        UPDATE hostbase_host SET dirty=0
+        """)
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
new file mode 100644
index 000000000..29abf5b13
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -0,0 +1,245 @@
+import imp
+import logging
+import sys
+import time
+import traceback
+import Bcfg2.Options
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.Ldap')
+
+try:
+    import ldap
+except:
+    logger.error("Unable to load ldap module. Is python-ldap installed?")
+    raise ImportError
+
+# time in seconds between retries after a failed LDAP connection
+RETRY_DELAY = 5
+# how many times to try reaching the LDAP server if a connection is broken;
+# at the very minimum, one retry is needed to handle a restarted LDAP daemon
+RETRY_COUNT = 3
+
+SCOPE_MAP = {
+    "base": ldap.SCOPE_BASE,
+    "one": ldap.SCOPE_ONELEVEL,
+    "sub": ldap.SCOPE_SUBTREE,
+}
+
+LDAP_QUERIES = []
+
+def register_query(query):
+    LDAP_QUERIES.append(query)
+
+class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
+    """
+    Config file for the Ldap plugin
+
+    The config file cannot be 'parsed' in the traditional sense, as we would
+    need some serious type-checking ugliness just to get at the LdapQuery
+    subclasses. The alternative would be to have the user create a list with
+    a predefined name that contains all queries.
+    The approach implemented here is having the user call a registering
+    function that updates a global variable in this module.
+    """
+    def __init__(self, filename, fam):
+        self.filename = filename
+        Bcfg2.Server.Plugin.FileBacked.__init__(self, self.filename)
+        fam.AddMonitor(self.filename, self)
+
+    def Index(self):
+        """
+        Reregisters the queries in the config file
+
+        The config will take care of actually registering the queries,
+        so we just load it once and don't keep it.
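+
+        A config.py might look roughly like this (a sketch only; the
+        host, DNs and filter are illustrative, but the classes and the
+        register_query() call are the ones defined in this module):
+
+            from Bcfg2.Server.Plugins.Ldap import (
+                LdapConnection, LdapQuery, register_query)
+
+            conn = LdapConnection(host="ldap.example.com",
+                                  binddn="cn=bcfg2,dc=example,dc=com",
+                                  bindpw="secret")
+
+            class MailQuery(LdapQuery):
+                name = "mail"
+                base = "ou=people,dc=example,dc=com"
+                scope = "one"
+                attrs = ["mail"]
+                connection = conn
+
+                def prepare_query(self, metadata):
+                    self.filter = "(cn=%s)" % metadata.hostname
+
+            register_query(MailQuery)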
+ """ + global LDAP_QUERIES + LDAP_QUERIES = [] + imp.load_source("ldap_cfg", self.filename) + +class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector): + """ + The Ldap plugin allows adding data from an LDAP server to your metadata. + """ + name = "Ldap" + experimental = True + debug_flag = False + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + self.config = ConfigFile(self.data + "/config.py", core.fam) + + def debug_log(self, message, flag = None): + if (flag is None) and self.debug_flag or flag: + self.logger.error(message) + + def get_additional_data(self, metadata): + query = None + try: + data = {} + self.debug_log("LdapPlugin debug: found queries " + + str(LDAP_QUERIES)) + for QueryClass in LDAP_QUERIES: + query = QueryClass() + if query.is_applicable(metadata): + self.debug_log("LdapPlugin debug: processing query '" + + query.name + "'") + data[query.name] = query.get_result(metadata) + else: + self.debug_log("LdapPlugin debug: query '" + query.name + + "' not applicable to host '" + metadata.hostname + "'") + return data + except Exception: + if hasattr(query, "name"): + Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " + + "Exception during processing of query named '" + + str(query.name) + + "', query results will be empty" + + " and may cause bind failures") + for line in traceback.format_exception(sys.exc_info()[0], + sys.exc_info()[1], + sys.exc_info()[2]): + Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " + + line.replace("\n", "")) + return {} + +class LdapConnection(object): + """ + Connection to an LDAP server. + """ + def __init__(self, host = "localhost", port = 389, + binddn = None, bindpw = None): + self.host = host + self.port = port + self.binddn = binddn + self.bindpw = bindpw + self.conn = None + + def __del__(self): + if self.conn: + self.conn.unbind() + + def init_conn(self): + self.conn = ldap.initialize(self.url) + if self.binddn is not None and self.bindpw is not None: + self.conn.simple_bind_s(self.binddn, self.bindpw) + + def run_query(self, query): + result = None + for attempt in range(RETRY_COUNT + 1): + if attempt >= 1: + Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " + + "LDAP server down (retry " + str(attempt) + "/" + + str(RETRY_COUNT) + ")") + try: + if not self.conn: + self.init_conn() + result = self.conn.search_s( + query.base, + SCOPE_MAP[query.scope], + query.filter, + query.attrs, + ) + break + except ldap.SERVER_DOWN: + self.conn = None + time.sleep(RETRY_DELAY) + return result + + @property + def url(self): + return "ldap://" + self.host + ":" + str(self.port) + +class LdapQuery(object): + """ + Query referencing an LdapConnection and providing several + methods for query manipulation. + """ + + name = "unknown" + base = "" + scope = "sub" + filter = "(objectClass=*)" + attrs = None + connection = None + result = None + + def __unicode__(self): + return "LdapQuery:" + self.name + + def is_applicable(self, metadata): + """ + Overrideable method to determine if the query is to be executed for + the given metadata object. + Defaults to true. + """ + return True + + def prepare_query(self, metadata): + """ + Overrideable method to alter the query based on metadata. + Defaults to doing nothing. + + In most cases, you will do something like + + self.filter = "(cn=" + metadata.hostname + ")" + + here. + """ + pass + + def process_result(self, metadata): + """ + Overrideable method to post-process the query result. 
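+        (For example, an override might flatten the (dn, attrs) tuples
+        that python-ldap's search_s returns into a plain list of
+        attribute values.)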
+        Defaults to returning the unaltered result.
+        """
+        return self.result
+
+    def get_result(self, metadata):
+        """
+        Method to handle preparing, executing and processing the query.
+        """
+        if isinstance(self.connection, LdapConnection):
+            self.prepare_query(metadata)
+            self.result = self.connection.run_query(self)
+            self.result = self.process_result(metadata)
+            return self.result
+        else:
+            Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+                "No valid connection defined for query " + str(self))
+            return None
+
+class LdapSubQuery(LdapQuery):
+    """
+    SubQueries are meant for internal use only and are not added
+    to the metadata object. They are useful for situations where
+    you need to run more than one query to obtain some data.
+    """
+    def prepare_query(self, metadata, **kwargs):
+        """
+        Overrideable method to alter the query based on metadata.
+        Defaults to doing nothing.
+        """
+        pass
+
+    def process_result(self, metadata, **kwargs):
+        """
+        Overrideable method to post-process the query result.
+        Defaults to returning the unaltered result.
+        """
+        return self.result
+
+    def get_result(self, metadata, **kwargs):
+        """
+        Method to handle preparing, executing and processing the query.
+        """
+        if isinstance(self.connection, LdapConnection):
+            self.prepare_query(metadata, **kwargs)
+            self.result = self.connection.run_query(self)
+            return self.process_result(metadata, **kwargs)
+        else:
+            Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+                "No valid connection defined for query " + str(self))
+            return None
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
new file mode 100644
index 000000000..970126b80
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -0,0 +1,866 @@
+"""
+This file stores persistent metadata for the Bcfg2 Configuration Repository.
+"""
+
+import copy
+import fcntl
+import lxml.etree
+import os
+import os.path
+import socket
+import sys
+import time
+
+import Bcfg2.Server.FileMonitor
+import Bcfg2.Server.Plugin
+
+
+def locked(fd):
+    """Acquire a lock on a file; return True if the lock could not be obtained."""
+    try:
+        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+    except IOError:
+        return True
+    return False
+
+
+class MetadataConsistencyError(Exception):
+    """This error gets raised when metadata is internally inconsistent."""
+    pass
+
+
+class MetadataRuntimeError(Exception):
+    """This error is raised when the metadata engine
+    is called prior to reading enough data.
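+    In practice this means that groups.xml and clients.xml have not
+    both been loaded yet.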
+ """ + pass + + +class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked): + """Handles xml config files and all XInclude statements""" + def __init__(self, metadata, watch_clients, basefile): + Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, + os.path.join(metadata.data, + basefile), + metadata.core.fam) + self.metadata = metadata + self.basefile = basefile + self.should_monitor = watch_clients + self.data = None + self.basedata = None + self.basedir = metadata.data + self.logger = metadata.logger + self.pseudo_monitor = isinstance(metadata.core.fam, + Bcfg2.Server.FileMonitor.Pseudo) + + @property + def xdata(self): + if not self.data: + raise MetadataRuntimeError + return self.data + + @property + def base_xdata(self): + if not self.basedata: + raise MetadataRuntimeError + return self.basedata + + def add_monitor(self, fpath, fname): + """Add a fam monitor for an included file""" + if self.should_monitor: + self.metadata.core.fam.AddMonitor(fpath, self.metadata) + self.extras.append(fname) + + def load_xml(self): + """Load changes from XML""" + try: + xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile)) + except lxml.etree.XMLSyntaxError: + self.logger.error('Failed to parse %s' % self.basefile) + return + self.extras = [] + self.basedata = copy.copy(xdata) + self._follow_xincludes(xdata=xdata) + if self.extras: + try: + xdata.xinclude() + except lxml.etree.XIncludeError: + self.logger.error("Failed to process XInclude for file %s" % + self.basefile) + self.data = xdata + + def write(self): + """Write changes to xml back to disk.""" + self.write_xml(os.path.join(self.basedir, self.basefile), + self.basedata) + + def write_xml(self, fname, xmltree): + """Write changes to xml back to disk.""" + tmpfile = "%s.new" % fname + try: + datafile = open(tmpfile, 'w') + except IOError: + e = sys.exc_info()[1] + self.logger.error("Failed to write %s: %s" % (tmpfile, e)) + raise MetadataRuntimeError + # prep data + dataroot = xmltree.getroot() + newcontents = lxml.etree.tostring(dataroot, pretty_print=True) + + fd = datafile.fileno() + while locked(fd) == True: + pass + try: + datafile.write(newcontents) + except: + fcntl.lockf(fd, fcntl.LOCK_UN) + self.logger.error("Metadata: Failed to write new xml data to %s" % + tmpfile, exc_info=1) + os.unlink(tmpfile) + raise MetadataRuntimeError + datafile.close() + + # check if clients.xml is a symlink + if os.path.islink(fname): + fname = os.readlink(fname) + + try: + os.rename(tmpfile, fname) + except: + self.logger.error("Metadata: Failed to rename %s" % tmpfile) + raise MetadataRuntimeError + + def find_xml_for_xpath(self, xpath): + """Find and load xml file containing the xpath query""" + if self.pseudo_monitor: + # Reload xml if we don't have a real monitor + self.load_xml() + cli = self.basedata.xpath(xpath) + if len(cli) > 0: + return {'filename': os.path.join(self.basedir, self.basefile), + 'xmltree': self.basedata, + 'xquery': cli} + else: + """Try to find the data in included files""" + for included in self.extras: + try: + xdata = lxml.etree.parse(os.path.join(self.basedir, + included)) + cli = xdata.xpath(xpath) + if len(cli) > 0: + return {'filename': os.path.join(self.basedir, + included), + 'xmltree': xdata, + 'xquery': cli} + except lxml.etree.XMLSyntaxError: + self.logger.error('Failed to parse %s' % (included)) + return {} + + def HandleEvent(self, event): + """Handle fam events""" + filename = event.filename.split('/')[-1] + if filename in self.extras: + if event.code2str() == 'exists': + return False + elif 
filename != self.basefile: + return False + if event.code2str() == 'endExist': + return False + self.load_xml() + return True + + +class ClientMetadata(object): + """This object contains client metadata.""" + def __init__(self, client, profile, groups, bundles, + aliases, addresses, categories, uuid, password, query): + self.hostname = client + self.profile = profile + self.bundles = bundles + self.aliases = aliases + self.addresses = addresses + self.groups = groups + self.categories = categories + self.uuid = uuid + self.password = password + self.connectors = [] + self.query = query + + def inGroup(self, group): + """Test to see if client is a member of group.""" + return group in self.groups + + def group_in_category(self, category): + for grp in self.query.all_groups_in_category(category): + if grp in self.groups: + return grp + return '' + + +class MetadataQuery(object): + def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category): + # resolver is set later + self.by_name = by_name + self.names_by_groups = by_groups + self.names_by_profiles = by_profiles + self.all_clients = get_clients + self.all_groups = all_groups + self.all_groups_in_category = all_groups_in_category + + def by_groups(self, groups): + return [self.by_name(name) for name in self.names_by_groups(groups)] + + def by_profiles(self, profiles): + return [self.by_name(name) for name in self.names_by_profiles(profiles)] + + def all(self): + return [self.by_name(name) for name in self.all_clients()] + + +class Metadata(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Metadata, + Bcfg2.Server.Plugin.Statistics): + """This class contains data for bcfg2 server metadata.""" + __author__ = 'bcfg-dev@mcs.anl.gov' + name = "Metadata" + sort_order = 500 + + def __init__(self, core, datastore, watch_clients=True): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Metadata.__init__(self) + Bcfg2.Server.Plugin.Statistics.__init__(self) + if watch_clients: + try: + core.fam.AddMonitor(os.path.join(self.data, "groups.xml"), self) + core.fam.AddMonitor(os.path.join(self.data, "clients.xml"), self) + except: + print("Unable to add file monitor for groups.xml or clients.xml") + raise Bcfg2.Server.Plugin.PluginInitError + + self.clients_xml = XMLMetadataConfig(self, watch_clients, 'clients.xml') + self.groups_xml = XMLMetadataConfig(self, watch_clients, 'groups.xml') + self.states = {} + if watch_clients: + self.states = {"groups.xml": False, + "clients.xml": False} + self.addresses = {} + self.auth = dict() + self.clients = {} + self.aliases = {} + self.groups = {} + self.cgroups = {} + self.public = [] + self.private = [] + self.profiles = [] + self.categories = {} + self.bad_clients = {} + self.uuid = {} + self.secure = [] + self.floating = [] + self.passwords = {} + self.session_cache = {} + self.default = None + self.pdirty = False + self.extra = {'groups.xml': [], + 'clients.xml': []} + self.password = core.password + self.query = MetadataQuery(core.build_metadata, + lambda: list(self.clients.keys()), + self.get_client_names_by_groups, + self.get_client_names_by_profiles, + self.get_all_group_names, + self.get_all_groups_in_category) + + @classmethod + def init_repo(cls, repo, groups, os_selection, clients): + path = os.path.join(repo, cls.name) + os.makedirs(path) + open(os.path.join(repo, "Metadata", "groups.xml"), + "w").write(groups % os_selection) + open(os.path.join(repo, "Metadata", "clients.xml"), + "w").write(clients % socket.getfqdn()) + + def 
get_groups(self): + '''return groups xml tree''' + groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml")) + root = groups_tree.getroot() + return root + + def _search_xdata(self, tag, name, tree, alias=False): + for node in tree.findall("//%s" % tag): + if node.get("name") == name: + return node + elif alias: + for child in node: + if (child.tag == "Alias" and + child.attrib["name"] == name): + return node + return None + + def search_group(self, group_name, tree): + """Find a group.""" + return self._search_xdata("Group", group_name, tree) + + def search_bundle(self, bundle_name, tree): + """Find a bundle.""" + return self._search_xdata("Bundle", bundle_name, tree) + + def search_client(self, client_name, tree): + return self._search_xdata("Client", client_name, tree, alias=True) + + def _add_xdata(self, config, tag, name, attribs=None, alias=False): + node = self._search_xdata(tag, name, config.xdata, alias=alias) + if node != None: + self.logger.error("%s \"%s\" already exists" % (tag, name)) + raise MetadataConsistencyError + element = lxml.etree.SubElement(config.base_xdata.getroot(), + tag, name=name) + if attribs: + for key, val in list(attribs.items()): + element.set(key, val) + config.write() + + def add_group(self, group_name, attribs): + """Add group to groups.xml.""" + return self._add_xdata(self.groups_xml, "Group", group_name, + attribs=attribs) + + def add_bundle(self, bundle_name): + """Add bundle to groups.xml.""" + return self._add_xdata(self.groups_xml, "Bundle", bundle_name) + + def add_client(self, client_name, attribs): + """Add client to clients.xml.""" + return self._add_xdata(self.clients_xml, "Client", client_name, + attribs=attribs, alias=True) + + def _update_xdata(self, config, tag, name, attribs, alias=False): + node = self._search_xdata(tag, name, config.xdata, alias=alias) + if node == None: + self.logger.error("%s \"%s\" does not exist" % (tag, name)) + raise MetadataConsistencyError + xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % + (tag, node.get('name'))) + if not xdict: + self.logger.error("Unexpected error finding %s \"%s\"" % + (tag, name)) + raise MetadataConsistencyError + for key, val in list(attribs.items()): + xdict['xquery'][0].set(key, val) + config.write_xml(xdict['filename'], xdict['xmltree']) + + def update_group(self, group_name, attribs): + """Update a groups attributes.""" + return self._update_xdata(self.groups_xml, "Group", group_name, attribs) + + def update_client(self, client_name, attribs): + """Update a clients attributes.""" + return self._update_xdata(self.clients_xml, "Client", client_name, + attribs, alias=True) + + def _remove_xdata(self, config, tag, name, alias=False): + node = self._search_xdata(tag, name, config.xdata) + if node == None: + self.logger.error("%s \"%s\" does not exist" % (tag, name)) + raise MetadataConsistencyError + xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % + (tag, node.get('name'))) + if not xdict: + self.logger.error("Unexpected error finding %s \"%s\"" % + (tag, name)) + raise MetadataConsistencyError + xdict['xquery'][0].getparent().remove(xdict['xquery'][0]) + self.groups_xml.write_xml(xdict['filename'], xdict['xmltree']) + + def remove_group(self, group_name): + """Remove a group.""" + return self._remove_xdata(self.groups_xml, "Group", group_name) + + def remove_bundle(self, bundle_name): + """Remove a bundle.""" + return self._remove_xdata(self.groups_xml, "Bundle", bundle_name) + + def _handle_clients_xml_event(self, event): + xdata = self.clients_xml.xdata + 
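+        # Rebuild the client lookup tables from scratch: forward and
+        # reverse maps for aliases and addresses, plus the per-client
+        # auth, uuid, password, secure and floating settings.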
self.clients = {} + self.aliases = {} + self.raliases = {} + self.bad_clients = {} + self.secure = [] + self.floating = [] + self.addresses = {} + self.raddresses = {} + for client in xdata.findall('.//Client'): + clname = client.get('name').lower() + if 'address' in client.attrib: + caddr = client.get('address') + if caddr in self.addresses: + self.addresses[caddr].append(clname) + else: + self.addresses[caddr] = [clname] + if clname not in self.raddresses: + self.raddresses[clname] = set() + self.raddresses[clname].add(caddr) + if 'auth' in client.attrib: + self.auth[client.get('name')] = client.get('auth', + 'cert+password') + if 'uuid' in client.attrib: + self.uuid[client.get('uuid')] = clname + if client.get('secure', 'false') == 'true': + self.secure.append(clname) + if client.get('location', 'fixed') == 'floating': + self.floating.append(clname) + if 'password' in client.attrib: + self.passwords[clname] = client.get('password') + + self.raliases[clname] = set() + for alias in client.findall('Alias'): + self.aliases.update({alias.get('name'): clname}) + self.raliases[clname].add(alias.get('name')) + if 'address' not in alias.attrib: + continue + if alias.get('address') in self.addresses: + self.addresses[alias.get('address')].append(clname) + else: + self.addresses[alias.get('address')] = [clname] + if clname not in self.raddresses: + self.raddresses[clname] = set() + self.raddresses[clname].add(alias.get('address')) + self.clients.update({clname: client.get('profile')}) + self.states['clients.xml'] = True + + def _handle_groups_xml_event(self, event): + xdata = self.groups_xml.xdata + self.public = [] + self.private = [] + self.profiles = [] + self.groups = {} + grouptmp = {} + self.categories = {} + groupseen = list() + for group in xdata.xpath('//Groups/Group'): + if group.get('name') not in groupseen: + groupseen.append(group.get('name')) + else: + self.logger.error("Metadata: Group %s defined multiply" % + group.get('name')) + grouptmp[group.get('name')] = \ + ([item.get('name') for item in group.findall('./Bundle')], + [item.get('name') for item in group.findall('./Group')]) + grouptmp[group.get('name')][1].append(group.get('name')) + if group.get('default', 'false') == 'true': + self.default = group.get('name') + if group.get('profile', 'false') == 'true': + self.profiles.append(group.get('name')) + if group.get('public', 'false') == 'true': + self.public.append(group.get('name')) + elif group.get('public', 'true') == 'false': + self.private.append(group.get('name')) + if 'category' in group.attrib: + self.categories[group.get('name')] = group.get('category') + + for group in grouptmp: + # self.groups[group] => (bundles, groups, categories) + self.groups[group] = (set(), set(), {}) + tocheck = [group] + group_cat = self.groups[group][2] + while tocheck: + now = tocheck.pop() + self.groups[group][1].add(now) + if now in grouptmp: + (bundles, groups) = grouptmp[now] + for ggg in groups: + if ggg in self.groups[group][1]: + continue + if (ggg not in self.categories or \ + self.categories[ggg] not in self.groups[group][2]): + self.groups[group][1].add(ggg) + tocheck.append(ggg) + if ggg in self.categories: + group_cat[self.categories[ggg]] = ggg + elif ggg in self.categories: + self.logger.info("Group %s: %s cat-suppressed %s" % \ + (group, + group_cat[self.categories[ggg]], + ggg)) + [self.groups[group][0].add(bund) for bund in bundles] + self.states['groups.xml'] = True + + def HandleEvent(self, event): + """Handle update events for data files.""" + if 
self.clients_xml.HandleEvent(event): + self._handle_clients_xml_event(event) + elif self.groups_xml.HandleEvent(event): + self._handle_groups_xml_event(event) + + if False not in list(self.states.values()): + # check that all client groups are real and complete + real = list(self.groups.keys()) + for client in list(self.clients.keys()): + if self.clients[client] not in self.profiles: + self.logger.error("Client %s set as nonexistent or " + "incomplete group %s" % + (client, self.clients[client])) + self.logger.error("Removing client mapping for %s" % client) + self.bad_clients[client] = self.clients[client] + del self.clients[client] + for bclient in list(self.bad_clients.keys()): + if self.bad_clients[bclient] in self.profiles: + self.logger.info("Restored profile mapping for client %s" % + bclient) + self.clients[bclient] = self.bad_clients[bclient] + del self.bad_clients[bclient] + + def set_profile(self, client, profile, addresspair): + """Set group parameter for provided client.""" + self.logger.info("Asserting client %s profile to %s" % (client, profile)) + if False in list(self.states.values()): + raise MetadataRuntimeError + if profile not in self.public: + self.logger.error("Failed to set client %s to private group %s" % + (client, profile)) + raise MetadataConsistencyError + if client in self.clients: + self.logger.info("Changing %s group from %s to %s" % + (client, self.clients[client], profile)) + self.update_client(client, dict(profile=profile)) + else: + self.logger.info("Creating new client: %s, profile %s" % + (client, profile)) + if addresspair in self.session_cache: + # we are working with a uuid'd client + self.add_client(self.session_cache[addresspair][1], + dict(uuid=client, profile=profile, + address=addresspair[0])) + else: + self.add_client(client, dict(profile=profile)) + self.clients[client] = profile + self.clients_xml.write() + + def resolve_client(self, addresspair, cleanup_cache=False): + """Lookup address locally or in DNS to get a hostname.""" + if addresspair in self.session_cache: + # client _was_ cached, so there can be some expired + # entries. we need to clean them up to avoid potentially + # infinite memory swell + cache_ttl = 90 + if cleanup_cache: + # remove entries for this client's IP address with + # _any_ port numbers - perhaps a priority queue could + # be faster? 
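+                # (the sweep below visits every cached (address, port)
+                # pair for this client's IP and drops entries older
+                # than cache_ttl seconds)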
+ curtime = time.time() + for addrpair in self.session_cache.keys(): + if addresspair[0] == addrpair[0]: + (stamp, _) = self.session_cache[addrpair] + if curtime - stamp > cache_ttl: + del self.session_cache[addrpair] + # return the cached data + try: + (stamp, uuid) = self.session_cache[addresspair] + if time.time() - stamp < cache_ttl: + return self.session_cache[addresspair][1] + except KeyError: + # we cleaned all cached data for this client in cleanup_cache + pass + address = addresspair[0] + if address in self.addresses: + if len(self.addresses[address]) != 1: + self.logger.error("Address %s has multiple reverse assignments; " + "a uuid must be used" % (address)) + raise MetadataConsistencyError + return self.addresses[address][0] + try: + cname = socket.gethostbyaddr(address)[0].lower() + if cname in self.aliases: + return self.aliases[cname] + return cname + except socket.herror: + warning = "address resolution error for %s" % (address) + self.logger.warning(warning) + raise MetadataConsistencyError + + def get_initial_metadata(self, client): + """Return the metadata for a given client.""" + if False in list(self.states.values()): + raise MetadataRuntimeError + client = client.lower() + if client in self.aliases: + client = self.aliases[client] + if client in self.clients: + profile = self.clients[client] + (bundles, groups, categories) = self.groups[profile] + else: + if self.default == None: + self.logger.error("Cannot set group for client %s; " + "no default group set" % client) + raise MetadataConsistencyError + self.set_profile(client, self.default, (None, None)) + profile = self.default + [bundles, groups, categories] = self.groups[self.default] + aliases = self.raliases.get(client, set()) + addresses = self.raddresses.get(client, set()) + newgroups = set(groups) + newbundles = set(bundles) + newcategories = {} + newcategories.update(categories) + if client in self.passwords: + password = self.passwords[client] + else: + password = None + uuids = [item for item, value in list(self.uuid.items()) + if value == client] + if uuids: + uuid = uuids[0] + else: + uuid = None + for group in self.cgroups.get(client, []): + if group in self.groups: + nbundles, ngroups, ncategories = self.groups[group] + else: + nbundles, ngroups, ncategories = ([], [group], {}) + [newbundles.add(b) for b in nbundles if b not in newbundles] + [newgroups.add(g) for g in ngroups if g not in newgroups] + newcategories.update(ncategories) + return ClientMetadata(client, profile, newgroups, newbundles, aliases, + addresses, newcategories, uuid, password, + self.query) + + def get_all_group_names(self): + all_groups = set() + [all_groups.update(g[1]) for g in list(self.groups.values())] + return all_groups + + def get_all_groups_in_category(self, category): + all_groups = set() + [all_groups.add(g) for g in self.categories \ + if self.categories[g] == category] + return all_groups + + def get_client_names_by_profiles(self, profiles): + return [client for client, profile in list(self.clients.items()) \ + if profile in profiles] + + def get_client_names_by_groups(self, groups): + mdata = [self.core.build_metadata(client) + for client in list(self.clients.keys())] + return [md.hostname for md in mdata if md.groups.issuperset(groups)] + + def merge_additional_groups(self, imd, groups): + for group in groups: + if (group in self.categories and + self.categories[group] in imd.categories): + continue + newbundles, newgroups, _ = self.groups.get(group, + (list(), + [group], + dict())) + for newbundle in newbundles: + 
if newbundle not in imd.bundles: + imd.bundles.add(newbundle) + for newgroup in newgroups: + if newgroup not in imd.groups: + if (newgroup in self.categories and + self.categories[newgroup] in imd.categories): + continue + if newgroup in self.private: + self.logger.error("Refusing to add dynamic membership " + "in private group %s for client %s" % + (newgroup, imd.hostname)) + continue + imd.groups.add(newgroup) + + def merge_additional_data(self, imd, source, data): + if not hasattr(imd, source): + setattr(imd, source, data) + imd.connectors.append(source) + + def validate_client_address(self, client, addresspair): + """Check address against client.""" + address = addresspair[0] + if client in self.floating: + self.debug_log("Client %s is floating" % client) + return True + if address in self.addresses: + if client in self.addresses[address]: + self.debug_log("Client %s matches address %s" % + (client, address)) + return True + else: + self.logger.error("Got request for non-float client %s from %s" % + (client, address)) + return False + resolved = self.resolve_client(addresspair) + if resolved.lower() == client.lower(): + return True + else: + self.logger.error("Got request for %s from incorrect address %s" % + (client, address)) + self.logger.error("Resolved to %s" % resolved) + return False + + def AuthenticateConnection(self, cert, user, password, address): + """This function checks auth creds.""" + if cert: + id_method = 'cert' + certinfo = dict([x[0] for x in cert['subject']]) + # look at cert.cN + client = certinfo['commonName'] + self.debug_log("Got cN %s; using as client name" % client) + auth_type = self.auth.get(client, 'cert+password') + elif user == 'root': + id_method = 'address' + try: + client = self.resolve_client(address) + except MetadataConsistencyError: + self.logger.error("Client %s failed to resolve; metadata problem" + % address[0]) + return False + else: + id_method = 'uuid' + # user maps to client + if user not in self.uuid: + client = user + self.uuid[user] = user + else: + client = self.uuid[user] + + # we have the client name + self.debug_log("Authenticating client %s" % client) + + # next we validate the address + if id_method == 'uuid': + addr_is_valid = True + else: + addr_is_valid = self.validate_client_address(client, address) + + if not addr_is_valid: + return False + + if id_method == 'cert' and auth_type != 'cert+password': + # remember the cert-derived client name for this connection + if client in self.floating: + self.session_cache[address] = (time.time(), client) + # we are done if cert+password not required + return True + + if client not in self.passwords: + if client in self.secure: + self.logger.error("Client %s in secure mode but has no password" + % address[0]) + return False + if password != self.password: + self.logger.error("Client %s used incorrect global password" % + address[0]) + return False + if client not in self.secure: + if client in self.passwords: + plist = [self.password, self.passwords[client]] + else: + plist = [self.password] + if password not in plist: + self.logger.error("Client %s failed to use either allowed " + "password" % address[0]) + return False + else: + # client in secure mode and has a client password + if password != self.passwords[client]: + self.logger.error("Client %s failed to use client password in " + "secure mode" % address[0]) + return False + # populate the session cache + if user.decode('utf-8') != 'root': + self.session_cache[address] = (time.time(), client) + return True + + def 
process_statistics(self, meta, _): + """Hook into statistics interface to toggle clients in bootstrap mode.""" + client = meta.hostname + if client in self.auth and self.auth[client] == 'bootstrap': + self.update_client(client, dict(auth='cert')) + + def viz(self, hosts, bundles, key, only_client, colors): + """Admin mode viz support.""" + if only_client: + clientmeta = self.core.build_metadata(only_client) + + def include_client(client): + return not only_client or client != only_client + + def include_bundle(bundle): + return not only_client or bundle in clientmeta.bundles + + def include_group(group): + return not only_client or group in clientmeta.groups + + groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml")) + try: + groups_tree.xinclude() + except lxml.etree.XIncludeError: + self.logger.error("Failed to process XInclude for file %s: %s" % + (dest, sys.exc_info()[1])) + groups = groups_tree.getroot() + categories = {'default': 'grey83'} + viz_str = [] + egroups = groups.findall("Group") + groups.findall('.//Groups/Group') + for group in egroups: + if not group.get('category') in categories: + categories[group.get('category')] = colors.pop() + group.set('color', categories[group.get('category')]) + if None in categories: + del categories[None] + if hosts: + instances = {} + clients = self.clients + for client, profile in list(clients.items()): + if include_client(client): + continue + if profile in instances: + instances[profile].append(client) + else: + instances[profile] = [client] + for profile, clist in list(instances.items()): + clist.sort() + viz_str.append('"%s-instances" [ label="%s", shape="record" ];' % + (profile, '|'.join(clist))) + viz_str.append('"%s-instances" -> "group-%s";' % + (profile, profile)) + if bundles: + bundles = [] + [bundles.append(bund.get('name')) \ + for bund in groups.findall('.//Bundle') \ + if bund.get('name') not in bundles \ + and include_bundle(bund.get('name'))] + bundles.sort() + for bundle in bundles: + viz_str.append('"bundle-%s" [ label="%s", shape="septagon"];' % + (bundle, bundle)) + gseen = [] + for group in egroups: + if group.get('profile', 'false') == 'true': + style = "filled, bold" + else: + style = "filled" + gseen.append(group.get('name')) + if include_group(group.get('name')): + viz_str.append('"group-%s" [label="%s", style="%s", fillcolor=%s];' % + (group.get('name'), group.get('name'), style, + group.get('color'))) + if bundles: + for bundle in group.findall('Bundle'): + viz_str.append('"group-%s" -> "bundle-%s";' % + (group.get('name'), bundle.get('name'))) + gfmt = '"group-%s" [label="%s", style="filled", fillcolor="grey83"];' + for group in egroups: + for parent in group.findall('Group'): + if parent.get('name') not in gseen and include_group(parent.get('name')): + viz_str.append(gfmt % (parent.get('name'), + parent.get('name'))) + gseen.append(parent.get("name")) + if include_group(group.get('name')): + viz_str.append('"group-%s" -> "group-%s";' % + (group.get('name'), parent.get('name'))) + if key: + for category in categories: + viz_str.append('"%s" [label="%s", shape="record", style="filled", fillcolor="%s"];' % + (category, category, categories[category])) + return "\n".join("\t" + s for s in viz_str) diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py new file mode 100644 index 000000000..4dbd57d16 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py @@ -0,0 +1,151 @@ +'''This module implements a Nagios configuration generator''' + +import os 
+import re +import sys +import glob +import socket +import logging +import lxml.etree + +import Bcfg2.Server.Plugin + +LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen') + +line_fmt = '\t%-32s %s' + +class NagiosGenConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked, + Bcfg2.Server.Plugin.StructFile): + def __init__(self, filename, fam): + Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam) + Bcfg2.Server.Plugin.StructFile.__init__(self, filename) + + +class NagiosGen(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Generator): + """NagiosGen is a Bcfg2 plugin that dynamically generates + Nagios configuration file based on Bcfg2 data. + """ + name = 'NagiosGen' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Generator.__init__(self) + self.config = NagiosGenConfig(os.path.join(self.data, 'config.xml'), + core.fam) + self.Entries = {'Path': + {'/etc/nagiosgen.status': self.createhostconfig, + '/etc/nagios/nagiosgen.cfg': self.createserverconfig}} + + self.client_attrib = {'encoding': 'ascii', + 'owner': 'root', + 'group': 'root', + 'type': 'file', + 'perms': '0400'} + self.server_attrib = {'encoding': 'ascii', + 'owner': 'nagios', + 'group': 'nagios', + 'type': 'file', + 'perms': '0440'} + + def createhostconfig(self, entry, metadata): + """Build host specific configuration file.""" + host_address = socket.gethostbyname(metadata.hostname) + host_groups = [grp for grp in metadata.groups + if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))] + host_config = ['define host {', + line_fmt % ('host_name', metadata.hostname), + line_fmt % ('alias', metadata.hostname), + line_fmt % ('address', host_address)] + + if host_groups: + host_config.append(line_fmt % ("hostgroups", + ",".join(host_groups))) + + # read the old-style Properties config, but emit a warning. + xtra = dict() + props = None + if (hasattr(metadata, 'Properties') and + 'NagiosGen.xml' in metadata.Properties): + props = metadata.Properties['NagiosGen.xml'].data + if props is not None: + LOGGER.warn("Parsing deprecated Properties/NagiosGen.xml. " + "Update to the new-style config with " + "nagiosgen-convert.py.") + xtra = dict((el.tag, el.text) + for el in props.find(metadata.hostname)) + # hold off on parsing the defaults until we've checked for + # a new-style config + + # read the old-style parents.xml, but emit a warning + pfile = os.path.join(self.data, "parents.xml") + if os.path.exists(pfile): + LOGGER.warn("Parsing deprecated NagiosGen/parents.xml. 
" + "Update to the new-style config with " + "nagiosgen-convert.py.") + parents = lxml.etree.parse(pfile) + for el in parents.xpath("//Depend[@name='%s']" % metadata.hostname): + if 'parent' in xtra: + xtra['parent'] += "," + el.get("on") + else: + xtra['parent'] = el.get("on") + + # read the new-style config and overwrite the old-style config + for el in self.config.Match(metadata): + if el.tag == 'Option': + xtra[el.get("name")] = el.text + + # if we haven't found anything in the new- or old-style + # configs, finally read defaults from old-style config + if not xtra and props is not None: + xtra = dict((el.tag, el.text) for el in props.find('default')) + + if xtra: + host_config.extend([line_fmt % (opt, val) + for opt, val in list(xtra.items())]) + else: + host_config.append(line_fmt % ('use', 'default')) + + host_config.append('}') + entry.text = "%s\n" % "\n".join(host_config) + [entry.attrib.__setitem__(key, value) + for (key, value) in list(self.client_attrib.items())] + try: + fileh = open("%s/%s-host.cfg" % + (self.data, metadata.hostname), 'w') + fileh.write(entry.text) + fileh.close() + except OSError: + ioerr = sys.exc_info()[1] + LOGGER.error("Failed to write %s/%s-host.cfg" % + (self.data, metadata.hostname)) + LOGGER.error(ioerr) + + def createserverconfig(self, entry, _): + """Build monolithic server configuration file.""" + host_configs = glob.glob('%s/*-host.cfg' % self.data) + group_configs = glob.glob('%s/*-group.cfg' % self.data) + host_data = [] + group_data = [] + for host in host_configs: + host_data.append(open(host, 'r').read()) + + for group in group_configs: + group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group) + if "\n".join(host_data).find(group_name) != -1: + groupfile = open(group, 'r') + group_data.append(groupfile.read()) + groupfile.close() + + entry.text = "%s\n\n%s" % ("\n".join(group_data), "\n".join(host_data)) + [entry.attrib.__setitem__(key, value) + for (key, value) in list(self.server_attrib.items())] + try: + fileh = open("%s/nagiosgen.cfg" % self.data, 'w') + fileh.write(entry.text) + fileh.close() + except OSError: + ioerr = sys.exc_info()[1] + LOGGER.error("Failed to write %s/nagiosgen.cfg" % self.data) + LOGGER.error(ioerr) diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py new file mode 100644 index 000000000..5fff20d98 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py @@ -0,0 +1,92 @@ +import lxml.etree +import os + +import logging +logger = logging.getLogger('Bcfg2.Plugins.Ohai') + +import Bcfg2.Server.Plugin + +try: + import json +except: + # FIXME: can be removed when server prereq is >= python 2.6 + # necessary for clients without the in-tree json module + try: + import simplejson as json + except: + logger.error("Unable to load any json modules. 
Make sure " + "python-simplejson is installed.") + raise ImportError + + +probecode = """#!/bin/sh + +export PATH=$PATH:/sbin:/usr/sbin + +if type ohai >& /dev/null; then + ohai +else + # an empty dict, so "'foo' in metadata.Ohai" tests succeed + echo '{}' +fi +""" + +class OhaiCache(object): + def __init__(self, dirname): + self.dirname = dirname + self.cache = dict() + + def __setitem__(self, item, value): + if value == None: + # simply return if the client returned nothing + return + self.cache[item] = json.loads(value) + file("%s/%s.json" % (self.dirname, item), 'w').write(value) + + def __getitem__(self, item): + if item not in self.cache: + try: + data = open("%s/%s.json" % (self.dirname, item)).read() + except: + raise KeyError(item) + self.cache[item] = json.loads(data) + return self.cache[item] + + def __iter__(self): + data = list(self.cache.keys()) + data.extend([x[:-5] for x in os.listdir(self.dirname)]) + return data.__iter__() + + +class Ohai(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Probing, + Bcfg2.Server.Plugin.Connector): + """The Ohai plugin is used to detect information + about the client operating system. + """ + name = 'Ohai' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Probing.__init__(self) + Bcfg2.Server.Plugin.Connector.__init__(self) + self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai', + interpreter='/bin/sh') + self.probe.text = probecode + try: + os.stat(self.data) + except: + os.makedirs(self.data) + self.cache = OhaiCache(self.data) + + def GetProbes(self, meta, force=False): + return [self.probe] + + def ReceiveData(self, meta, datalist): + self.cache[meta.hostname] = datalist[0].text + + def get_additional_data(self, meta): + if meta.hostname in self.cache: + return self.cache[meta.hostname] + return dict() diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py new file mode 100644 index 000000000..49e9d417b --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py @@ -0,0 +1,140 @@ +import re +import gzip +from Bcfg2.Server.Plugins.Packages.Collection import Collection +from Bcfg2.Server.Plugins.Packages.Source import Source +from Bcfg2.Bcfg2Py3k import cPickle, file + +class AptCollection(Collection): + def get_group(self, group): + self.logger.warning("Packages: Package groups are not supported by APT") + return [] + +class AptSource(Source): + basegroups = ['apt', 'debian', 'ubuntu', 'nexenta'] + ptype = 'deb' + + def __init__(self, basepath, xsource, config): + Source.__init__(self, basepath, xsource, config) + self.pkgnames = set() + + self.url_map = [{'rawurl': self.rawurl, 'url': self.url, + 'version': self.version, + 'components': self.components, 'arches': self.arches}] + + def save_state(self): + cache = file(self.cachefile, 'wb') + cPickle.dump((self.pkgnames, self.deps, self.provides), + cache, 2) + cache.close() + + def load_state(self): + data = file(self.cachefile) + self.pkgnames, self.deps, self.provides = cPickle.load(data) + + def filter_unknown(self, unknown): + filtered = set([u for u in unknown if u.startswith('choice')]) + unknown.difference_update(filtered) + + def get_urls(self): + if not self.rawurl: + rv = [] + for part in self.components: + for arch in self.arches: + rv.append("%sdists/%s/%s/binary-%s/Packages.gz" % + (self.url, self.version, part, arch)) + return rv + else: + return ["%sPackages.gz" % self.rawurl] + urls = property(get_urls) + + 
def read_files(self): + bdeps = dict() + bprov = dict() + depfnames = ['Depends', 'Pre-Depends'] + if self.recommended: + depfnames.append('Recommends') + for fname in self.files: + if not self.rawurl: + barch = [x + for x in fname.split('@') + if x.startswith('binary-')][0][7:] + else: + # RawURL entries assume that they only have one <Arch></Arch> + # element and that it is the architecture of the source. + barch = self.arches[0] + if barch not in bdeps: + bdeps[barch] = dict() + bprov[barch] = dict() + try: + reader = gzip.GzipFile(fname) + except: + self.logger.error("Packages: Failed to read file %s" % fname) + raise + for line in reader.readlines(): + words = str(line.strip()).split(':', 1) + if words[0] == 'Package': + pkgname = words[1].strip().rstrip() + self.pkgnames.add(pkgname) + bdeps[barch][pkgname] = [] + elif words[0] == 'Essential' and self.essential: + self.essentialpkgs.add(pkgname) + elif words[0] in depfnames: + vindex = 0 + for dep in words[1].split(','): + if '|' in dep: + cdeps = [re.sub('\s+', '', + re.sub('\(.*\)', '', cdep)) + for cdep in dep.split('|')] + dyn_dname = "choice-%s-%s-%s" % (pkgname, + barch, + vindex) + vindex += 1 + bdeps[barch][pkgname].append(dyn_dname) + bprov[barch][dyn_dname] = set(cdeps) + else: + raw_dep = re.sub('\(.*\)', '', dep) + raw_dep = raw_dep.rstrip().strip() + bdeps[barch][pkgname].append(raw_dep) + elif words[0] == 'Provides': + for pkg in words[1].split(','): + dname = pkg.rstrip().strip() + if dname not in bprov[barch]: + bprov[barch][dname] = set() + bprov[barch][dname].add(pkgname) + + self.deps['global'] = dict() + self.provides['global'] = dict() + for barch in bdeps: + self.deps[barch] = dict() + self.provides[barch] = dict() + for pkgname in self.pkgnames: + pset = set() + for barch in bdeps: + if pkgname not in bdeps[barch]: + bdeps[barch][pkgname] = [] + pset.add(tuple(bdeps[barch][pkgname])) + if len(pset) == 1: + self.deps['global'][pkgname] = pset.pop() + else: + for barch in bdeps: + self.deps[barch][pkgname] = bdeps[barch][pkgname] + provided = set() + for bprovided in list(bprov.values()): + provided.update(set(bprovided)) + for prov in provided: + prset = set() + for barch in bprov: + if prov not in bprov[barch]: + continue + prset.add(tuple(bprov[barch].get(prov, ()))) + if len(prset) == 1: + self.provides['global'][prov] = prset.pop() + else: + for barch in bprov: + self.provides[barch][prov] = bprov[barch].get(prov, ()) + self.save_state() + + def is_package(self, _, pkg): + return (pkg in self.pkgnames and + pkg not in self.blacklist and + (len(self.whitelist) == 0 or pkg in self.whitelist)) diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py new file mode 100644 index 000000000..959dac03b --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py @@ -0,0 +1,349 @@ +import copy +import logging +import Bcfg2.Server.Plugin + +logger = logging.getLogger(__name__) + +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +# we have to cache Collection objects so that calling Packages.Refresh +# or .Reload can tell the collection objects to clean up their cache, +# but we don't actually use the cache to return a Collection object +# when one is requested, because that prevents new machines from +# working, since a Collection object gets created by +# get_additional_data(), which is called for all clients at server +# startup. 
(It would also prevent machines that change groups from +# working properly; e.g., if you reinstall a machine with a new OS, +# then returning a cached Collection object would give the wrong +# sources to that client.) +collections = dict() + +class Collection(Bcfg2.Server.Plugin.Debuggable): + def __init__(self, metadata, sources, basepath, debug=False): + """ don't call this directly; use the factory function """ + Bcfg2.Server.Plugin.Debuggable.__init__(self) + self.debug_flag = debug + self.metadata = metadata + self.sources = sources + self.basepath = basepath + self.virt_pkgs = dict() + + try: + self.config = sources[0].config + self.cachepath = sources[0].basepath + self.ptype = sources[0].ptype + except IndexError: + self.config = None + self.cachepath = None + self.ptype = "unknown" + + self.cachefile = None + + @property + def cachekey(self): + return md5(self.get_config()).hexdigest() + + def get_config(self): + self.logger.error("Packages: Cannot generate config for host with " + "multiple source types (%s)" % self.metadata.hostname) + return "" + + def get_relevant_groups(self): + groups = [] + for source in self.sources: + groups.extend(source.get_relevant_groups(self.metadata)) + return sorted(list(set(groups))) + + @property + def basegroups(self): + groups = set() + for source in self.sources: + groups.update(source.basegroups) + return list(groups) + + @property + def cachefiles(self): + cachefiles = set([self.cachefile]) + for source in self.sources: + cachefiles.add(source.cachefile) + return list(cachefiles) + + def get_group(self, group, ptype=None): + for source in self.sources: + pkgs = source.get_group(self.metadata, group, ptype=ptype) + if pkgs: + return pkgs + self.logger.warning("Packages: '%s' is not a valid group" % group) + return [] + + def is_package(self, package): + for source in self.sources: + if source.is_package(self.metadata, package): + return True + return False + + def is_virtual_package(self, package): + for source in self.sources: + if source.is_virtual_package(self.metadata, package): + return True + return False + + def get_deps(self, package): + for source in self.sources: + if source.is_package(self.metadata, package): + return source.get_deps(self.metadata, package) + return [] + + def get_essential(self): + essential = set() + for source in self.sources: + essential |= source.essentialpkgs + return essential + + def get_provides(self, package): + for source in self.sources: + providers = source.get_provides(self.metadata, package) + if providers: + return providers + return [] + + def get_vpkgs(self): + """ get virtual packages """ + vpkgs = dict() + for source in self.sources: + s_vpkgs = source.get_vpkgs(self.metadata) + for name, prov_set in list(s_vpkgs.items()): + if name not in vpkgs: + vpkgs[name] = set(prov_set) + else: + vpkgs[name].update(prov_set) + return vpkgs + + def filter_unknown(self, unknown): + for source in self.sources: + source.filter_unknown(unknown) + + def magic_groups_match(self): + for source in self.sources: + if source.magic_groups_match(self.metadata): + return True + + def build_extra_structures(self, independent): + pass + + def get_additional_data(self): + sdata = [] + for source in self.sources: + sdata.extend(copy.deepcopy(source.url_map)) + return sdata + + def setup_data(self, force_update=False): + """ do any collection-level data setup tasks """ + pass + + def complete(self, packagelist): + '''Build the transitive closure of all package dependencies + + Arguments: + packageslist - set of package 
names + returns => (set(packages), set(unsatisfied requirements)) + ''' + + # setup vpkg cache + pgrps = tuple(self.get_relevant_groups()) + if pgrps not in self.virt_pkgs: + self.virt_pkgs[pgrps] = self.get_vpkgs() + vpkg_cache = self.virt_pkgs[pgrps] + + # unclassified is set of unsatisfied requirements (may be pkg + # for vpkg) + unclassified = set(packagelist) + vpkgs = set() + both = set() + pkgs = set(packagelist) + + packages = set() + examined = set() + unknown = set() + + final_pass = False + really_done = False + # do while unclassified or vpkgs or both or pkgs + while unclassified or pkgs or both or final_pass: + if really_done: + break + if len(unclassified) + len(pkgs) + len(both) == 0: + # one more pass then exit + really_done = True + + while unclassified: + current = unclassified.pop() + examined.add(current) + is_pkg = False + if self.is_package(current): + is_pkg = True + + is_vpkg = current in vpkg_cache + + if is_pkg and is_vpkg: + both.add(current) + elif is_pkg and not is_vpkg: + pkgs.add(current) + elif is_vpkg and not is_pkg: + vpkgs.add(current) + elif not is_vpkg and not is_pkg: + unknown.add(current) + + while pkgs: + # direct packages; current can be added, and all deps + # should be resolved + current = pkgs.pop() + self.debug_log("Packages: handling package requirement %s" % + current) + packages.add(current) + deps = self.get_deps(current) + newdeps = set(deps).difference(examined) + if newdeps: + self.debug_log("Packages: Package %s added requirements %s" + % (current, newdeps)) + unclassified.update(newdeps) + + satisfied_vpkgs = set() + for current in vpkgs: + # virtual dependencies, satisfied if one of N in the + # config, or can be forced if only one provider + if len(vpkg_cache[current]) == 1: + self.debug_log("Packages: requirement %s satisfied by %s" % + (current, vpkg_cache[current])) + unclassified.update(vpkg_cache[current].difference(examined)) + satisfied_vpkgs.add(current) + else: + satisfiers = [item for item in vpkg_cache[current] + if item in packages] + self.debug_log("Packages: requirement %s satisfied by %s" % + (current, satisfiers)) + satisfied_vpkgs.add(current) + vpkgs.difference_update(satisfied_vpkgs) + + satisfied_both = set() + for current in both: + # packages that are both have virtual providers as + # well as a package with that name. 
allow use of virt
+                # through explicit specification, then fall back to
+                # forcing current on last pass
+                satisfiers = [item for item in vpkg_cache[current]
+                              if item in packages]
+                if satisfiers:
+                    self.debug_log("Packages: requirement %s satisfied by %s" %
+                                   (current, satisfiers))
+                    satisfied_both.add(current)
+                elif current in packagelist or final_pass:
+                    pkgs.add(current)
+                    satisfied_both.add(current)
+            both.difference_update(satisfied_both)
+
+            if len(unclassified) + len(pkgs) == 0:
+                final_pass = True
+            else:
+                final_pass = False
+
+        self.filter_unknown(unknown)
+
+        return packages, unknown
+
+    def __len__(self):
+        return len(self.sources)
+
+    def __getitem__(self, item):
+        return self.sources[item]
+
+    def __setitem__(self, item, value):
+        self.sources[item] = value
+
+    def __delitem__(self, item):
+        del self.sources[item]
+
+    def append(self, item):
+        self.sources.append(item)
+
+    def count(self, item):
+        return self.sources.count(item)
+
+    def index(self, item):
+        return self.sources.index(item)
+
+    def extend(self, items):
+        self.sources.extend(items)
+
+    def insert(self, index, item):
+        self.sources.insert(index, item)
+
+    def pop(self, index=None):
+        if index is None:
+            return self.sources.pop()
+        return self.sources.pop(index)
+
+    def remove(self, item):
+        self.sources.remove(item)
+
+    def sort(self, cmp=None, key=None, reverse=False):
+        self.sources.sort(cmp, key, reverse)
+
+def clear_cache():
+    global collections
+    collections = dict()
+
+def factory(metadata, sources, basepath, debug=False):
+    global collections
+
+    if not sources.loaded:
+        # if sources.xml has not received a FAM event yet, defer;
+        # instantiate a dummy Collection object
+        return Collection(metadata, [], basepath)
+
+    sclasses = set()
+    relevant = list()
+
+    for source in sources:
+        if source.applies(metadata):
+            relevant.append(source)
+            sclasses.update([source.__class__])
+
+    if len(sclasses) > 1:
+        logger.warning("Packages: Multiple source types found for %s: %s" %
+                       (metadata.hostname,
+                        ",".join([s.__name__ for s in sclasses])))
+        cclass = Collection
+    elif len(sclasses) == 0:
+        # you'd think this should be a warning, but it happens all the
+        # freaking time if you have a) machines in your clients.xml
+        # that do not have the proper groups set up yet (e.g., if you
+        # have multiple Bcfg2 servers and Packages-relevant groups set
+        # by probes); and b) templates that query all or multiple
+        # machines (e.g., with metadata.query.all_clients())
+        if debug:
+            logger.error("Packages: No sources found for %s" %
+                         metadata.hostname)
+        cclass = Collection
+    else:
+        stype = sclasses.pop().__name__.replace("Source", "")
+        try:
+            module = \
+                getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+                                   stype.title()).Server.Plugins.Packages,
+                        stype.title())
+            cclass = getattr(module, "%sCollection" % stype.title())
+        except ImportError:
+            logger.error("Packages: Unknown source type %s" % stype)
+        except AttributeError:
+            logger.warning("Packages: No collection class found for %s sources"
+                           % stype)
+
+    if debug:
+        logger.error("Packages: Using %s for Collection of sources for %s" %
+                     (cclass.__name__, metadata.hostname))
+
+    collection = cclass(metadata, relevant, basepath, debug=debug)
+    collections[metadata.hostname] = collection
+    return collection
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
new file mode 100644
index 000000000..99a090739
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -0,0 +1,119 @@
+import gzip
+import tarfile
+from Bcfg2.Bcfg2Py3k import cPickle, file
+from
Bcfg2.Server.Plugins.Packages.Collection import Collection +from Bcfg2.Server.Plugins.Packages.Source import Source + +class PacCollection(Collection): + def get_group(self, group): + self.logger.warning("Packages: Package groups are not supported by Pacman") + return [] + +class PacSource(Source): + basegroups = ['arch', 'parabola'] + ptype = 'pacman' + + def __init__(self, basepath, xsource, config): + Source.__init__(self, basepath, xsource, config) + self.pkgnames = set() + + self.url_map = [{'rawurl': self.rawurl, 'url': self.url, + 'version': self.version, + 'components': self.components, 'arches': self.arches}] + + def save_state(self): + cache = file(self.cachefile, 'wb') + cPickle.dump((self.pkgnames, self.deps, self.provides), + cache, 2) + cache.close() + + def load_state(self): + data = file(self.cachefile) + self.pkgnames, self.deps, self.provides = cPickle.load(data) + + def filter_unknown(self, unknown): + filtered = set([u for u in unknown if u.startswith('choice')]) + unknown.difference_update(filtered) + + def get_urls(self): + if not self.rawurl: + rv = [] + for part in self.components: + for arch in self.arches: + rv.append("%s%s/os/%s/%s.db.tar.gz" % + (self.url, part, arch, part)) + return rv + else: + raise Exception("PacSource : RAWUrl not supported (yet)") + urls = property(get_urls) + + def read_files(self): + bdeps = dict() + bprov = dict() + + depfnames = ['Depends', 'Pre-Depends'] + if self.recommended: + depfnames.append('Recommends') + + for fname in self.files: + if not self.rawurl: + barch = [x for x in fname.split('@') if x in self.arches][0] + else: + # RawURL entries assume that they only have one <Arch></Arch> + # element and that it is the architecture of the source. + barch = self.arches[0] + + if barch not in bdeps: + bdeps[barch] = dict() + bprov[barch] = dict() + try: + self.debug_log("Packages: try to read %s" % fname) + tar = tarfile.open(fname, "r") + reader = gzip.GzipFile(fname) + except: + self.logger.error("Packages: Failed to read file %s" % fname) + raise + + for tarinfo in tar: + if tarinfo.isdir(): + self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0]) + self.debug_log("Packages: added %s" % + tarinfo.name.rsplit("-", 2)[0]) + tar.close() + + self.deps['global'] = dict() + self.provides['global'] = dict() + for barch in bdeps: + self.deps[barch] = dict() + self.provides[barch] = dict() + for pkgname in self.pkgnames: + pset = set() + for barch in bdeps: + if pkgname not in bdeps[barch]: + bdeps[barch][pkgname] = [] + pset.add(tuple(bdeps[barch][pkgname])) + if len(pset) == 1: + self.deps['global'][pkgname] = pset.pop() + else: + for barch in bdeps: + self.deps[barch][pkgname] = bdeps[barch][pkgname] + provided = set() + for bprovided in list(bprov.values()): + provided.update(set(bprovided)) + for prov in provided: + prset = set() + for barch in bprov: + if prov not in bprov[barch]: + continue + prset.add(tuple(bprov[barch].get(prov, ()))) + if len(prset) == 1: + self.provides['global'][prov] = prset.pop() + else: + for barch in bprov: + self.provides[barch][prov] = bprov[barch].get(prov, ()) + self.save_state() + + def is_package(self, _, pkg): + return (pkg in self.pkgnames and + pkg not in self.blacklist and + (len(self.whitelist) == 0 or pkg in self.whitelist)) diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py new file mode 100644 index 000000000..3846c06ce --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py @@ -0,0 +1,15 @@ +import 
Bcfg2.Server.Plugin + +class PackagesConfig(Bcfg2.Server.Plugin.SimpleConfig): + _required = False + + def Index(self): + """ Build local data structures """ + Bcfg2.Server.Plugin.SimpleConfig.Index(self) + + if hasattr(self.plugin, "sources") and self.plugin.sources.loaded: + # only reload Packages plugin if sources have been loaded. + # otherwise, this is getting called on server startup, and + # we have to wait until all sources have been indexed + # before we can call Packages.Reload() + self.plugin.Reload() diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py new file mode 100644 index 000000000..a966268c0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py @@ -0,0 +1,109 @@ +import os +import sys +import lxml.etree +import Bcfg2.Server.Plugin +from Bcfg2.Server.Plugins.Packages.Source import SourceInitError + +class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked, + Bcfg2.Server.Plugin.StructFile, + Bcfg2.Server.Plugin.Debuggable): + __identifier__ = None + + def __init__(self, filename, cachepath, fam, packages, config): + Bcfg2.Server.Plugin.Debuggable.__init__(self) + try: + Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, + filename, + fam) + except OSError: + err = sys.exc_info()[1] + msg = "Packages: Failed to read configuration file: %s" % err + if not os.path.exists(self.name): + msg += " Have you created it?" + self.logger.error(msg) + raise Bcfg2.Server.Plugin.PluginInitError(msg) + Bcfg2.Server.Plugin.StructFile.__init__(self, filename) + self.cachepath = cachepath + self.config = config + if not os.path.exists(self.cachepath): + # create cache directory if needed + try: + os.makedirs(self.cachepath) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Could not create Packages cache at %s: %s" % + (self.cachepath, err)) + self.pkg_obj = packages + self.parsed = set() + + def toggle_debug(self): + Bcfg2.Server.Plugin.Debuggable.toggle_debug(self) + for source in self.entries: + source.toggle_debug() + + def HandleEvent(self, event=None): + Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event) + if event and event.filename != self.name: + for fname in self.extras: + fpath = None + if fname.startswith("/"): + fpath = os.path.abspath(fname) + else: + fpath = \ + os.path.abspath(os.path.join(os.path.dirname(self.name), + fname)) + if fpath == os.path.abspath(event.filename): + self.parsed.add(fname) + break + + if self.config.loaded and self.loaded: + self.logger.info("Reloading Packages plugin") + self.pkg_obj.Reload() + + @property + def loaded(self): + return sorted(list(self.parsed)) == sorted(self.extras) + + def Index(self): + Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self) + self.entries = [] + for xsource in self.xdata.findall('.//Source'): + source = self.source_from_xml(xsource) + if source is not None: + self.entries.append(source) + + def source_from_xml(self, xsource): + """ create a *Source object from its XML representation in + sources.xml """ + stype = xsource.get("type") + if stype is None: + self.logger.error("Packages: No type specified for source, " + "skipping") + return None + + try: + module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" % + stype.title()).Server.Plugins.Packages, + stype.title()) + cls = getattr(module, "%sSource" % stype.title()) + except (ImportError, AttributeError): + self.logger.error("Packages: Unknown source type %s" % stype) + return None + + try: + source = 
cls(self.cachepath, xsource, self.config) + except SourceInitError: + err = sys.exc_info()[1] + self.logger.error("Packages: %s" % err) + source = None + + return source + + def __getitem__(self, key): + return self.entries[key] + + def __repr__(self): + return "PackagesSources: %s" % repr(self.entries) + + def __str__(self): + return "PackagesSources: %s" % str(self.entries) diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py new file mode 100644 index 000000000..ada04c067 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py @@ -0,0 +1,282 @@ +import os +import re +import sys +import base64 +import Bcfg2.Server.Plugin +from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \ + HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \ + urlopen, file, cPickle + +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +def fetch_url(url): + if '@' in url: + mobj = re.match('(\w+://)([^:]+):([^@]+)@(.*)$', url) + if not mobj: + raise ValueError + user = mobj.group(2) + passwd = mobj.group(3) + url = mobj.group(1) + mobj.group(4) + auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm()) + auth.add_password(None, url, user, passwd) + install_opener(build_opener(auth)) + return urlopen(url).read() + + +class SourceInitError(Exception): + pass + + +class Source(Bcfg2.Server.Plugin.Debuggable): + mrepo_re = re.compile(r'/RPMS\.([^/]+)') + pulprepo_re = re.compile(r'pulp/repos/([^/]+)') + genericrepo_re = re.compile('https?://.*?/([^/]+)/?$') + basegroups = [] + + def __init__(self, basepath, xsource, config): + Bcfg2.Server.Plugin.Debuggable.__init__(self) + self.basepath = basepath + self.xsource = xsource + self.config = config + self.essentialpkgs = set() + + try: + self.version = xsource.find('Version').text + except AttributeError: + pass + + for key, tag in [('components', 'Component'), ('arches', 'Arch'), + ('blacklist', 'Blacklist'), + ('whitelist', 'Whitelist')]: + self.__dict__[key] = [item.text for item in xsource.findall(tag)] + + self.gpgkeys = [el.text for el in xsource.findall("GPGKey")] + + self.essential = xsource.get('essential', 'true').lower() == 'true' + self.recommended = xsource.get('recommended', 'false').lower() == 'true' + + self.rawurl = xsource.get('rawurl', '') + if self.rawurl and not self.rawurl.endswith("/"): + self.rawurl += "/" + self.url = xsource.get('url', '') + if self.url and not self.url.endswith("/"): + self.url += "/" + self.version = xsource.get('version', '') + + # build the set of conditions to see if this source applies to + # a given set of metadata + self.conditions = [] + self.groups = [] # provided for some limited backwards compat + for el in xsource.iterancestors(): + if el.tag == "Group": + if el.get("negate", "false").lower() == "true": + self.conditions.append(lambda m, el=el: + el.get("name") not in m.groups) + else: + self.groups.append(el.get("name")) + self.conditions.append(lambda m, el=el: + el.get("name") in m.groups) + elif el.tag == "Client": + if el.get("negate", "false").lower() == "true": + self.conditions.append(lambda m, el=el: + el.get("name") != m.hostname) + else: + self.conditions.append(lambda m, el=el: + el.get("name") == m.hostname) + + self.deps = dict() + self.provides = dict() + + self.cachefile = os.path.join(self.basepath, + "cache-%s" % self.cachekey) + self.url_map = [] + + @property + def cachekey(self): + return md5(cPickle.dumps([self.version, self.components, self.url, + self.rawurl, 
self.arches])).hexdigest() + + def get_relevant_groups(self, metadata): + return sorted(list(set([g for g in metadata.groups + if (g in self.basegroups or + g in self.groups or + g in self.arches)]))) + + def load_state(self): + pass + + def setup_data(self, force_update=False): + should_read = True + should_download = False + if os.path.exists(self.cachefile): + try: + self.load_state() + should_read = False + except: + self.logger.error("Packages: Cachefile %s load failed; " + "falling back to file read" % self.cachefile) + if should_read: + try: + self.read_files() + except: + self.logger.error("Packages: File read failed; " + "falling back to file download") + should_download = True + + if should_download or force_update: + try: + self.update() + self.read_files() + except: + self.logger.error("Packages: Failed to load data for Source " + "of %s. Some Packages will be missing." % + self.urls) + + def get_repo_name(self, url_map): + # try to find a sensible name for a repo + if 'components' in url_map and url_map['components']: + # use the first component as the name + rname = url_map['components'][0] + else: + name = None + for repo_re in (self.mrepo_re, + self.pulprepo_re, + self.genericrepo_re): + match = repo_re.search(url_map['url']) + if match: + name = match.group(1) + break + if name is None: + # couldn't figure out the name from the URL or URL map + # (which probably means its a screwy URL), so we just + # generate a random one + name = base64.b64encode(os.urandom(16))[:-2] + rname = "%s-%s" % (self.groups[0], name) + # see yum/__init__.py in the yum source, lines 441-449, for + # the source of this regex. yum doesn't like anything but + # string.ascii_letters, string.digits, and [-_.:]. There + # doesn't seem to be a reason for this, because yum. + return re.sub(r'[^A-Za-z0-9-_.:]', '-', rname) + + def __str__(self): + if self.rawurl: + return "%s at %s" % (self.__class__.__name__, self.rawurl) + elif self.url: + return "%s at %s" % (self.__class__.__name__, self.url) + else: + return self.__class__.__name__ + + def get_urls(self): + return [] + urls = property(get_urls) + + def get_files(self): + return [self.escape_url(url) for url in self.urls] + files = property(get_files) + + def get_vpkgs(self, metadata): + agroups = ['global'] + [a for a in self.arches + if a in metadata.groups] + vdict = dict() + for agrp in agroups: + for key, value in list(self.provides[agrp].items()): + if key not in vdict: + vdict[key] = set(value) + else: + vdict[key].update(value) + return vdict + + def is_virtual_package(self, metadata, package): + """ called to determine if a package is a virtual package. + this is only invoked if the package is not listed in the dict + returned by get_vpkgs """ + return False + + def escape_url(self, url): + return os.path.join(self.basepath, url.replace('/', '@')) + + def file_init(self): + pass + + def read_files(self): + pass + + def filter_unknown(self, unknown): + pass + + def update(self): + for url in self.urls: + self.logger.info("Packages: Updating %s" % url) + fname = self.escape_url(url) + try: + data = fetch_url(url) + file(fname, 'w').write(data) + except ValueError: + self.logger.error("Packages: Bad url string %s" % url) + raise + except HTTPError: + err = sys.exc_info()[1] + self.logger.error("Packages: Failed to fetch url %s. 
HTTP " + "response code=%s" % (url, err.code)) + raise + + def applies(self, metadata): + # check base groups + if not self.magic_groups_match(metadata): + return False + + # check Group/Client tags from sources.xml + for condition in self.conditions: + if not condition(metadata): + return False + + return True + + def get_arches(self, metadata): + return ['global'] + [a for a in self.arches if a in metadata.groups] + + def get_deps(self, metadata, pkgname): + for arch in self.get_arches(metadata): + if pkgname in self.deps[arch]: + return self.deps[arch][pkgname] + return [] + + def get_provides(self, metadata, required): + for arch in self.get_arches(metadata): + if required in self.provides[arch]: + return self.provides[arch][required] + return [] + + def is_package(self, metadata, _): + return False + + def get_package(self, metadata, package): + return package + + def get_group(self, metadata, group, ptype=None): + return [] + + def magic_groups_match(self, metadata): + """ check to see if this source applies to the given host + metadata by checking 'magic' (base) groups only, or if magic + groups are off """ + # we always check that arch matches + found_arch = False + for arch in self.arches: + if arch in metadata.groups: + found_arch = True + break + if not found_arch: + return False + + if self.config.getboolean("global", "magic_groups", + default=True) == False: + return True + else: + for group in self.basegroups: + if group in metadata.groups: + return True + return False diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py new file mode 100644 index 000000000..941203db3 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py @@ -0,0 +1,696 @@ +import os +import sys +import time +import copy +import glob +import socket +import random +import logging +import threading +import lxml.etree +from UserDict import DictMixin +from subprocess import Popen, PIPE, STDOUT +import Bcfg2.Server.Plugin +from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file +from Bcfg2.Server.Plugins.Packages.Collection import Collection +from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \ + fetch_url +from Bcfg2.Server.Plugins.Packages.PackagesConfig import PackagesConfig + +logger = logging.getLogger(__name__) + +try: + from pulp.client.consumer.config import ConsumerConfig + from pulp.client.api.repository import RepositoryAPI + from pulp.client.api.consumer import ConsumerAPI + from pulp.client.api import server + has_pulp = True +except ImportError: + has_pulp = False + +try: + import yum + has_yum = True +except ImportError: + has_yum = False + logger.info("Packages: No yum libraries found; forcing use of internal " + "dependency resolver") + +try: + import json +except ImportError: + import simplejson as json + +XP = '{http://linux.duke.edu/metadata/common}' +RP = '{http://linux.duke.edu/metadata/rpm}' +RPO = '{http://linux.duke.edu/metadata/repo}' +FL = '{http://linux.duke.edu/metadata/filelists}' + +PULPSERVER = None +PULPCONFIG = None + + +def _setup_pulp(config): + global PULPSERVER, PULPCONFIG + if not has_pulp: + msg = "Packages: Cannot create Pulp collection: Pulp libraries not found" + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginInitError(msg) + + if PULPSERVER is None: + try: + username = config.get("pulp", "username") + password = config.get("pulp", "password") + except ConfigParser.NoSectionError: + msg = "Packages: No [pulp] section found in Packages/packages.conf" + 
logger.error(msg) + raise Bcfg2.Server.Plugin.PluginInitError(msg) + except ConfigParser.NoOptionError: + msg = "Packages: Required option not found in Packages/packages.conf: %s" % sys.exc_info()[1] + logger.error(msg) + raise Bcfg2.Server.Plugin.PluginInitError(msg) + + PULPCONFIG = ConsumerConfig() + serveropts = PULPCONFIG.server + + PULPSERVER = server.PulpServer(serveropts['host'], + int(serveropts['port']), + serveropts['scheme'], + serveropts['path']) + PULPSERVER.set_basic_auth_credentials(username, password) + server.set_active_server(PULPSERVER) + return PULPSERVER + + +class YumCollection(Collection): + # options that are included in the [yum] section but that should + # not be included in the temporary yum.conf we write out + option_blacklist = ["use_yum_libraries", "helper"] + + def __init__(self, metadata, sources, basepath, debug=False): + Collection.__init__(self, metadata, sources, basepath, debug=debug) + self.keypath = os.path.join(self.basepath, "keys") + + if len(sources): + self.config = sources[0].config + else: + self.config = PackageConfig('Packages') + + if self.use_yum: + self.cachefile = os.path.join(self.cachepath, + "cache-%s" % self.cachekey) + if not os.path.exists(self.cachefile): + os.mkdir(self.cachefile) + + self.configdir = os.path.join(self.basepath, "yum") + if not os.path.exists(self.configdir): + os.mkdir(self.configdir) + self.cfgfile = os.path.join(self.configdir, + "%s-yum.conf" % self.cachekey) + self.write_config() + if has_pulp and self.has_pulp_sources: + _setup_pulp(self.config) + + @property + def helper(self): + return self.config.get("yum", "helper", + default="/usr/sbin/bcfg2-yum-helper") + + @property + def use_yum(self): + return has_yum and self.config.getboolean("yum", "use_yum_libraries", + default=False) + + @property + def has_pulp_sources(self): + """ see if there are any pulp sources to handle """ + for source in self.sources: + if source.pulp_id: + return True + return False + + def write_config(self): + if not os.path.exists(self.cfgfile): + yumconf = self.get_config(raw=True) + yumconf.add_section("main") + + mainopts = dict(cachedir=self.cachefile, + keepcache="0", + sslverify="0", + debuglevel="0", + reposdir="/dev/null") + try: + for opt in self.config.options("yum"): + if opt not in self.option_blacklist: + mainopts[opt] = self.config.get("yum", opt) + except ConfigParser.NoSectionError: + pass + + for opt, val in list(mainopts.items()): + yumconf.set("main", opt, val) + + yumconf.write(open(self.cfgfile, 'w')) + + def get_config(self, raw=False): + config = ConfigParser.SafeConfigParser() + for source in self.sources: + # get_urls() loads url_map as a side-effect + source.get_urls() + for url_map in source.url_map: + if url_map['arch'] in self.metadata.groups: + basereponame = source.get_repo_name(url_map) + reponame = basereponame + + added = False + while not added: + try: + config.add_section(reponame) + added = True + except ConfigParser.DuplicateSectionError: + match = re.match("-(\d)", reponame) + if match: + rid = int(match.group(1)) + 1 + else: + rid = 1 + reponame = "%s-%d" % (basereponame, rid) + + config.set(reponame, "name", reponame) + config.set(reponame, "baseurl", url_map['url']) + config.set(reponame, "enabled", "1") + if len(source.gpgkeys): + config.set(reponame, "gpgcheck", "1") + config.set(reponame, "gpgkey", + " ".join(source.gpgkeys)) + else: + config.set(reponame, "gpgcheck", "0") + + if len(source.blacklist): + config.set(reponame, "exclude", + " ".join(source.blacklist)) + if 
len(source.whitelist): + config.set(reponame, "includepkgs", + " ".join(source.whitelist)) + + if raw: + return config + else: + # configparser only writes to file, so we have to use a + # StringIO object to get the data out as a string + buf = StringIO() + config.write(buf) + return "# This config was generated automatically by the Bcfg2 " \ + "Packages plugin\n\n" + buf.getvalue() + + def build_extra_structures(self, independent): + """ build list of gpg keys to be added to the specification by + validate_structures() """ + needkeys = set() + for source in self.sources: + for key in source.gpgkeys: + needkeys.add(key) + + if len(needkeys): + if has_yum: + # this must be be has_yum, not use_yum, because + # regardless of whether the user wants to use the yum + # resolver we want to include gpg key data + keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey", + type=self.ptype, origin='Packages') + else: + self.logger.warning("GPGKeys were specified for yum sources in " + "sources.xml, but no yum libraries were " + "found") + self.logger.warning("GPG key version/release data cannot be " + "determined automatically") + self.logger.warning("Install yum libraries, or manage GPG keys " + "manually") + keypkg = None + + for key in needkeys: + # figure out the path of the key on the client + keydir = self.config.get("global", "gpg_keypath", + default="/etc/pki/rpm-gpg") + remotekey = os.path.join(keydir, os.path.basename(key)) + localkey = os.path.join(self.keypath, os.path.basename(key)) + kdata = open(localkey).read() + + # copy the key to the client + keypath = lxml.etree.Element("BoundPath", name=remotekey, + encoding='ascii', + owner='root', group='root', + type='file', perms='0644', + important='true') + keypath.text = kdata + + # hook to add version/release info if possible + self._add_gpg_instances(keypkg, kdata, localkey, remotekey) + independent.append(keypath) + if keypkg is not None: + independent.append(keypkg) + + if self.has_pulp_sources: + consumerapi = ConsumerAPI() + consumer = self._get_pulp_consumer(consumerapi=consumerapi) + if consumer is None: + consumer = consumerapi.create(self.metadata.hostname, + self.metadata.hostname) + lxml.etree.SubElement(independent, "BoundAction", + name="pulp-update", timing="pre", + when="always", status="check", + command="pulp-consumer consumer update") + + for source in self.sources: + # each pulp source can only have one arch, so we don't + # have to check the arch in url_map + if (source.pulp_id and + source.pulp_id not in consumer['repoids']): + consumerapi.bind(self.metadata.hostname, source.pulp_id) + + crt = lxml.etree.SubElement(independent, "BoundPath", + name="/etc/pki/consumer/cert.pem", + type="file", owner="root", + group="root", perms="0644") + crt.text = consumerapi.certificate(self.metadata.hostname) + + def _get_pulp_consumer(self, consumerapi=None): + if consumerapi is None: + consumerapi = ConsumerAPI() + consumer = None + try: + consumer = consumerapi.consumer(self.metadata.hostname) + except server.ServerRequestError: + # consumer does not exist + pass + except socket.error: + err = sys.exc_info()[1] + self.logger.error("Packages: Could not contact Pulp server: %s" % + err) + except: + err = sys.exc_info()[1] + self.logger.error("Packages: Unknown error querying Pulp server: %s" + % err) + return consumer + + def _add_gpg_instances(self, keyentry, keydata, localkey, remotekey): + """ add gpg keys to the specification to ensure they get + installed """ + # this must be be has_yum, not use_yum, because regardless 
of + # whether the user wants to use the yum resolver we want to + # include gpg key data + if not has_yum: + return + + try: + kinfo = yum.misc.getgpgkeyinfo(keydata) + version = yum.misc.keyIdToRPMVer(kinfo['keyid']) + release = yum.misc.keyIdToRPMVer(kinfo['timestamp']) + + lxml.etree.SubElement(keyentry, 'Instance', + version=version, + release=release, + simplefile=remotekey) + except ValueError: + err = sys.exc_info()[1] + self.logger.error("Packages: Could not read GPG key %s: %s" % + (localkey, err)) + + def is_package(self, package): + if not self.use_yum: + return Collection.is_package(self, package) + elif isinstance(package, tuple): + if package[1] is None and package[2] == (None, None, None): + package = package[0] + else: + return None + else: + # this should really never get called; it's just provided + # for API completeness + return self.call_helper("is_package", package) + + def is_virtual_package(self, package): + if not self.use_yum: + return Collection.is_virtual_package(self, package) + else: + # this should really never get called; it's just provided + # for API completeness + return self.call_helper("is_virtual_package", package) + + def get_deps(self, package): + if not self.use_yum: + return Collection.get_deps(self, package) + else: + # this should really never get called; it's just provided + # for API completeness + return self.call_helper("get_deps", package) + + def get_provides(self, required, all=False, silent=False): + if not self.use_yum: + return Collection.get_provides(self, package) + else: + # this should really never get called; it's just provided + # for API completeness + return self.call_helper("get_provides", package) + + def get_group(self, group, ptype="default"): + if not self.use_yum: + self.logger.warning("Packages: Package groups are not supported by " + "Bcfg2's internal Yum dependency generator") + return [] + + if group.startswith("@"): + group = group[1:] + + pkgs = self.call_helper("get_group", dict(group=group, type=ptype)) + return pkgs + + def complete(self, packagelist): + if not self.use_yum: + return Collection.complete(self, packagelist) + + packages = set() + unknown = set(packagelist) + + if unknown: + result = \ + self.call_helper("complete", + dict(packages=list(unknown), + groups=list(self.get_relevant_groups()))) + if result and "packages" in result and "unknown" in result: + # we stringify every package because it gets returned + # in unicode; set.update() doesn't work if some + # elements are unicode and other are strings. (I.e., + # u'foo' and 'foo' get treated as unique elements.) + packages.update([str(p) for p in result['packages']]) + unknown = set([str(p) for p in result['unknown']]) + + self.filter_unknown(unknown) + + return packages, unknown + + def call_helper(self, command, input=None): + """ Make a call to bcfg2-yum-helper. The yum libs have + horrific memory leaks, so apparently the right way to get + around that in long-running processes it to have a short-lived + helper. No, seriously -- check out the yum-updatesd code. + It's pure madness. """ + # it'd be nice if we could change this to be more verbose if + # -v was given to bcfg2-server, but Collection objects don't + # get the 'setup' variable, so we don't know how verbose + # bcfg2-server is. It'd also be nice if we could tell yum to + # log to syslog. So would a unicorn. 
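+        # A sketch of the JSON exchanged with the helper for the "complete"
+        # command, inferred from complete() above; package and group names
+        # are illustrative only:
+        #   stdin:  {"packages": ["bcfg2"], "groups": ["centos", "x86_64"]}
+        #   stdout: {"packages": ["bcfg2", "python-lxml"], "unknown": []}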
+ cmd = [self.helper, "-c", self.cfgfile] + if self.debug_flag: + cmd.append("-v") + cmd.append(command) + self.debug_log("Packages: running %s" % " ".join(cmd)) + try: + helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Packages: Failed to execute %s: %s" % + (" ".join(cmd), err)) + return None + + if input: + idata = json.dumps(input) + (stdout, stderr) = helper.communicate(idata) + else: + (stdout, stderr) = helper.communicate() + rv = helper.wait() + if rv: + self.logger.error("Packages: error running bcfg2-yum-helper " + "(returned %d): %s" % (rv, stderr)) + elif self.debug_flag: + self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" % + stderr) + try: + return json.loads(stdout) + except ValueError: + err = sys.exc_info()[1] + self.logger.error("Packages: error reading bcfg2-yum-helper " + "output: %s" % err) + return None + + def setup_data(self, force_update=False): + if not self.use_yum: + return Collection.setup_data(self, force_update) + + if force_update: + # we call this twice: one to clean up data from the old + # config, and once to clean up data from the new config + self.call_helper("clean") + + os.unlink(self.cfgfile) + self.write_config() + + if force_update: + self.call_helper("clean") + + +class YumSource(Source): + basegroups = ['yum', 'redhat', 'centos', 'fedora'] + ptype = 'yum' + + def __init__(self, basepath, xsource, config): + Source.__init__(self, basepath, xsource, config) + self.pulp_id = None + if has_pulp and xsource.get("pulp_id"): + self.pulp_id = xsource.get("pulp_id") + + _setup_pulp(self.config) + repoapi = RepositoryAPI() + try: + self.repo = repoapi.repository(self.pulp_id) + self.gpgkeys = [os.path.join(PULPCONFIG.cds['keyurl'], key) + for key in repoapi.listkeys(self.pulp_id)] + except server.ServerRequestError: + err = sys.exc_info()[1] + if err[0] == 401: + msg = "Packages: Error authenticating to Pulp: %s" % err[1] + elif err[0] == 404: + msg = "Packages: Pulp repo id %s not found: %s" % \ + (self.pulp_id, err[1]) + else: + msg = "Packages: Error %d fetching pulp repo %s: %s" % \ + (err[0], self.pulp_id, err[1]) + raise SourceInitError(msg) + except socket.error: + err = sys.exc_info()[1] + raise SourceInitError("Could not contact Pulp server: %s" % err) + except: + err = sys.exc_info()[1] + raise SourceInitError("Unknown error querying Pulp server: %s" % + err) + self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'], + self.repo['relative_path']) + self.arches = [self.repo['arch']] + + if not self.rawurl: + self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/" + else: + self.baseurl = self.rawurl + self.packages = dict() + self.deps = dict([('global', dict())]) + self.provides = dict([('global', dict())]) + self.filemap = dict([(x, dict()) + for x in ['global'] + self.arches]) + self.needed_paths = set() + self.file_to_arch = dict() + + @property + def use_yum(self): + return has_yum and self.config.getboolean("yum", "use_yum_libraries", + default=False) + + def save_state(self): + if not self.use_yum: + cache = file(self.cachefile, 'wb') + cPickle.dump((self.packages, self.deps, self.provides, + self.filemap, self.url_map), cache, 2) + cache.close() + + + def load_state(self): + if not self.use_yum: + data = file(self.cachefile) + (self.packages, self.deps, self.provides, + self.filemap, self.url_map) = cPickle.load(data) + + def get_urls(self): + surls = list() + self.url_map = [] + for arch in self.arches: + if self.url: + usettings = 
[{'version':self.version, 'component':comp, + 'arch':arch} + for comp in self.components] + else: # rawurl given + usettings = [{'version':self.version, 'component':None, + 'arch':arch}] + + for setting in usettings: + setting['url'] = self.baseurl % setting + self.url_map.append(copy.deepcopy(setting)) + surls.append((arch, [setting['url'] for setting in usettings])) + urls = [] + for (sarch, surl_list) in surls: + for surl in surl_list: + urls.extend(self._get_urls_from_repodata(surl, sarch)) + return urls + urls = property(get_urls) + + def _get_urls_from_repodata(self, url, arch): + if self.use_yum: + return [url] + + rmdurl = '%srepodata/repomd.xml' % url + try: + repomd = fetch_url(rmdurl) + xdata = lxml.etree.XML(repomd) + except ValueError: + self.logger.error("Packages: Bad url string %s" % rmdurl) + return [] + except HTTPError: + err = sys.exc_info()[1] + self.logger.error("Packages: Failed to fetch url %s. code=%s" % + (rmdurl, err.code)) + return [] + except lxml.etree.XMLSyntaxError: + err = sys.exc_info()[1] + self.logger.error("Packages: Failed to process metadata at %s: %s" % + (rmdurl, err)) + return [] + + urls = [] + for elt in xdata.findall(RPO + 'data'): + if elt.get('type') in ['filelists', 'primary']: + floc = elt.find(RPO + 'location') + fullurl = url + floc.get('href') + urls.append(fullurl) + self.file_to_arch[self.escape_url(fullurl)] = arch + return urls + + def read_files(self): + # we have to read primary.xml first, and filelists.xml afterwards; + primaries = list() + filelists = list() + for fname in self.files: + if fname.endswith('primary.xml.gz'): + primaries.append(fname) + elif fname.endswith('filelists.xml.gz'): + filelists.append(fname) + + for fname in primaries: + farch = self.file_to_arch[fname] + fdata = lxml.etree.parse(fname).getroot() + self.parse_primary(fdata, farch) + for fname in filelists: + farch = self.file_to_arch[fname] + fdata = lxml.etree.parse(fname).getroot() + self.parse_filelist(fdata, farch) + + # merge data + sdata = list(self.packages.values()) + try: + self.packages['global'] = copy.deepcopy(sdata.pop()) + except IndexError: + logger.error("Packages: No packages in repo") + while sdata: + self.packages['global'] = \ + self.packages['global'].intersection(sdata.pop()) + + for key in self.packages: + if key == 'global': + continue + self.packages[key] = \ + self.packages[key].difference(self.packages['global']) + self.save_state() + + def parse_filelist(self, data, arch): + if arch not in self.filemap: + self.filemap[arch] = dict() + for pkg in data.findall(FL + 'package'): + for fentry in pkg.findall(FL + 'file'): + if fentry.text in self.needed_paths: + if fentry.text in self.filemap[arch]: + self.filemap[arch][fentry.text].add(pkg.get('name')) + else: + self.filemap[arch][fentry.text] = \ + set([pkg.get('name')]) + + def parse_primary(self, data, arch): + if arch not in self.packages: + self.packages[arch] = set() + if arch not in self.deps: + self.deps[arch] = dict() + if arch not in self.provides: + self.provides[arch] = dict() + for pkg in data.getchildren(): + if not pkg.tag.endswith('package'): + continue + pkgname = pkg.find(XP + 'name').text + self.packages[arch].add(pkgname) + + pdata = pkg.find(XP + 'format') + self.deps[arch][pkgname] = set() + pre = pdata.find(RP + 'requires') + if pre is not None: + for entry in pre.getchildren(): + self.deps[arch][pkgname].add(entry.get('name')) + if entry.get('name').startswith('/'): + self.needed_paths.add(entry.get('name')) + pro = pdata.find(RP + 'provides') + if pro != 
None:
+                for entry in pro.getchildren():
+                    prov = entry.get('name')
+                    if prov not in self.provides[arch]:
+                        self.provides[arch][prov] = list()
+                    self.provides[arch][prov].append(pkgname)
+
+    def is_package(self, metadata, item):
+        arch = [a for a in self.arches if a in metadata.groups]
+        if not arch:
+            return False
+        return ((item in self.packages['global'] or
+                 item in self.packages[arch[0]]) and
+                item not in self.blacklist and
+                (len(self.whitelist) == 0 or item in self.whitelist))
+
+    def get_vpkgs(self, metadata):
+        if self.use_yum:
+            return dict()
+
+        rv = Source.get_vpkgs(self, metadata)
+        for arch, fmdata in list(self.filemap.items()):
+            if arch not in metadata.groups and arch != 'global':
+                continue
+            for filename, pkgs in list(fmdata.items()):
+                rv[filename] = pkgs
+        return rv
+
+    def filter_unknown(self, unknown):
+        if self.use_yum:
+            filtered = set()
+            for unk in unknown:
+                try:
+                    if unk.startswith('rpmlib'):
+                        filtered.add(unk)
+                except AttributeError:
+                    try:
+                        if unk[0].startswith('rpmlib'):
+                            filtered.add(unk)
+                    except (IndexError, AttributeError):
+                        pass
+        else:
+            filtered = set([u for u in unknown if u.startswith('rpmlib')])
+        unknown.difference_update(filtered)
+
+    def setup_data(self, force_update=False):
+        if not self.use_yum:
+            Source.setup_data(self, force_update=force_update)
+
+    def get_repo_name(self, url_map):
+        if self.pulp_id:
+            return self.pulp_id
+        else:
+            return Source.get_repo_name(self, url_map)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
new file mode 100644
index 000000000..e4793a28d
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -0,0 +1,269 @@
+import os
+import sys
+import time
+import copy
+import glob
+import shutil
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
+from Bcfg2.Server.Plugins.Packages import Collection
+from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+from Bcfg2.Server.Plugins.Packages.PackagesConfig import PackagesConfig
+
+class Packages(Bcfg2.Server.Plugin.Plugin,
+               Bcfg2.Server.Plugin.StructureValidator,
+               Bcfg2.Server.Plugin.Generator,
+               Bcfg2.Server.Plugin.Connector):
+    name = 'Packages'
+    conflicts = ['Pkgmgr']
+    experimental = True
+    __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+        Bcfg2.Server.Plugin.Generator.__init__(self)
+        Bcfg2.Server.Plugin.Connector.__init__(self)
+
+        self.sentinels = set()
+        self.cachepath = os.path.join(self.data, 'cache')
+        self.keypath = os.path.join(self.data, 'keys')
+        if not os.path.exists(self.keypath):
+            # create key directory if needed
+            os.makedirs(self.keypath)
+
+        # set up config files
+        self.config = PackagesConfig(self)
+        self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
+                                       self.cachepath, core.fam, self,
+                                       self.config)
+
+    def toggle_debug(self):
+        Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
+        self.sources.toggle_debug()
+
+    @property
+    def disableResolver(self):
+        try:
+            return not self.config.getboolean("global", "resolver")
+        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+            return False
+        except ValueError:
+            # for historical reasons we also accept "enabled" and
+            # "disabled", which are not handled according to the
+            # Python
docs but appear to be handled properly by + # ConfigParser in at least some versions + return self.config.get("global", "resolver", + default="enabled").lower() == "disabled" + + @property + def disableMetaData(self): + try: + return not self.config.getboolean("global", "resolver") + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + return False + except ValueError: + # for historical reasons we also accept "enabled" and + # "disabled" + return self.config.get("global", "metadata", + default="enabled").lower() == "disabled" + + def create_config(self, entry, metadata): + """ create yum/apt config for the specified host """ + attrib = {'encoding': 'ascii', + 'owner': 'root', + 'group': 'root', + 'type': 'file', + 'perms': '0644'} + + collection = self._get_collection(metadata) + entry.text = collection.get_config() + for (key, value) in list(attrib.items()): + entry.attrib.__setitem__(key, value) + + def HandleEntry(self, entry, metadata): + if entry.tag == 'Package': + collection = self._get_collection(metadata) + entry.set('version', 'auto') + entry.set('version', self.config.get("global", + "version", + default="auto")) + entry.set('type', collection.ptype) + elif entry.tag == 'Path': + if (entry.get("name") == self.config.get("global", "yum_config", + default="") or + entry.get("name") == self.config.get("global", "apt_config", + default="")): + self.create_config(entry, metadata) + + def HandlesEntry(self, entry, metadata): + if entry.tag == 'Package': + if self.config.getboolean("global", "magic_groups", + default=True) == True: + collection = self._get_collection(metadata) + if collection.magic_groups_match(): + return True + else: + return True + elif entry.tag == 'Path': + # managed entries for yum/apt configs + if (entry.get("name") == self.config.get("global", "yum_config", + default="") or + entry.get("name") == self.config.get("global", "apt_config", + default="")): + return True + return False + + def validate_structures(self, metadata, structures): + '''Ensure client configurations include all needed prerequisites + + Arguments: + metadata - client metadata instance + structures - a list of structure-stage entry combinations + ''' + collection = self._get_collection(metadata) + indep = lxml.etree.Element('Independent') + self._build_packages(metadata, indep, structures, + collection=collection) + collection.build_extra_structures(indep) + structures.append(indep) + + def _build_packages(self, metadata, independent, structures, + collection=None): + """ build list of packages that need to be included in the + specification by validate_structures() """ + if self.disableResolver: + # Config requests no resolver + return + + if collection is None: + collection = self._get_collection(metadata) + # initial is the set of packages that are explicitly specified + # in the configuration + initial = set() + # base is the set of initial packages with groups expanded + base = set() + # essential pkgs are those marked as such by the distribution + essential = collection.get_essential() + to_remove = [] + for struct in structures: + for pkg in struct.xpath('//Package | //BoundPackage'): + if pkg.get("name"): + initial.add(pkg.get("name")) + elif pkg.get("group"): + try: + if pkg.get("type"): + gpkgs = collection.get_group(pkg.get("group"), + ptype=pkg.get("type")) + else: + gpkgs = collection.get_group(pkg.get("group")) + base.update(gpkgs) + except TypeError: + raise + self.logger.error("Could not resolve group %s" % + pkg.get("group")) + to_remove.append(pkg) + else: + 
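
The resolver work happens in collection.complete(base), which expands the explicit and essential packages to their full transitive dependency closure. The Collection implementations also handle provides, virtual packages, and per-architecture data; what follows is only a minimal sketch of the closure step, against a hypothetical flat name-to-dependencies mapping:

    def complete(base, deps):
        """Expand 'base' to its transitive dependency closure.

        'deps' maps package name -> set of direct dependency names.
        Returns (resolved packages, entries no source knows about).
        """
        packages = set()
        unknown = set()
        work = set(base)
        while work:
            pkg = work.pop()
            if pkg in packages or pkg in unknown:
                continue
            if pkg not in deps:
                unknown.add(pkg)      # no source provides this entry
                continue
            packages.add(pkg)
            work.update(deps[pkg])    # queue direct dependencies
        return packages, unknown

    # complete(set(['vim']), {'vim': set(['libc6']), 'libc6': set()})
    # -> (set(['vim', 'libc6']), set())
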
self.logger.error("Packages: Malformed Package: %s" % + lxml.etree.tostring(pkg)) + base.update(initial | essential) + for el in to_remove: + el.getparent().remove(el) + + packages, unknown = collection.complete(base) + if unknown: + self.logger.info("Packages: Got %d unknown entries" % len(unknown)) + self.logger.info("Packages: %s" % list(unknown)) + newpkgs = list(packages.difference(initial)) + self.debug_log("Packages: %d initial, %d complete, %d new" % + (len(initial), len(packages), len(newpkgs))) + newpkgs.sort() + for pkg in newpkgs: + lxml.etree.SubElement(independent, 'BoundPackage', name=pkg, + version=self.config.get("global", "version", + default="auto"), + type=collection.ptype, origin='Packages') + + def Refresh(self): + '''Packages.Refresh() => True|False\nReload configuration + specification and download sources\n''' + self._load_config(force_update=True) + return True + + def Reload(self): + '''Packages.Refresh() => True|False\nReload configuration + specification and sources\n''' + self._load_config() + return True + + def _load_config(self, force_update=False): + ''' + Load the configuration data and setup sources + + Keyword args: + force_update Force downloading repo data + ''' + self._load_sources(force_update) + self._load_gpg_keys(force_update) + + def _load_sources(self, force_update): + """ Load sources from the config """ + self.sentinels = set() + cachefiles = set() + + for collection in list(Collection.collections.values()): + cachefiles.update(collection.cachefiles) + if not self.disableMetaData: + collection.setup_data(force_update) + self.sentinels.update(collection.basegroups) + + Collection.clear_cache() + + for source in self.sources: + cachefiles.add(source.cachefile) + if not self.disableMetaData: + source.setup_data(force_update) + + for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")): + if cfile not in cachefiles: + try: + if os.path.isdir(cfile): + shutil.rmtree(cfile) + else: + os.unlink(cfile) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Packages: Could not remove cache file " + "%s: %s" % (cfile, err)) + + def _load_gpg_keys(self, force_update): + """ Load gpg keys from the config """ + keyfiles = [] + keys = [] + for source in self.sources: + for key in source.gpgkeys: + localfile = os.path.join(self.keypath, + os.path.basename(key.rstrip("/"))) + if localfile not in keyfiles: + keyfiles.append(localfile) + if ((force_update and key not in keys) or + not os.path.exists(localfile)): + self.logger.info("Packages: Downloading and parsing %s" % key) + response = urlopen(key) + open(localfile, 'w').write(response.read()) + keys.append(key) + + for kfile in glob.glob(os.path.join(self.keypath, "*")): + if kfile not in keyfiles: + os.unlink(kfile) + + def _get_collection(self, metadata): + return Collection.factory(metadata, self.sources, self.data, + debug=self.debug_flag) + + def get_additional_data(self, metadata): + collection = self._get_collection(metadata) + return dict(sources=collection.get_additional_data()) diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py new file mode 100644 index 000000000..e9254cdcc --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py @@ -0,0 +1,169 @@ +'''This module implements a package management scheme for all images''' + +import logging +import re +import Bcfg2.Server.Plugin +import lxml +try: + set +except NameError: + from sets import Set as set + +logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr') + + +class FuzzyDict(dict): + fuzzy 
= re.compile('(?P<name>.*):(?P<alist>\S+(,\S+)*)') + + def __getitem__(self, key): + if isinstance(key, str): + mdata = self.fuzzy.match(key) + if mdata: + return dict.__getitem__(self, mdata.groupdict()['name']) + else: + print("got non-string key %s" % str(key)) + return dict.__getitem__(self, key) + + def has_key(self, key): + if isinstance(key, str): + mdata = self.fuzzy.match(key) + if self.fuzzy.match(key): + return dict.has_key(self, mdata.groupdict()['name']) + return dict.has_key(self, key) + + def get(self, key, default=None): + try: + return self.__getitem__(key) + except: + if default: + return default + raise + + +class PNode(Bcfg2.Server.Plugin.INode): + """PNode has a list of packages available at a + particular group intersection. + """ + splitters = {'rpm': re.compile('^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \ + '(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'), + 'encap': re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')} + ignore = ['Package'] + + def Match(self, metadata, data, entry=lxml.etree.Element("None")): + """Return a dictionary of package mappings.""" + if self.predicate(metadata, entry): + for key in self.contents: + try: + data[key].update(self.contents[key]) + except: + data[key] = FuzzyDict() + data[key].update(self.contents[key]) + for child in self.children: + child.Match(metadata, data) + + def __init__(self, data, pdict, parent=None): + # copy local attributes to all child nodes if no local attribute exists + if 'Package' not in pdict: + pdict['Package'] = set() + for child in data.getchildren(): + attrs = set(data.attrib.keys()).difference(child.attrib.keys() + ['name']) + for attr in attrs: + try: + child.set(attr, data.get(attr)) + except: + # don't fail on things like comments and other immutable elements + pass + Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent) + if 'Package' not in self.contents: + self.contents['Package'] = FuzzyDict() + for pkg in data.findall('./Package'): + if 'name' in pkg.attrib and pkg.get('name') not in pdict['Package']: + pdict['Package'].add(pkg.get('name')) + if pkg.get('name') != None: + self.contents['Package'][pkg.get('name')] = {} + if pkg.getchildren(): + self.contents['Package'][pkg.get('name')]['__children__'] \ + = pkg.getchildren() + if 'simplefile' in pkg.attrib: + pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile'))) + self.contents['Package'][pkg.get('name')].update(pkg.attrib) + else: + if 'file' in pkg.attrib: + if 'multiarch' in pkg.attrib: + archs = pkg.get('multiarch').split() + srcs = pkg.get('srcs', pkg.get('multiarch')).split() + url = ' '.join(["%s/%s" % (pkg.get('uri'), + pkg.get('file') % {'src':srcs[idx], + 'arch':archs[idx]}) + for idx in range(len(archs))]) + pkg.set('url', url) + else: + pkg.set('url', '%s/%s' % (pkg.get('uri'), + pkg.get('file'))) + if pkg.get('type') in self.splitters and pkg.get('file') != None: + mdata = self.splitters[pkg.get('type')].match(pkg.get('file')) + if not mdata: + logger.error("Failed to match pkg %s" % pkg.get('file')) + continue + pkgname = mdata.group('name') + self.contents['Package'][pkgname] = mdata.groupdict() + self.contents['Package'][pkgname].update(pkg.attrib) + if pkg.attrib.get('file'): + self.contents['Package'][pkgname]['url'] = pkg.get('url') + self.contents['Package'][pkgname]['type'] = pkg.get('type') + if pkg.get('verify'): + self.contents['Package'][pkgname]['verify'] = pkg.get('verify') + if pkg.get('multiarch'): + self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch') 
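
The fuzzy pattern lets a client request a package as name:arch1,arch2,...; dictionary lookups strip the architecture list and match on the bare name, and the same split is later used (see BindEntry below) to prune Instance elements for unrequested architectures. The pattern in isolation, with a hypothetical package:

    import re

    fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)')
    mdata = fuzzy.match('openssl:i386,x86_64')
    print(mdata.group('name'))               # -> openssl
    print(mdata.group('alist').split(','))   # -> ['i386', 'x86_64']
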
+ if pkgname not in pdict['Package']: + pdict['Package'].add(pkgname) + if pkg.getchildren(): + self.contents['Package'][pkgname]['__children__'] = pkg.getchildren() + else: + self.contents['Package'][pkg.get('name')].update(pkg.attrib) + + +class PkgSrc(Bcfg2.Server.Plugin.XMLSrc): + """PkgSrc files contain a PNode hierarchy that + returns matching package entries. + """ + __node__ = PNode + __cacheobj__ = FuzzyDict + + +class Pkgmgr(Bcfg2.Server.Plugin.PrioDir): + """This is a generator that handles package assignments.""" + name = 'Pkgmgr' + __author__ = 'bcfg-dev@mcs.anl.gov' + __child__ = PkgSrc + __element__ = 'Package' + + def HandleEvent(self, event): + '''Handle events and update dispatch table''' + Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event) + for src in list(self.entries.values()): + for itype, children in list(src.items.items()): + for child in children: + try: + self.Entries[itype][child] = self.BindEntry + except KeyError: + self.Entries[itype] = FuzzyDict([(child, + self.BindEntry)]) + + def BindEntry(self, entry, metadata): + """Bind data for entry, and remove instances that are not requested.""" + pname = entry.get('name') + Bcfg2.Server.Plugin.PrioDir.BindEntry(self, entry, metadata) + if entry.findall('Instance'): + mdata = FuzzyDict.fuzzy.match(pname) + if mdata: + arches = mdata.group('alist').split(',') + [entry.remove(inst) for inst in \ + entry.findall('Instance') \ + if inst.get('arch') not in arches] + + def HandlesEntry(self, entry, metadata): + return entry.tag == 'Package' and entry.get('name').split(':')[0] in list(self.Entries['Package'].keys()) + + def HandleEntry(self, entry, metadata): + self.BindEntry(entry, metadata) diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py new file mode 100644 index 000000000..af908eee8 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Probes.py @@ -0,0 +1,285 @@ +import time +import lxml.etree +import operator +import re + +try: + import json + has_json = True +except ImportError: + try: + import simplejson as json + has_json = True + except ImportError: + has_json = False + +try: + import syck + has_syck = True +except ImportError: + has_syck = False + try: + import yaml + has_yaml = True + except ImportError: + has_yaml = False + +import Bcfg2.Server.Plugin + +specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)") +probe_matcher = re.compile("(.*/)?(?P<basename>\S+)") + +class ClientProbeDataSet(dict): + """ dict of probe => [probe data] that records a for each host """ + def __init__(self, *args, **kwargs): + if "timestamp" in kwargs and kwargs['timestamp'] is not None: + self.timestamp = kwargs.pop("timestamp") + else: + self.timestamp = time.time() + dict.__init__(self, *args, **kwargs) + + +class ProbeData(object): + """ a ProbeData object emulates a str object, but also has .xdata + and .json properties to provide convenient ways to use ProbeData + objects as XML or JSON data """ + def __init__(self, data): + self.data = data + self._xdata = None + self._json = None + self._yaml = None + + def __str__(self): + return str(self.data) + + def __repr__(self): + return repr(self.data) + + def __getattr__(self, name): + """ make ProbeData act like a str object """ + return getattr(self.data, name) + + def __complex__(self): + return complex(self.data) + + def __int__(self): + return int(self.data) + + def __long__(self): + return long(self.data) + + def __float__(self): + return float(self.data) + + def __eq__(self, other): + 
return str(self) == str(other) + + def __ne__(self, other): + return str(self) != str(other) + + def __gt__(self, other): + return str(self) > str(other) + + def __lt__(self, other): + return str(self) < str(other) + + def __ge__(self, other): + return self > other or self == other + + def __le__(self, other): + return self < other or self == other + + @property + def xdata(self): + if self._xdata is None: + try: + self._xdata = lxml.etree.XML(self.data) + except lxml.etree.XMLSyntaxError: + pass + return self._xdata + + @property + def json(self): + if self._json is None and has_json: + try: + self._json = json.loads(self.data) + except ValueError: + pass + return self._json + + @property + def yaml(self): + if self._yaml is None: + if has_yaml: + try: + self._yaml = yaml.load(self.data) + except yaml.YAMLError: + pass + elif has_syck: + try: + self._yaml = syck.load(self.data) + except syck.error: + pass + return self._yaml + + +class ProbeSet(Bcfg2.Server.Plugin.EntrySet): + ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$") + + def __init__(self, path, fam, encoding, plugin_name): + fpattern = '[0-9A-Za-z_\-]+' + self.plugin_name = plugin_name + Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, + Bcfg2.Server.Plugin.SpecificData, + encoding) + fam.AddMonitor(path, self) + self.bangline = re.compile('^#!(?P<interpreter>.*)$') + + def HandleEvent(self, event): + if event.filename != self.path: + if (event.code2str == 'changed' and + event.filename.endswith("probed.xml") and + event.filename not in self.entries): + # for some reason, probed.xml is particularly prone to + # getting changed events before created events, + # because gamin is the worst ever. anyhow, we + # specifically handle it here to avoid a warning on + # every single server startup. 
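
ProbeData parses its payload lazily: the raw string is always available, while the .xdata, .json, and .yaml properties each parse on first access and cache the result, returning None if the payload is not valid XML/JSON/YAML or no parser module is installed. A short usage sketch with a hypothetical probe result:

    data = ProbeData('{"kernel": "3.2.0-4-amd64"}')
    print(data.strip())              # str methods are proxied via __getattr__
    if data.json is not None:
        print(data.json['kernel'])   # parsed once, then cached in _json
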
+ self.entry_init(event) + return + return self.handle_event(event) + + def get_probe_data(self, metadata): + ret = [] + build = dict() + candidates = self.get_matching(metadata) + candidates.sort(key=operator.attrgetter('specific')) + for entry in candidates: + rem = specific_probe_matcher.match(entry.name) + if not rem: + rem = probe_matcher.match(entry.name) + pname = rem.group('basename') + if pname not in build: + build[pname] = entry + + for (name, entry) in list(build.items()): + probe = lxml.etree.Element('probe') + probe.set('name', name.split('/')[-1]) + probe.set('source', self.plugin_name) + probe.text = entry.data + match = self.bangline.match(entry.data.split('\n')[0]) + if match: + probe.set('interpreter', match.group('interpreter')) + else: + probe.set('interpreter', '/bin/sh') + ret.append(probe) + return ret + + +class Probes(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Probing, + Bcfg2.Server.Plugin.Connector): + """A plugin to gather information from a client machine.""" + name = 'Probes' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + Bcfg2.Server.Plugin.Probing.__init__(self) + + try: + self.probes = ProbeSet(self.data, core.fam, core.encoding, + self.name) + except: + raise Bcfg2.Server.Plugin.PluginInitError + + self.probedata = dict() + self.cgroups = dict() + self.load_data() + + def write_data(self): + """Write probe data out for use with bcfg2-info.""" + top = lxml.etree.Element("Probed") + for client, probed in sorted(self.probedata.items()): + cx = lxml.etree.SubElement(top, 'Client', name=client, + timestamp=str(int(probed.timestamp))) + for probe in sorted(probed): + lxml.etree.SubElement(cx, 'Probe', name=probe, + value=str(self.probedata[client][probe])) + for group in sorted(self.cgroups[client]): + lxml.etree.SubElement(cx, "Group", name=group) + data = lxml.etree.tostring(top, encoding='UTF-8', + xml_declaration=True, + pretty_print='true') + try: + datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w') + except IOError: + self.logger.error("Failed to write probed.xml") + datafile.write(data.decode('utf-8')) + + def load_data(self): + try: + data = lxml.etree.parse(self.data + '/probed.xml').getroot() + except: + self.logger.error("Failed to read file probed.xml") + return + self.probedata = {} + self.cgroups = {} + for client in data.getchildren(): + self.probedata[client.get('name')] = \ + ClientProbeDataSet(timestamp=client.get("timestamp")) + self.cgroups[client.get('name')] = [] + for pdata in client: + if (pdata.tag == 'Probe'): + self.probedata[client.get('name')][pdata.get('name')] = \ + ProbeData(pdata.get('value')) + elif (pdata.tag == 'Group'): + self.cgroups[client.get('name')].append(pdata.get('name')) + + def GetProbes(self, meta, force=False): + """Return a set of probes for execution on client.""" + return self.probes.get_probe_data(meta) + + def ReceiveData(self, client, datalist): + self.cgroups[client.hostname] = [] + self.probedata[client.hostname] = ClientProbeDataSet() + for data in datalist: + self.ReceiveDataItem(client, data) + self.write_data() + + def ReceiveDataItem(self, client, data): + """Receive probe results pertaining to client.""" + if client.hostname not in self.cgroups: + self.cgroups[client.hostname] = [] + if data.text == None: + self.logger.error("Got null response to probe %s from %s" % \ + (data.get('name'), client.hostname)) + try: + 
self.probedata[client.hostname].update({data.get('name'): + ProbeData('')}) + except KeyError: + self.probedata[client.hostname] = \ + ClientProbeDataSet([(data.get('name'), ProbeData(''))]) + return + dlines = data.text.split('\n') + self.logger.debug("%s:probe:%s:%s" % (client.hostname, + data.get('name'), [line.strip() for line in dlines])) + for line in dlines[:]: + if line.split(':')[0] == 'group': + newgroup = line.split(':')[1].strip() + if newgroup not in self.cgroups[client.hostname]: + self.cgroups[client.hostname].append(newgroup) + dlines.remove(line) + dobj = ProbeData("\n".join(dlines)) + try: + self.probedata[client.hostname].update({data.get('name'): dobj}) + except KeyError: + self.probedata[client.hostname] = \ + ClientProbeDataSet([(data.get('name'), dobj)]) + + def get_additional_groups(self, meta): + return self.cgroups.get(meta.hostname, list()) + + def get_additional_data(self, meta): + return self.probedata.get(meta.hostname, ClientProbeDataSet()) diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py new file mode 100644 index 000000000..680881858 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Properties.py @@ -0,0 +1,76 @@ +import os +import re +import sys +import copy +import logging +import lxml.etree +import Bcfg2.Server.Plugin + +logger = logging.getLogger('Bcfg2.Plugins.Properties') + +class PropertyFile(Bcfg2.Server.Plugin.StructFile): + """Class for properties files.""" + def write(self): + """ Write the data in this data structure back to the property + file """ + if self.validate_data(): + try: + open(self.name, + "wb").write(lxml.etree.tostring(self.xdata, + pretty_print=True)) + return True + except IOError: + err = sys.exc_info()[1] + logger.error("Failed to write %s: %s" % (self.name, err)) + return False + else: + return False + + def validate_data(self): + """ ensure that the data in this object validates against the + XML schema for this property file (if a schema exists) """ + schemafile = self.name.replace(".xml", ".xsd") + if os.path.exists(schemafile): + try: + schema = lxml.etree.XMLSchema(file=schemafile) + except: + logger.error("Failed to process schema for %s" % self.name) + return False + else: + # no schema exists + return True + + if not schema.validate(self.xdata): + logger.error("Data for %s fails to validate; run bcfg2-lint for " + "more details" % self.name) + return False + else: + return True + + +class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): + __child__ = PropertyFile + patterns = re.compile(r'.*\.xml$') + + +class Properties(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector): + """ + The properties plugin maps property + files into client metadata instances. 
+ """ + name = 'Properties' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + try: + self.store = PropDirectoryBacked(self.data, core.fam) + except OSError: + e = sys.exc_info()[1] + self.logger.error("Error while creating Properties store: %s %s" % + (e.strerror, e.filename)) + raise Bcfg2.Server.Plugin.PluginInitError + + def get_additional_data(self, _): + return copy.copy(self.store.entries) diff --git a/src/lib/Bcfg2/Server/Plugins/Rules.py b/src/lib/Bcfg2/Server/Plugins/Rules.py new file mode 100644 index 000000000..b80ef351a --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Rules.py @@ -0,0 +1,55 @@ +"""This generator provides rule-based entry mappings.""" + +import re +import Bcfg2.Server.Plugin + +class RulesConfig(Bcfg2.Server.Plugin.SimpleConfig): + _required = False + +class Rules(Bcfg2.Server.Plugin.PrioDir): + """This is a generator that handles service assignments.""" + name = 'Rules' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore) + self.config = RulesConfig(self) + self._regex_cache = dict() + + def HandlesEntry(self, entry, metadata): + if entry.tag in self.Entries: + return self._matches(entry, metadata, + self.Entries[entry.tag].keys()) + return False + + def HandleEntry(self, entry, metadata): + return self.BindEntry(entry, metadata) + + def BindEntry(self, entry, metadata): + attrs = self.get_attrs(entry, metadata) + for key, val in list(attrs.items()): + if key not in entry.attrib: + entry.attrib[key] = val + + def _matches(self, entry, metadata, rules): + if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata, rules): + return True + elif (entry.tag == "Path" and + ((entry.get('name').endswith("/") and + entry.get('name').rstrip("/") in rules) or + (not entry.get('name').endswith("/") and + entry.get('name') + '/' in rules))): + # special case for Path tags: + # http://trac.mcs.anl.gov/projects/bcfg2/ticket/967 + return True + elif self._regex_enabled: + # attempt regular expression matching + for rule in rules: + if rule not in self._regex_cache: + self._regex_cache[rule] = re.compile("%s$" % rule) + if self._regex_cache[rule].match(entry.get('name')): + return True + return False + + def _regex_enabled(self): + return self.config.getboolean("rules", "regex", default=False) diff --git a/src/lib/Bcfg2/Server/Plugins/SGenshi.py b/src/lib/Bcfg2/Server/Plugins/SGenshi.py new file mode 100644 index 000000000..0ba08125e --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/SGenshi.py @@ -0,0 +1,97 @@ +'''This module implements a templating generator based on Genshi''' + +import genshi.input +import genshi.template +import lxml.etree +import logging +import copy +import sys +import os.path + +import Bcfg2.Server.Plugin +import Bcfg2.Server.Plugins.TGenshi + +logger = logging.getLogger('Bcfg2.Plugins.SGenshi') + + +class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile, + Bcfg2.Server.Plugin.StructFile): + def __init__(self, name, specific, encoding): + Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name, + specific, encoding) + Bcfg2.Server.Plugin.StructFile.__init__(self, name) + + def get_xml_value(self, metadata): + if not hasattr(self, 'template'): + logger.error("No parsed template information for %s" % (self.name)) + raise Bcfg2.Server.Plugin.PluginExecutionError + try: + stream = self.template.generate(metadata=metadata).filter( \ + 
Bcfg2.Server.Plugins.TGenshi.removecomment) + data = lxml.etree.XML(stream.render('xml', strip_whitespace=False)) + bundlename = os.path.splitext(os.path.basename(self.name))[0] + bundle = lxml.etree.Element('Bundle', name=bundlename) + for item in self.Match(metadata, data): + bundle.append(copy.deepcopy(item)) + return bundle + except LookupError: + lerror = sys.exc_info()[1] + logger.error('Genshi lookup error: %s' % lerror) + except genshi.template.TemplateError: + terror = sys.exc_info()[1] + logger.error('Genshi template error: %s' % terror) + raise + except genshi.input.ParseError: + perror = sys.exc_info()[1] + logger.error('Genshi parse error: %s' % perror) + raise + + def Match(self, metadata, xdata): + """Return matching fragments of parsed template.""" + rv = [] + for child in xdata.getchildren(): + rv.extend(self._match(child, metadata)) + logger.debug("File %s got %d match(es)" % (self.name, len(rv))) + return rv + +class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet): + + def __init__(self, path, fam, encoding): + fpattern = '\S+\.xml' + Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, + SGenshiTemplateFile, encoding) + fam.AddMonitor(path, self) + + def HandleEvent(self, event): + '''passthrough event handler for old calling convention''' + if event.filename != self.path: + return self.handle_event(event) + + def BuildStructures(self, metadata): + """Build SGenshi structures.""" + ret = [] + for entry in self.get_matching(metadata): + try: + ret.append(entry.get_xml_value(metadata)) + except: + logger.error("SGenshi: Failed to template file %s" % entry.name) + return ret + + +class SGenshi(SGenshiEntrySet, + Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Structure): + """The SGenshi plugin provides templated structures.""" + name = 'SGenshi' + __author__ = 'bcfg-dev@mcs.anl.gov' + deprecated = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Structure.__init__(self) + try: + SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding) + except: + logger.error("Failed to load %s repository; disabling %s" \ + % (self.name, self.name)) + raise Bcfg2.Server.Plugin.PluginInitError diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py new file mode 100644 index 000000000..ac281ad1a --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py @@ -0,0 +1,413 @@ +'''This module manages ssh key files for bcfg2''' + +import binascii +import re +import os +import socket +import shutil +import sys +import tempfile +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin +from Bcfg2.Bcfg2Py3k import u_str + +if sys.hexversion >= 0x03000000: + from functools import reduce + +import logging +logger = logging.getLogger(__name__) + +class KeyData(Bcfg2.Server.Plugin.SpecificData): + def __init__(self, name, specific, encoding): + Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific, + encoding) + self.encoding = encoding + + def bind_entry(self, entry, metadata): + entry.set('type', 'file') + if entry.get('encoding') == 'base64': + entry.text = binascii.b2a_base64(self.data) + else: + try: + entry.text = u_str(self.data, self.encoding) + except UnicodeDecodeError: + e = sys.exc_info()[1] + logger.error("Failed to decode %s: %s" % (entry.get('name'), e)) + logger.error("Please verify you are using the proper encoding.") + raise Bcfg2.Server.Plugin.PluginExecutionError + except ValueError: + e = sys.exc_info()[1] + logger.error("Error 
in specification for %s" % + entry.get('name')) + logger.error(str(e)) + logger.error("You need to specify base64 encoding for %s." % + entry.get('name')) + raise Bcfg2.Server.Plugin.PluginExecutionError + if entry.text in ['', None]: + entry.set('empty', 'true') + +class HostKeyEntrySet(Bcfg2.Server.Plugin.EntrySet): + def __init__(self, basename, path): + if basename.startswith("ssh_host_key"): + encoding = "base64" + else: + encoding = None + Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, KeyData, + encoding) + self.metadata = {'owner': 'root', + 'group': 'root', + 'type': 'file'} + if encoding is not None: + self.metadata['encoding'] = encoding + if basename.endswith('.pub'): + self.metadata['perms'] = '0644' + else: + self.metadata['perms'] = '0600' + + +class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet): + def __init__(self, path): + Bcfg2.Server.Plugin.EntrySet.__init__(self, "ssh_known_hosts", path, + KeyData, None) + self.metadata = {'owner': 'root', + 'group': 'root', + 'type': 'file', + 'perms': '0644'} + + +class SSHbase(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Generator, + Bcfg2.Server.Plugin.PullTarget): + """ + The sshbase generator manages ssh host keys (both v1 and v2) + for hosts. It also manages the ssh_known_hosts file. It can + integrate host keys from other management domains and similarly + export its keys. The repository contains files in the following + formats: + + ssh_host_key.H_(hostname) -> the v1 host private key for + (hostname) + ssh_host_key.pub.H_(hostname) -> the v1 host public key + for (hostname) + ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host + private key for (hostname) + ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host + public key for (hostname) + ssh_known_hosts -> the current known hosts file. this + is regenerated each time a new key is generated. 
+ + """ + name = 'SSHbase' + __author__ = 'bcfg-dev@mcs.anl.gov' + + keypatterns = ["ssh_host_dsa_key", + "ssh_host_ecdsa_key", + "ssh_host_rsa_key", + "ssh_host_key", + "ssh_host_dsa_key.pub", + "ssh_host_ecdsa_key.pub", + "ssh_host_rsa_key.pub", + "ssh_host_key.pub"] + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Generator.__init__(self) + Bcfg2.Server.Plugin.PullTarget.__init__(self) + self.ipcache = {} + self.namecache = {} + self.__skn = False + + # keep track of which bogus keys we've warned about, and only + # do so once + self.badnames = dict() + + core.fam.AddMonitor(self.data, self) + + self.static = dict() + self.entries = dict() + self.Entries['Path'] = dict() + + self.entries['/etc/ssh/ssh_known_hosts'] = KnownHostsEntrySet(self.data) + self.Entries['Path']['/etc/ssh/ssh_known_hosts'] = self.build_skn + for keypattern in self.keypatterns: + self.entries["/etc/ssh/" + keypattern] = HostKeyEntrySet(keypattern, + self.data) + self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk + + def get_skn(self): + """Build memory cache of the ssh known hosts file.""" + if not self.__skn: + # if no metadata is registered yet, defer + if len(self.core.metadata.query.all()) == 0: + self.__skn = False + return self.__skn + + skn = [s.data.decode().rstrip() + for s in list(self.static.values())] + + mquery = self.core.metadata.query + + # build hostname cache + names = dict() + for cmeta in mquery.all(): + names[cmeta.hostname] = set([cmeta.hostname]) + names[cmeta.hostname].update(cmeta.aliases) + newnames = set() + newips = set() + for name in names[cmeta.hostname]: + newnames.add(name.split('.')[0]) + try: + newips.add(self.get_ipcache_entry(name)[0]) + except: + continue + names[cmeta.hostname].update(newnames) + names[cmeta.hostname].update(cmeta.addresses) + names[cmeta.hostname].update(newips) + # TODO: Only perform reverse lookups on IPs if an option is set. + if True: + for ip in newips: + try: + names[cmeta.hostname].update(self.get_namecache_entry(ip)) + except: + continue + names[cmeta.hostname] = sorted(names[cmeta.hostname]) + + pubkeys = [pubk for pubk in list(self.entries.keys()) + if pubk.endswith('.pub')] + pubkeys.sort() + for pubkey in pubkeys: + for entry in sorted(self.entries[pubkey].entries.values(), + key=lambda e: e.specific.hostname or e.specific.group): + specific = entry.specific + hostnames = [] + if specific.hostname and specific.hostname in names: + hostnames = names[specific.hostname] + elif specific.group: + hostnames = \ + reduce(lambda x, y: x + y, + [names[cmeta.hostname] + for cmeta in \ + mquery.by_groups([specific.group])], []) + elif specific.all: + # a generic key for all hosts? really? + hostnames = reduce(lambda x, y: x + y, + list(names.values()), []) + if not hostnames: + if specific.hostname: + key = specific.hostname + ktype = "host" + elif specific.group: + key = specific.group + ktype = "group" + else: + # user has added a global SSH key, but + # have no clients yet. don't warn about + # this. 
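
Each emitted known_hosts line joins every name and address collected for a host with one of its public keys. With hypothetical values, the assembly looks like:

    hostnames = ['10.0.0.5', 'web1', 'web1.example.com']   # sorted names/IPs
    pubkey = 'ssh-rsa AAAAB3NzaC1yc2E... root@web1'        # truncated example key
    line = "%s %s" % (','.join(hostnames), pubkey)
    # -> 10.0.0.5,web1,web1.example.com ssh-rsa AAAAB3NzaC1yc2E... root@web1
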
+ continue + + if key not in self.badnames: + self.badnames[key] = True + self.logger.info("Ignoring key for unknown %s %s" % + (ktype, key)) + continue + + skn.append("%s %s" % (','.join(hostnames), + entry.data.decode().rstrip())) + + self.__skn = "\n".join(skn) + "\n" + return self.__skn + + def set_skn(self, value): + """Set backing data for skn.""" + self.__skn = value + skn = property(get_skn, set_skn) + + def HandleEvent(self, event=None): + """Local event handler that does skn regen on pubkey change.""" + # skip events we don't care about + action = event.code2str() + if action == "endExist" or event.filename == self.data: + return + + for entry in list(self.entries.values()): + if entry.specific.match(event.filename): + entry.handle_event(event) + if event.filename.endswith(".pub"): + self.logger.info("New public key %s; invalidating " + "ssh_known_hosts cache" % event.filename) + self.skn = False + return + + if event.filename in ['info', 'info.xml', ':info']: + for entry in list(self.entries.values()): + entry.handle_event(event) + return + + if event.filename.endswith('.static'): + self.logger.info("Static key %s %s; invalidating ssh_known_hosts " + "cache" % (event.filename, action)) + if action == "deleted" and event.filename in self.static: + del self.static[event.filename] + self.skn = False + else: + self.static[event.filename] = \ + Bcfg2.Server.Plugin.FileBacked(os.path.join(self.data, + event.filename)) + self.static[event.filename].HandleEvent(event) + self.skn = False + return + + self.logger.warn("SSHbase: Got unknown event %s %s" % + (event.filename, action)) + + def get_ipcache_entry(self, client): + """Build a cache of dns results.""" + if client in self.ipcache: + if self.ipcache[client]: + return self.ipcache[client] + else: + raise socket.gaierror + else: + # need to add entry + try: + ipaddr = socket.gethostbyname(client) + self.ipcache[client] = (ipaddr, client) + return (ipaddr, client) + except socket.gaierror: + ipaddr = Popen(["getent", "hosts", client], + stdout=PIPE).stdout.read().strip().split() + if ipaddr: + self.ipcache[client] = (ipaddr, client) + return (ipaddr, client) + self.ipcache[client] = False + self.logger.error("Failed to find IP address for %s" % client) + raise socket.gaierror + + def get_namecache_entry(self, cip): + """Build a cache of name lookups from client IP addresses.""" + if cip in self.namecache: + # lookup cached name from IP + if self.namecache[cip]: + return self.namecache[cip] + else: + raise socket.gaierror + else: + # add an entry that has not been cached + try: + rvlookup = socket.gethostbyaddr(cip) + if rvlookup[0]: + self.namecache[cip] = [rvlookup[0]] + else: + self.namecache[cip] = [] + self.namecache[cip].extend(rvlookup[1]) + return self.namecache[cip] + except socket.gaierror: + self.namecache[cip] = False + self.logger.error("Failed to find any names associated with IP address %s" % cip) + raise + + def build_skn(self, entry, metadata): + """This function builds builds a host specific known_hosts file.""" + try: + rv = self.entries[entry.get('name')].bind_entry(entry, metadata) + except Bcfg2.Server.Plugin.PluginExecutionError: + client = metadata.hostname + entry.text = self.skn + hostkeys = [] + for k in self.keypatterns: + if k.endswith(".pub"): + try: + hostkeys.append(self.entries["/etc/ssh/" + + k].best_matching(metadata)) + except Bcfg2.Server.Plugin.PluginExecutionError: + pass + hostkeys.sort() + for hostkey in hostkeys: + entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" % ( + 
hostkey.data.decode()) + self.entries[entry.get('name')].bind_info_to_entry(entry, metadata) + + def build_hk(self, entry, metadata): + """This binds host key data into entries.""" + try: + self.entries[entry.get('name')].bind_entry(entry, metadata) + except Bcfg2.Server.Plugin.PluginExecutionError: + filename = entry.get('name').split('/')[-1] + self.GenerateHostKeyPair(metadata.hostname, filename) + # Service the FAM events queued up by the key generation + # so the data structure entries will be available for + # binding. + # + # NOTE: We wait for up to ten seconds. There is some + # potential for race condition, because if the file + # monitor doesn't get notified about the new key files in + # time, those entries won't be available for binding. In + # practice, this seems "good enough". + tries = 0 + is_bound = False + while not is_bound: + if tries >= 10: + self.logger.error("%s still not registered" % filename) + raise Bcfg2.Server.Plugin.PluginExecutionError + self.core.fam.handle_events_in_interval(1) + tries += 1 + try: + self.entries[entry.get('name')].bind_entry(entry, metadata) + is_bound = True + except Bcfg2.Server.Plugin.PluginExecutionError: + pass + + def GenerateHostKeyPair(self, client, filename): + """Generate new host key pair for client.""" + match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa)_)?key)', filename) + if match: + hostkey = "%s.H_%s" % (match.group(1), client) + if match.group(2): + keytype = match.group(2) + else: + keytype = 'rsa1' + else: + self.logger.error("Unknown key filename: %s" % filename) + return + + fileloc = "%s/%s" % (self.data, hostkey) + publoc = self.data + '/' + ".".join([hostkey.split('.')[0], 'pub', + "H_%s" % client]) + tempdir = tempfile.mkdtemp() + temploc = "%s/%s" % (tempdir, hostkey) + cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "", + "-t", keytype, "-C", "root@%s" % client] + proc = Popen(cmd, stdout=PIPE, stdin=PIPE) + proc.communicate() + proc.wait() + + try: + shutil.copy(temploc, fileloc) + shutil.copy("%s.pub" % temploc, publoc) + except IOError: + err = sys.exc_info()[1] + self.logger.error("Temporary SSH keys not found: %s" % err) + + try: + os.unlink(temploc) + os.unlink("%s.pub" % temploc) + os.rmdir(tempdir) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Failed to unlink temporary ssh keys: %s" % err) + + def AcceptChoices(self, _, metadata): + return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)] + + def AcceptPullData(self, specific, entry, log): + """Per-plugin bcfg2-admin pull support.""" + # specific will always be host specific + filename = "%s/%s.H_%s" % (self.data, entry['name'].split('/')[-1], + specific.hostname) + try: + open(filename, 'w').write(entry['text']) + if log: + print("Wrote file %s" % filename) + except KeyError: + self.logger.error("Failed to pull %s. This file does not currently " + "exist on the client" % entry.get('name')) diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py new file mode 100644 index 000000000..0072dc62d --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py @@ -0,0 +1,274 @@ +import Bcfg2.Server.Plugin +import Bcfg2.Options +import lxml.etree +import posixpath +import tempfile +import pipes +import os +from subprocess import Popen, PIPE, STDOUT +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + + +class SSLCA(Bcfg2.Server.Plugin.GroupSpool): + """ + The SSLCA generator handles the creation and + management of ssl certificates and their keys. 
+ """ + name = 'SSLCA' + __author__ = 'g.hagger@gmail.com' + __child__ = Bcfg2.Server.Plugin.FileBacked + key_specs = {} + cert_specs = {} + CAs = {} + + def HandleEvent(self, event=None): + """ + Updates which files this plugin handles based upon filesystem events. + Allows configuration items to be added/removed without server restarts. + """ + action = event.code2str() + if event.filename[0] == '/': + return + epath = "".join([self.data, self.handles[event.requestID], + event.filename]) + if posixpath.isdir(epath): + ident = self.handles[event.requestID] + event.filename + else: + ident = self.handles[event.requestID][:-1] + + fname = "".join([ident, '/', event.filename]) + + if event.filename.endswith('.xml'): + if action in ['exists', 'created', 'changed']: + if event.filename.endswith('key.xml'): + key_spec = dict(list(lxml.etree.parse(epath).find('Key').items())) + self.key_specs[ident] = { + 'bits': key_spec.get('bits', 2048), + 'type': key_spec.get('type', 'rsa') + } + self.Entries['Path'][ident] = self.get_key + elif event.filename.endswith('cert.xml'): + cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items())) + ca = cert_spec.get('ca', 'default') + self.cert_specs[ident] = { + 'ca': ca, + 'format': cert_spec.get('format', 'pem'), + 'key': cert_spec.get('key'), + 'days': cert_spec.get('days', 365), + 'C': cert_spec.get('c'), + 'L': cert_spec.get('l'), + 'ST': cert_spec.get('st'), + 'OU': cert_spec.get('ou'), + 'O': cert_spec.get('o'), + 'emailAddress': cert_spec.get('emailaddress') + } + cp = ConfigParser.ConfigParser() + cp.read(self.core.cfile) + self.CAs[ca] = dict(cp.items('sslca_' + ca)) + self.Entries['Path'][ident] = self.get_cert + if action == 'deleted': + if ident in self.Entries['Path']: + del self.Entries['Path'][ident] + else: + if action in ['exists', 'created']: + if posixpath.isdir(epath): + self.AddDirectoryMonitor(epath[len(self.data):]) + if ident not in self.entries and posixpath.isfile(epath): + self.entries[fname] = self.__child__(epath) + self.entries[fname].HandleEvent(event) + if action == 'changed': + self.entries[fname].HandleEvent(event) + elif action == 'deleted': + if fname in self.entries: + del self.entries[fname] + else: + self.entries[fname].HandleEvent(event) + + def get_key(self, entry, metadata): + """ + either grabs a prexisting key hostfile, or triggers the generation + of a new key if one doesn't exist. 
+ """ + # set path type and permissions, otherwise bcfg2 won't bind the file + permdata = {'owner': 'root', + 'group': 'root', + 'type': 'file', + 'perms': '644'} + [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] + + # check if we already have a hostfile, or need to generate a new key + # TODO: verify key fits the specs + path = entry.get('name') + filename = "".join([path, '/', path.rsplit('/', 1)[1], + '.H_', metadata.hostname]) + if filename not in list(self.entries.keys()): + key = self.build_key(filename, entry, metadata) + open(self.data + filename, 'w').write(key) + entry.text = key + self.entries[filename] = self.__child__("%s%s" % (self.data, + filename)) + self.entries[filename].HandleEvent() + else: + entry.text = self.entries[filename].data + + def build_key(self, filename, entry, metadata): + """ + generates a new key according the the specification + """ + type = self.key_specs[entry.get('name')]['type'] + bits = self.key_specs[entry.get('name')]['bits'] + if type == 'rsa': + cmd = ["openssl", "genrsa", bits] + elif type == 'dsa': + cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits] + key = Popen(cmd, stdout=PIPE).stdout.read() + return key + + def get_cert(self, entry, metadata): + """ + either grabs a prexisting cert hostfile, or triggers the generation + of a new cert if one doesn't exist. + """ + # set path type and permissions, otherwise bcfg2 won't bind the file + permdata = {'owner': 'root', + 'group': 'root', + 'type': 'file', + 'perms': '644'} + [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] + + path = entry.get('name') + filename = "".join([path, '/', path.rsplit('/', 1)[1], + '.H_', metadata.hostname]) + + # first - ensure we have a key to work with + key = self.cert_specs[entry.get('name')].get('key') + key_filename = "".join([key, '/', key.rsplit('/', 1)[1], + '.H_', metadata.hostname]) + if key_filename not in self.entries: + e = lxml.etree.Element('Path') + e.attrib['name'] = key + self.core.Bind(e, metadata) + + # check if we have a valid hostfile + if filename in list(self.entries.keys()) and self.verify_cert(filename, + key_filename, + entry): + entry.text = self.entries[filename].data + else: + cert = self.build_cert(key_filename, entry, metadata) + open(self.data + filename, 'w').write(cert) + self.entries[filename] = self.__child__("%s%s" % (self.data, + filename)) + self.entries[filename].HandleEvent() + entry.text = cert + + def verify_cert(self, filename, key_filename, entry): + if self.verify_cert_against_ca(filename, entry): + if self.verify_cert_against_key(filename, key_filename): + return True + return False + + def verify_cert_against_ca(self, filename, entry): + """ + check that a certificate validates against the ca cert, + and that it has not expired. + """ + chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert') + cert = self.data + filename + res = Popen(["openssl", "verify", "-CAfile", chaincert, cert], + stdout=PIPE, stderr=STDOUT).stdout.read() + if res == cert + ": OK\n": + return True + return False + + def verify_cert_against_key(self, filename, key_filename): + """ + check that a certificate validates against its private key. 
+ """ + cert = self.data + filename + key = self.data + key_filename + cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" % + pipes.quote(cert)) + cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read() + cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" % + pipes.quote(key)) + key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read() + if cert_md5 == key_md5: + return True + return False + + def build_cert(self, key_filename, entry, metadata): + """ + creates a new certificate according to the specification + """ + req_config = self.build_req_config(entry, metadata) + req = self.build_request(key_filename, req_config, entry) + ca = self.cert_specs[entry.get('name')]['ca'] + ca_config = self.CAs[ca]['config'] + days = self.cert_specs[entry.get('name')]['days'] + passphrase = self.CAs[ca].get('passphrase') + cmd = ["openssl", "ca", "-config", ca_config, "-in", req, + "-days", days, "-batch"] + if passphrase: + cmd.extend(["-passin", "pass:%s" % passphrase]) + cert = Popen(cmd, stdout=PIPE).stdout.read() + try: + os.unlink(req_config) + os.unlink(req) + except OSError: + self.logger.error("Failed to unlink temporary files") + return cert + + def build_req_config(self, entry, metadata): + """ + generates a temporary openssl configuration file that is + used to generate the required certificate request + """ + # create temp request config file + conffile = open(tempfile.mkstemp()[1], 'w') + cp = ConfigParser.ConfigParser({}) + cp.optionxform = str + defaults = { + 'req': { + 'default_md': 'sha1', + 'distinguished_name': 'req_distinguished_name', + 'req_extensions': 'v3_req', + 'x509_extensions': 'v3_req', + 'prompt': 'no' + }, + 'req_distinguished_name': {}, + 'v3_req': { + 'subjectAltName': '@alt_names' + }, + 'alt_names': {} + } + for section in list(defaults.keys()): + cp.add_section(section) + for key in defaults[section]: + cp.set(section, key, defaults[section][key]) + x = 1 + altnames = list(metadata.aliases) + altnames.append(metadata.hostname) + for altname in altnames: + cp.set('alt_names', 'DNS.' 
+ str(x), altname) + x += 1 + for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']: + if self.cert_specs[entry.get('name')][item]: + cp.set('req_distinguished_name', item, self.cert_specs[entry.get('name')][item]) + cp.set('req_distinguished_name', 'CN', metadata.hostname) + cp.write(conffile) + conffile.close() + return conffile.name + + def build_request(self, key_filename, req_config, entry): + """ + creates the certificate request + """ + req = tempfile.mkstemp()[1] + days = self.cert_specs[entry.get('name')]['days'] + key = self.data + key_filename + cmd = ["openssl", "req", "-new", "-config", req_config, + "-days", days, "-key", key, "-text", "-out", req] + res = Popen(cmd, stdout=PIPE).stdout.read() + return req diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py new file mode 100644 index 000000000..aeb3b9f74 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Snapshots.py @@ -0,0 +1,142 @@ +#import lxml.etree +import logging +import binascii +import difflib +#import sqlalchemy +#import sqlalchemy.orm +import Bcfg2.Server.Plugin +import Bcfg2.Server.Snapshots +import Bcfg2.Logger +from Bcfg2.Server.Snapshots.model import Snapshot +import sys +import time +import threading + +# Compatibility import +from Bcfg2.Bcfg2Py3k import Queue + +logger = logging.getLogger('Snapshots') + +ftypes = ['ConfigFile', 'SymLink', 'Directory'] +datafields = { + 'Package': ['version'], + 'Path': ['type'], + 'Service': ['status'], + 'ConfigFile': ['owner', 'group', 'perms'], + 'Directory': ['owner', 'group', 'perms'], + 'SymLink': ['to'], + } + + +# py3k compatibility +def u_str(string): + if sys.hexversion >= 0x03000000: + return string + else: + return unicode(string) + + +def build_snap_ent(entry): + basefields = [] + if entry.tag in ['Package', 'Service']: + basefields += ['type'] + desired = dict([(key, u_str(entry.get(key))) for key in basefields]) + state = dict([(key, u_str(entry.get(key))) for key in basefields]) + desired.update([(key, u_str(entry.get(key))) for key in \ + datafields[entry.tag]]) + if entry.tag == 'ConfigFile' or \ + ((entry.tag == 'Path') and (entry.get('type') == 'file')): + if entry.text == None: + desired['contents'] = None + else: + if entry.get('encoding', 'ascii') == 'ascii': + desired['contents'] = u_str(entry.text) + else: + desired['contents'] = u_str(binascii.a2b_base64(entry.text)) + + if 'current_bfile' in entry.attrib: + state['contents'] = u_str(binascii.a2b_base64( \ + entry.get('current_bfile'))) + elif 'current_bdiff' in entry.attrib: + diff = binascii.a2b_base64(entry.get('current_bdiff')) + state['contents'] = u_str( \ + '\n'.join(difflib.restore(diff.split('\n'), 1))) + + state.update([(key, u_str(entry.get('current_' + key, entry.get(key)))) \ + for key in datafields[entry.tag]]) + if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false': + state = None + return [desired, state] + + +class Snapshots(Bcfg2.Server.Plugin.Statistics, + Bcfg2.Server.Plugin.Plugin): + name = 'Snapshots' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Statistics.__init__(self) + self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile) + self.work_queue = Queue() + self.loader = threading.Thread(target=self.load_snapshot) + self.loader.start() + + def load_snapshot(self): + while self.running: + try: + (metadata, data) = self.work_queue.get(block=True, timeout=5) + except: + continue + 
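
load_snapshot is the consumer half of a standard producer/consumer queue: process_statistics enqueues (metadata, data) tuples from the request path, and this loader thread drains them, with the five-second timeout keeping the loop responsive to self.running. The pattern in isolation (names hypothetical; Queue.Empty is the precise exception the bare except above swallows):

    from threading import Thread
    try:
        from Queue import Queue, Empty   # Python 2
    except ImportError:
        from queue import Queue, Empty   # Python 3

    def worker(queue):
        while True:
            try:
                item = queue.get(block=True, timeout=5)
            except Empty:
                continue     # nothing queued yet; re-check the loop condition
            if item is None:
                break        # sentinel value: shut the worker down
            print("processing %r" % (item,))

    q = Queue()
    t = Thread(target=worker, args=(q,))
    t.start()
    q.put(('metadata', 'data'))
    q.put(None)
    t.join()
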
self.statistics_from_old_stats(metadata, data) + + def process_statistics(self, metadata, data): + return self.work_queue.put((metadata, data)) + + def statistics_from_old_stats(self, metadata, xdata): + # entries are name -> (modified, correct, start, desired, end) + # not sure we can get all of this from old format stats + t1 = time.time() + entries = dict([('Package', dict()), + ('Service', dict()), ('Path', dict())]) + extra = dict([('Package', dict()), ('Service', dict()), + ('Path', dict())]) + bad = [] + state = xdata.find('.//Statistics') + correct = state.get('state') == 'clean' + revision = u_str(state.get('revision', '-1')) + for entry in state.find('.//Bad'): + data = [False, False, u_str(entry.get('name'))] \ + + build_snap_ent(entry) + if entry.tag in ftypes: + etag = 'Path' + else: + etag = entry.tag + entries[etag][entry.get('name')] = data + for entry in state.find('.//Modified'): + if entry.tag in ftypes: + etag = 'Path' + else: + etag = entry.tag + if entry.get('name') in entries[etag]: + data = [True, False, u_str(entry.get('name'))] + \ + build_snap_ent(entry) + else: + data = [True, False, u_str(entry.get('name'))] + \ + build_snap_ent(entry) + for entry in state.find('.//Extra'): + if entry.tag in datafields: + data = build_snap_ent(entry)[1] + ename = u_str(entry.get('name')) + data['name'] = ename + extra[entry.tag][ename] = data + else: + print("extra", entry.tag, entry.get('name')) + t2 = time.time() + snap = Snapshot.from_data(self.session, correct, revision, + metadata, entries, extra) + self.session.add(snap) + self.session.commit() + t3 = time.time() + logger.info("Snapshot storage took %fs" % (t3 - t2)) + return True diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py new file mode 100644 index 000000000..265ef95a8 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Statistics.py @@ -0,0 +1,161 @@ +'''This file manages the statistics collected by the BCFG2 Server''' + +import binascii +import copy +import difflib +import logging +from lxml.etree import XML, SubElement, Element, XMLSyntaxError +import lxml.etree +import os +from time import asctime, localtime, time, strptime, mktime +import threading + +import Bcfg2.Server.Plugin + + +class StatisticsStore(object): + """Manages the memory and file copy of statistics collected about client runs.""" + __min_write_delay__ = 0 + + def __init__(self, filename): + self.filename = filename + self.element = Element('Dummy') + self.dirty = 0 + self.lastwrite = 0 + self.logger = logging.getLogger('Bcfg2.Server.Statistics') + self.ReadFromFile() + + def WriteBack(self, force=0): + """Write statistics changes back to persistent store.""" + if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \ + or force: + try: + fout = open(self.filename + '.new', 'w') + except IOError: + ioerr = sys.exc_info()[1] + self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr)) + else: + fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True)) + fout.close() + os.rename(self.filename + '.new', self.filename) + self.dirty = 0 + self.lastwrite = time() + + def ReadFromFile(self): + """Reads current state regarding statistics.""" + try: + fin = open(self.filename, 'r') + data = fin.read() + fin.close() + self.element = XML(data) + self.dirty = 0 + except (IOError, XMLSyntaxError): + self.logger.error("Creating new statistics file %s"%(self.filename)) + self.element = Element('ConfigStatistics') + self.WriteBack() + 
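
WriteBack uses the write-to-temporary-then-rename idiom, so a crash mid-write can never leave a truncated statistics file behind. The idiom in isolation (path hypothetical):

    import os

    def atomic_write(path, data):
        tmp = path + '.new'
        fout = open(tmp, 'w')
        fout.write(data)          # a crash here leaves only the .new file
        fout.close()
        os.rename(tmp, path)      # atomic on POSIX within one filesystem

    atomic_write('/tmp/statistics.xml', '<ConfigStatistics/>')
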
self.dirty = 0 + + def updateStats(self, xml, client): + """Updates the statistics of a current node with new data.""" + + # Current policy: + # - Keep anything less than 24 hours old + # - Keep latest clean run for clean nodes + # - Keep latest clean and dirty run for dirty nodes + newstat = xml.find('Statistics') + + if newstat.get('state') == 'clean': + node_dirty = 0 + else: + node_dirty = 1 + + # Find correct node entry in stats data + # The following list comprehension should be guarenteed to return at + # most one result + nodes = [elem for elem in self.element.findall('Node') \ + if elem.get('name') == client] + nummatch = len(nodes) + if nummatch == 0: + # Create an entry for this node + node = SubElement(self.element, 'Node', name=client) + elif nummatch == 1 and not node_dirty: + # Delete old instance + node = nodes[0] + [node.remove(elem) for elem in node.findall('Statistics') \ + if self.isOlderThan24h(elem.get('time'))] + elif nummatch == 1 and node_dirty: + # Delete old dirty statistics entry + node = nodes[0] + [node.remove(elem) for elem in node.findall('Statistics') \ + if (elem.get('state') == 'dirty' \ + and self.isOlderThan24h(elem.get('time')))] + else: + # Shouldn't be reached + self.logger.error("Duplicate node entry for %s"%(client)) + + # Set current time for stats + newstat.set('time', asctime(localtime())) + + # Add statistic + node.append(copy.copy(newstat)) + + # Set dirty + self.dirty = 1 + self.WriteBack(force=1) + + def isOlderThan24h(self, testTime): + """Helper function to determine if <time> string is older than 24 hours.""" + now = time() + utime = mktime(strptime(testTime)) + secondsPerDay = 60*60*24 + + return (now-utime) > secondsPerDay + + +class Statistics(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.ThreadedStatistics, + Bcfg2.Server.Plugin.PullSource): + name = 'Statistics' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore) + Bcfg2.Server.Plugin.PullSource.__init__(self) + fpath = "%s/etc/statistics.xml" % datastore + self.data_file = StatisticsStore(fpath) + + def handle_statistic(self, metadata, data): + self.data_file.updateStats(data, metadata.hostname) + + def FindCurrent(self, client): + rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0] + maxtime = max([strptime(stat.get('time')) for stat \ + in rt.findall('Statistics')]) + return [stat for stat in rt.findall('Statistics') \ + if strptime(stat.get('time')) == maxtime][0] + + def GetExtra(self, client): + return [(entry.tag, entry.get('name')) for entry \ + in self.FindCurrent(client).xpath('.//Extra/*')] + + def GetCurrentEntry(self, client, e_type, e_name): + curr = self.FindCurrent(client) + entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name)) + if not entry: + raise Bcfg2.Server.Plugin.PluginExecutionError + cfentry = entry[-1] + + owner = cfentry.get('current_owner', cfentry.get('owner')) + group = cfentry.get('current_group', cfentry.get('group')) + perms = cfentry.get('current_perms', cfentry.get('perms')) + if cfentry.get('sensitive') in ['true', 'True']: + raise Bcfg2.Server.Plugin.PluginExecutionError + elif 'current_bfile' in cfentry.attrib: + contents = binascii.a2b_base64(cfentry.get('current_bfile')) + elif 'current_bdiff' in cfentry.attrib: + diff = binascii.a2b_base64(cfentry.get('current_bdiff')) + contents = '\n'.join(difflib.restore(diff.split('\n'), 1)) + else: + contents = None + + return (owner, group, perms, 
contents)
diff --git a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
new file mode 100644
index 000000000..f4232ad5c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
@@ -0,0 +1,10 @@
+"""This generator provides service mappings."""
+
+import Bcfg2.Server.Plugin
+
+
+class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
+    """This is a generator that handles service assignments."""
+    name = 'Svcmgr'
+    __author__ = 'bcfg-dev@mcs.anl.gov'
+    deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn.py b/src/lib/Bcfg2/Server/Plugins/Svn.py
new file mode 100644
index 000000000..ae43388ea
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Svn.py
@@ -0,0 +1,46 @@
+import os
+import pipes
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Svn')
+
+
+class Svn(Bcfg2.Server.Plugin.Plugin,
+          Bcfg2.Server.Plugin.Version):
+    """Svn is a version plugin for dealing with Bcfg2 repos."""
+    name = 'Svn'
+    __author__ = 'bcfg-dev@mcs.anl.gov'
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+        self.core = core
+        self.datastore = datastore
+
+        # path to svn directory for bcfg2 repo
+        svn_dir = "%s/.svn" % datastore
+
+        # Read revision from bcfg2 repo
+        if os.path.isdir(svn_dir):
+            self.get_revision()
+        else:
+            logger.error("%s is not a directory" % svn_dir)
+            raise Bcfg2.Server.Plugin.PluginInitError
+
+        logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir)
+
+    def get_revision(self):
+        """Read svn revision information for the Bcfg2 repository."""
+        try:
+            data = Popen(("env LC_ALL=C svn info %s" %
+                          pipes.quote(self.datastore)), shell=True,
+                         stdout=PIPE).communicate()[0].split('\n')
+            return [line.split(': ')[1] for line in data
+                    if line[:9] == 'Revision:'][-1]
+        except IndexError:
+            logger.error("Failed to read svn info; disabling svn support")
+            logger.error('''Ran command "svn info %s"''' % (self.datastore))
+            logger.error("Got output: %s" % data)
+            raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn2.py b/src/lib/Bcfg2/Server/Plugins/Svn2.py
new file mode 100644
index 000000000..e4df9574f
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Svn2.py
@@ -0,0 +1,126 @@
+import sys
+try:
+    import pysvn
+    missing = False
+except ImportError:
+    missing = True
+import Bcfg2.Server.Plugin
+
+class Svn2(Bcfg2.Server.Plugin.Plugin,
+           Bcfg2.Server.Plugin.Version):
+    """Svn2 is a version plugin for dealing with Bcfg2 repos."""
+    name = 'Svn2'
+    __author__ = 'bcfg-dev@mcs.anl.gov'
+
+    conflicts = ['Svn']
+    experimental = True
+    __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Update', 'Commit']
+
+    def __init__(self, core, datastore):
+        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+
+        if missing:
+            self.logger.error("Svn2: Missing PySvn")
+            raise Bcfg2.Server.Plugin.PluginInitError
+
+        self.client = pysvn.Client()
+
+        self.core = core
+        self.datastore = datastore
+        self.svn_root = None
+        self.revision = None
+
+        # Read revision from bcfg2 repo
+        revision = self.get_revision()
+        if not self.revision:
+            raise Bcfg2.Server.Plugin.PluginInitError
+
+        self.logger.debug("Initialized svn plugin with svn root %s at revision %s"
+                          % (self.svn_root, revision))
+
+    def get_revision(self):
+        """Read svn revision information for the Bcfg2 repository."""
+        try:
+            info = self.client.info(self.datastore)
+            self.revision = info.revision
+            self.svn_root = info.url
+            return str(self.revision.number)
+        except Exception:
+            self.logger.error("Svn2: Failed to get revision", exc_info=1)
+            self.revision = None
+        return str(-1)
+
+    def commit_data(self, file_list, comment=None):
+        """Commit changes into the repository"""
+        if not comment:
+            comment = 'Svn2: autocommit'
+
+        # First try to update
+        if not self.Update():
+            self.logger.error("Failed to update svn repository, refusing to commit changes")
+            return
+
+        #FIXME - look for conflicts?
+
+        for fname in file_list:
+            stat = self.client.status(fname)
+            self.client.add([f.path for f in stat
+                             if f.text_status == pysvn.wc_status_kind.unversioned])
+        try:
+            self.revision = self.client.checkin([self.datastore], comment,
+                                                recurse=True)
+            self.revision = self.client.update(self.datastore, recurse=True)[0]
+            self.logger.info("Svn2: Committed changes. At %s" %
+                             self.revision.number)
+        except Exception:
+            err = sys.exc_info()[1]
+            # try to be smart about the error we got back
+            details = None
+            if "callback_ssl_server_trust_prompt" in str(err):
+                details = "SVN server certificate is not trusted"
+            elif "callback_get_login" in str(err):
+                details = "SVN credentials not cached"
+
+            if details is None:
+                self.logger.error("Svn2: Failed to commit changes",
+                                  exc_info=1)
+            else:
+                self.logger.error("Svn2: Failed to commit changes: %s" %
+                                  details)
+
+    def Update(self):
+        '''Svn2.Update() => True|False\nUpdate svn working copy\n'''
+        try:
+            old_revision = self.revision.number
+            self.revision = self.client.update(self.datastore, recurse=True)[0]
+        except Exception:
+            err = sys.exc_info()[1]
+            # try to be smart about the error we got back
+            details = None
+            if "callback_ssl_server_trust_prompt" in str(err):
+                details = "SVN server certificate is not trusted"
+            elif "callback_get_login" in str(err):
+                details = "SVN credentials not cached"
+
+            if details is None:
+                self.logger.error("Svn2: Failed to update server repository",
+                                  exc_info=1)
+            else:
+                self.logger.error("Svn2: Failed to update server repository: %s" %
                                  details)
+            return False
+
+        if old_revision == self.revision.number:
+            self.logger.debug("repository is current")
+        else:
+            self.logger.info("Updated %s from revision %s to %s" %
+                             (self.datastore, old_revision, self.revision.number))
+        return True
+
+    def Commit(self):
+        """Svn2.Commit() => True|False\nCommit svn repository\n"""
+        try:
+            self.commit_data([])
+            return True
+        except Exception:
+            return False
+
+
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
new file mode 100644
index 000000000..8879fdef1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
@@ -0,0 +1,80 @@
+'''This module implements a templating generator based on Cheetah'''
+
+import binascii
+import logging
+import sys
+import traceback
+import Bcfg2.Server.Plugin
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+    unicode = str
+
+logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
+
+try:
+    import Cheetah.Template
+    import Cheetah.Parser
+except ImportError:
+    logger.error("TCheetah: Failed to import Cheetah. 
Is it installed?") + raise + + +class TemplateFile: + """Template file creates Cheetah template structures for the loaded file.""" + + def __init__(self, name, specific, encoding): + self.name = name + self.specific = specific + self.encoding = encoding + self.template = None + self.searchlist = dict() + + def handle_event(self, event): + """Handle all fs events for this template.""" + if event.code2str() == 'deleted': + return + try: + s = {'useStackFrames': False} + self.template = Cheetah.Template.Template(open(self.name).read(), + compilerSettings=s, + searchList=self.searchlist) + except Cheetah.Parser.ParseError: + perror = sys.exc_info()[1] + logger.error("Cheetah parse error for file %s" % (self.name)) + logger.error(perror.report()) + + def bind_entry(self, entry, metadata): + """Build literal file information.""" + self.template.metadata = metadata + self.searchlist['metadata'] = metadata + self.template.path = entry.get('realname', entry.get('name')) + self.searchlist['path'] = entry.get('realname', entry.get('name')) + self.template.source_path = self.name + self.searchlist['source_path'] = self.name + + if entry.tag == 'Path': + entry.set('type', 'file') + try: + if type(self.template) == unicode: + entry.text = self.template + else: + if entry.get('encoding') == 'base64': + # take care of case where file needs base64 encoding + entry.text = binascii.b2a_base64(self.template) + else: + entry.text = unicode(str(self.template), self.encoding) + except: + (a, b, c) = sys.exc_info() + msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1] + logger.error(msg) + logger.error("TCheetah template error for %s" % self.searchlist['path']) + del a, b, c + raise Bcfg2.Server.Plugin.PluginExecutionError + + +class TCheetah(Bcfg2.Server.Plugin.GroupSpool): + """The TCheetah generator implements a templating mechanism for configuration files.""" + name = 'TCheetah' + __author__ = 'bcfg-dev@mcs.anl.gov' + filename_pattern = 'template' + es_child_cls = TemplateFile diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py new file mode 100644 index 000000000..c4dd40614 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/TGenshi.py @@ -0,0 +1,135 @@ +"""This module implements a templating generator based on Genshi.""" + +import binascii +import logging +import sys +import Bcfg2.Server.Plugin +# py3k compatibility +if sys.hexversion >= 0x03000000: + unicode = str + +logger = logging.getLogger('Bcfg2.Plugins.TGenshi') + +# try to import genshi stuff +try: + import genshi.core + import genshi.input + from genshi.template import TemplateLoader, \ + TextTemplate, MarkupTemplate, TemplateError +except ImportError: + logger.error("TGenshi: Failed to import Genshi. 
Is it installed?") + raise Bcfg2.Server.Plugin.PluginInitError +try: + from genshi.template import NewTextTemplate + have_ntt = True +except: + have_ntt = False + +def removecomment(stream): + """A genshi filter that removes comments from the stream.""" + for kind, data, pos in stream: + if kind is genshi.core.COMMENT: + continue + yield kind, data, pos + + +class TemplateFile: + """Template file creates Genshi template structures for the loaded file.""" + + def __init__(self, name, specific, encoding): + self.name = name + self.specific = specific + self.encoding = encoding + if self.specific.all: + matchname = self.name + elif self.specific.group: + matchname = self.name[:self.name.find('.G')] + else: + matchname = self.name[:self.name.find('.H')] + if matchname.endswith('.txt'): + self.template_cls = TextTemplate + elif matchname.endswith('.newtxt'): + if not have_ntt: + logger.error("Genshi NewTextTemplates not supported by this version of Genshi") + else: + self.template_cls = NewTextTemplate + else: + self.template_cls = MarkupTemplate + self.HandleEvent = self.handle_event + + def handle_event(self, event=None): + """Handle all fs events for this template.""" + if event and event.code2str() == 'deleted': + return + try: + loader = TemplateLoader() + try: + self.template = loader.load(self.name, cls=self.template_cls, + encoding=self.encoding) + except LookupError: + lerror = sys.exc_info()[1] + logger.error('Genshi lookup error: %s' % lerror) + except TemplateError: + terror = sys.exc_info()[1] + logger.error('Genshi template error: %s' % terror) + except genshi.input.ParseError: + perror = sys.exc_info()[1] + logger.error('Genshi parse error: %s' % perror) + + def bind_entry(self, entry, metadata): + """Build literal file information.""" + fname = entry.get('realname', entry.get('name')) + if entry.tag == 'Path': + entry.set('type', 'file') + try: + stream = self.template.generate( \ + name=fname, metadata=metadata, + path=self.name).filter(removecomment) + if have_ntt: + ttypes = [TextTemplate, NewTextTemplate] + else: + ttypes = [TextTemplate] + if True in [isinstance(self.template, t) for t in ttypes]: + try: + textdata = stream.render('text', strip_whitespace=False) + except TypeError: + textdata = stream.render('text') + if type(textdata) == unicode: + entry.text = textdata + else: + if entry.get('encoding') == 'base64': + # take care of case where file needs base64 encoding + entry.text = binascii.b2a_base64(textdata) + else: + entry.text = unicode(textdata, self.encoding) + else: + try: + xmldata = stream.render('xml', strip_whitespace=False) + except TypeError: + xmldata = stream.render('xml') + if type(xmldata) == unicode: + entry.text = xmldata + else: + entry.text = unicode(xmldata, self.encoding) + if entry.text == '': + entry.set('empty', 'true') + except TemplateError: + err = sys.exc_info()[1] + logger.exception('Genshi template error') + raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template error: %s' % err) + except AttributeError: + err = sys.exc_info()[1] + logger.exception('Genshi template loading error') + raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err) + + +class TGenshi(Bcfg2.Server.Plugin.GroupSpool): + """ + The TGenshi generator implements a templating + mechanism for configuration files. 
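bind_entry above funnels every template through generate() and then render('text'/'xml'), falling back to base64 when a file needs binary-safe encoding. A self-contained sketch of the same Genshi calls outside the plugin (the template text and variables are invented for illustration):

    from genshi.template import NewTextTemplate

    tmpl = NewTextTemplate("ServerName ${metadata['hostname']}\n")
    stream = tmpl.generate(metadata={'hostname': 'client1.example.com'})
    print(stream.render('text'))  # -> "ServerName client1.example.com"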
+ + """ + name = 'TGenshi' + __author__ = 'jeff@ocjtech.us' + filename_pattern = 'template\.(txt|newtxt|xml)' + es_child_cls = TemplateFile diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py new file mode 100644 index 000000000..2c0ee03e0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py @@ -0,0 +1,83 @@ +import re +import imp +import sys +import logging +import Bcfg2.Server.Plugin + +logger = logging.getLogger(__name__) + +class HelperModule(Bcfg2.Server.Plugin.SpecificData): + _module_name_re = re.compile(r'([^/]+?)\.py') + + def __init__(self, name, specific, encoding): + Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific, + encoding) + match = self._module_name_re.search(self.name) + if match: + self._module_name = match.group(1) + else: + self._module_name = name + self._attrs = [] + + def handle_event(self, event): + Bcfg2.Server.Plugin.SpecificData.handle_event(self, event) + try: + module = imp.load_source(self._module_name, self.name) + except: + err = sys.exc_info()[1] + logger.error("TemplateHelper: Failed to import %s: %s" % + (self.name, err)) + return + + if not hasattr(module, "__export__"): + logger.error("TemplateHelper: %s has no __export__ list" % + self.name) + return + + for sym in module.__export__: + if sym not in self._attrs and hasattr(self, sym): + logger.warning("TemplateHelper: %s: %s is a reserved keyword, " + "skipping export" % (self.name, sym)) + setattr(self, sym, getattr(module, sym)) + # remove old exports + for sym in set(self._attrs) - set(module.__export__): + delattr(self, sym) + + self._attrs = module.__export__ + + +class HelperSet(Bcfg2.Server.Plugin.EntrySet): + ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\.py[co])$") + + def __init__(self, path, fam, encoding, plugin_name): + fpattern = '[0-9A-Za-z_\-]+\.py' + self.plugin_name = plugin_name + Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, + HelperModule, encoding) + fam.AddMonitor(path, self) + + def HandleEvent(self, event): + if (event.filename != self.path and + not self.ignore.match(event.filename)): + return self.handle_event(event) + + +class TemplateHelper(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector): + """ A plugin to provide helper classes and functions to templates """ + name = 'TemplateHelper' + __author__ = 'chris.a.st.pierre@gmail.com' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + + try: + self.helpers = HelperSet(self.data, core.fam, core.encoding, + self.name) + except: + raise Bcfg2.Server.Plugin.PluginInitError + + def get_additional_data(self, metadata): + return dict([(h._module_name, h) + for h in list(self.helpers.entries.values())]) diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py new file mode 100644 index 000000000..b0d21545c --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py @@ -0,0 +1,43 @@ +import os +import Bcfg2.Server.Plugin + + +def async_run(prog, args): + pid = os.fork() + if pid: + os.waitpid(pid, 0) + else: + dpid = os.fork() + if not dpid: + os.system(" ".join([prog] + args)) + os._exit(0) + + +class Trigger(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Statistics): + """Trigger is a plugin that calls external scripts (on the server).""" + name = 'Trigger' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, 
core, datastore) + Bcfg2.Server.Plugin.Statistics.__init__(self) + try: + os.stat(self.data) + except: + self.logger.error("Trigger: spool directory %s does not exist; " + "unloading" % self.data) + raise Bcfg2.Server.Plugin.PluginInitError + + def process_statistics(self, metadata, _): + args = [metadata.hostname, '-p', metadata.profile, '-g', + ':'.join([g for g in metadata.groups])] + for notifier in os.listdir(self.data): + if ((notifier[-1] == '~') or + (notifier[:2] == '.#') or + (notifier[-4:] == '.swp') or + (notifier in ['SCCS', '.svn', '4913'])): + continue + npath = self.data + '/' + notifier + self.logger.debug("Running %s %s" % (npath, " ".join(args))) + async_run(npath, args) diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py new file mode 100644 index 000000000..f9f1b4e52 --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/__init__.py @@ -0,0 +1,34 @@ +"""Imports for Bcfg2.Server.Plugins.""" + +__all__ = [ + 'Account', + 'Base', + 'Bundler', + 'Bzr', + 'Cfg', + 'Cvs', + 'Darcs', + 'Decisions', + 'Fossil', + 'Git', + 'GroupPatterns', + 'Hg', + 'Hostbase', + 'Metadata', + 'NagiosGen', + 'Ohai', + 'Packages', + 'Properties', + 'Probes', + 'Pkgmgr', + 'Rules', + 'SSHbase', + 'Snapshots', + 'Statistics', + 'Svcmgr', + 'Svn', + 'TCheetah', + 'Trigger', + 'SGenshi', + 'TGenshi', + ] diff --git a/src/lib/Bcfg2/Server/Reports/__init__.py b/src/lib/Bcfg2/Server/Reports/__init__.py new file mode 100644 index 000000000..bdf908f4a --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/__init__.py @@ -0,0 +1 @@ +__all__ = ['manage', 'nisauth', 'reports', 'settings', 'backends', 'urls', 'importscript'] diff --git a/src/lib/Bcfg2/Server/Reports/backends.py b/src/lib/Bcfg2/Server/Reports/backends.py new file mode 100644 index 000000000..85241932f --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/backends.py @@ -0,0 +1,34 @@ +from django.contrib.auth.models import User +from nisauth import * + + +class NISBackend(object): + + def authenticate(self, username=None, password=None): + try: + print("start nis authenticate") + n = nisauth(username, password) + temp_pass = User.objects.make_random_password(100) + nis_user = dict(username=username, + ) + + user_session_obj = dict(email=username, + first_name=None, + last_name=None, + uid=n.uid) + user, created = User.objects.get_or_create(username=username) + + return user + + except NISAUTHError: + e = sys.exc_info()[1] + print(e) + return None + + def get_user(self, user_id): + try: + return User.objects.get(pk=user_id) + except User.DoesNotExist: + e = sys.exc_info()[1] + print(e) + return None diff --git a/src/lib/Bcfg2/Server/Reports/importscript.py b/src/lib/Bcfg2/Server/Reports/importscript.py new file mode 100755 index 000000000..16df86a9b --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/importscript.py @@ -0,0 +1,314 @@ +#! /usr/bin/env python +""" +Imports statistics.xml and clients.xml files in to database backend for +new statistics engine +""" + +import binascii +import os +import sys +try: + import Bcfg2.Server.Reports.settings +except Exception: + e = sys.exc_info()[1] + sys.stderr.write("Failed to load configuration settings. %s\n" % e) + sys.exit(1) + +project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__) +project_name = os.path.basename(project_directory) +sys.path.append(os.path.join(project_directory, '..')) +project_module = __import__(project_name, '', '', ['']) +sys.path.pop() +# Set DJANGO_SETTINGS_MODULE appropriately. 
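importscript.py's bootstrap here derives the Django project from the on-disk location of settings.py and then points DJANGO_SETTINGS_MODULE at it before any model import. The same idea as a condensed, hedged helper (the function name and example path are illustrative only):

    import os
    import sys

    def point_django_at(settings_file):
        """Derive DJANGO_SETTINGS_MODULE from a settings.py path,
        mirroring the project-discovery logic in the script."""
        project_dir = os.path.dirname(settings_file)
        sys.path.append(os.path.join(project_dir, '..'))
        os.environ['DJANGO_SETTINGS_MODULE'] = \
            '%s.settings' % os.path.basename(project_dir)

    # e.g. point_django_at('/usr/lib/python2.6/site-packages/'
    #                      'Bcfg2/Server/Reports/settings.py')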
+os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name + +from Bcfg2.Server.Reports.reports.models import * +from lxml.etree import XML, XMLSyntaxError +from getopt import getopt, GetoptError +from datetime import datetime +from time import strptime +from django.db import connection +from Bcfg2.Server.Reports.updatefix import update_database +import logging +import Bcfg2.Logger +import platform + +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + + +def build_reason_kwargs(r_ent, encoding, logger): + binary_file = False + sensitive_file = False + unpruned_entries = '' + if r_ent.get('sensitive') in ['true', 'True']: + sensitive_file = True + rc_diff = '' + elif r_ent.get('current_bfile', False): + binary_file = True + rc_diff = r_ent.get('current_bfile') + if len(rc_diff) > 1024 * 1024: + rc_diff = '' + elif len(rc_diff) == 0: + # No point in flagging binary if we have no data + binary_file = False + elif r_ent.get('current_bdiff', False): + rc_diff = binascii.a2b_base64(r_ent.get('current_bdiff')) + elif r_ent.get('current_diff', False): + rc_diff = r_ent.get('current_diff') + else: + rc_diff = '' + # detect unmanaged entries in pruned directories + if r_ent.get('prune', 'false') == 'true' and r_ent.get('qtest'): + unpruned_elist = [e.get('path') for e in r_ent.findall('Prune')] + unpruned_entries = "\n".join(unpruned_elist) + if not binary_file: + try: + rc_diff = rc_diff.decode(encoding) + except: + logger.error("Reason isn't %s encoded, cannot decode it" % encoding) + rc_diff = '' + return dict(owner=r_ent.get('owner', default=""), + current_owner=r_ent.get('current_owner', default=""), + group=r_ent.get('group', default=""), + current_group=r_ent.get('current_group', default=""), + perms=r_ent.get('perms', default=""), + current_perms=r_ent.get('current_perms', default=""), + status=r_ent.get('status', default=""), + current_status=r_ent.get('current_status', default=""), + to=r_ent.get('to', default=""), + current_to=r_ent.get('current_to', default=""), + version=r_ent.get('version', default=""), + current_version=r_ent.get('current_version', default=""), + current_exists=r_ent.get('current_exists', default="True").capitalize() == "True", + current_diff=rc_diff, + is_binary=binary_file, + is_sensitive=sensitive_file, + unpruned=unpruned_entries) + + +def load_stats(cdata, sdata, encoding, vlevel, logger, quick=False, location=''): + clients = {} + [clients.__setitem__(c.name, c) \ + for c in Client.objects.all()] + + pingability = {} + [pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \ + for n in cdata.findall('Client')] + + for node in sdata.findall('Node'): + name = node.get('name') + c_inst, created = Client.objects.get_or_create(name=name) + if vlevel > 0: + logger.info("Client %s added to db" % name) + clients[name] = c_inst + try: + pingability[name] + except KeyError: + pingability[name] = 'N' + for statistics in node.findall('Statistics'): + timestamp = datetime(*strptime(statistics.get('time'))[0:6]) + ilist = Interaction.objects.filter(client=c_inst, + timestamp=timestamp) + if ilist: + current_interaction = ilist[0] + if vlevel > 0: + logger.info("Interaction for %s at %s with id %s already exists" % \ + (c_inst.id, timestamp, current_interaction.id)) + continue + else: + newint = Interaction(client=c_inst, + timestamp=timestamp, + state=statistics.get('state', + default="unknown"), + repo_rev_code=statistics.get('revision', + default="unknown"), + goodcount=statistics.get('good', + default="0"), + 
totalcount=statistics.get('total', + default="0"), + server=location) + newint.save() + current_interaction = newint + if vlevel > 0: + logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (c_inst.id, + timestamp, current_interaction.id)) + + counter_fields = {TYPE_CHOICES[0]: 0, + TYPE_CHOICES[1]: 0, + TYPE_CHOICES[2]: 0} + pattern = [('Bad/*', TYPE_CHOICES[0]), + ('Extra/*', TYPE_CHOICES[2]), + ('Modified/*', TYPE_CHOICES[1])] + for (xpath, type) in pattern: + for x in statistics.findall(xpath): + counter_fields[type] = counter_fields[type] + 1 + kargs = build_reason_kwargs(x, encoding, logger) + + try: + rr = None + try: + rr = Reason.objects.filter(**kargs)[0] + except IndexError: + rr = Reason(**kargs) + rr.save() + if vlevel > 0: + logger.info("Created reason: %s" % rr.id) + except Exception: + ex = sys.exc_info()[1] + logger.error("Failed to create reason for %s: %s" % (x.get('name'), ex)) + rr = Reason(current_exists=x.get('current_exists', + default="True").capitalize() == "True") + rr.save() + + entry, created = Entries.objects.get_or_create(\ + name=x.get('name'), kind=x.tag) + + Entries_interactions(entry=entry, reason=rr, + interaction=current_interaction, + type=type[0]).save() + if vlevel > 0: + logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id)) + + # Update interaction counters + current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]] + current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]] + current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]] + current_interaction.save() + + mperfs = [] + for times in statistics.findall('OpStamps'): + for metric, value in list(times.items()): + mmatch = [] + if not quick: + mmatch = Performance.objects.filter(metric=metric, value=value) + + if mmatch: + mperf = mmatch[0] + else: + mperf = Performance(metric=metric, value=value) + mperf.save() + mperfs.append(mperf) + current_interaction.performance_items.add(*mperfs) + + for key in list(pingability.keys()): + if key not in clients: + continue + try: + pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0] + if pmatch.status == pingability[key]: + pmatch.endtime = datetime.now() + pmatch.save() + continue + except IndexError: + pass + Ping(client=clients[key], status=pingability[key], + starttime=datetime.now(), + endtime=datetime.now()).save() + + if vlevel > 1: + logger.info("---------------PINGDATA SYNCED---------------------") + + #Clients are consistent + +if __name__ == '__main__': + from sys import argv + verb = 0 + cpath = "/etc/bcfg2.conf" + clientpath = False + statpath = False + syslog = False + + try: + opts, args = getopt(argv[1:], "hvudc:s:CS", ["help", + "verbose", + "updates", + "debug", + "clients=", + "stats=", + "config=", + "syslog"]) + except GetoptError: + mesg = sys.exc_info()[1] + # print help information and exit: + print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg)) + raise SystemExit(2) + + for o, a in opts: + if o in ("-h", "--help"): + print("Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n") + print("h : help; this message") + print("v : verbose; print messages on record insertion/skip") + print("u : updates; print status messages as items inserted semi-verbose") + print("d : debug; print most SQL used to manipulate database") + print("C : path to bcfg2.conf config file.") + print("c : clients.xml file") + print("s : statistics.xml 
file") + print("S : syslog; output to syslog") + raise SystemExit + if o in ["-C", "--config"]: + cpath = a + + if o in ("-v", "--verbose"): + verb = 1 + if o in ("-u", "--updates"): + verb = 2 + if o in ("-d", "--debug"): + verb = 3 + if o in ("-c", "--clients"): + clientspath = a + + if o in ("-s", "--stats"): + statpath = a + if o in ("-S", "--syslog"): + syslog = True + + logger = logging.getLogger('importscript.py') + logging.getLogger().setLevel(logging.INFO) + Bcfg2.Logger.setup_logging('importscript.py', + True, + syslog) + + cf = ConfigParser.ConfigParser() + cf.read([cpath]) + + if not statpath: + try: + statpath = "%s/etc/statistics.xml" % cf.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + print("Could not read bcfg2.conf; exiting") + raise SystemExit(1) + try: + statsdata = XML(open(statpath).read()) + except (IOError, XMLSyntaxError): + print("StatReports: Failed to parse %s" % (statpath)) + raise SystemExit(1) + + try: + encoding = cf.get('components', 'encoding') + except: + encoding = 'UTF-8' + + if not clientpath: + try: + clientspath = "%s/Metadata/clients.xml" % \ + cf.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + print("Could not read bcfg2.conf; exiting") + raise SystemExit(1) + try: + clientsdata = XML(open(clientspath).read()) + except (IOError, XMLSyntaxError): + print("StatReports: Failed to parse %s" % (clientspath)) + raise SystemExit(1) + + q = '-O3' in sys.argv + # Be sure the database is ready for new schema + update_database() + load_stats(clientsdata, + statsdata, + encoding, + verb, + logger, + quick=q, + location=platform.node()) diff --git a/src/lib/Bcfg2/Server/Reports/manage.py b/src/lib/Bcfg2/Server/Reports/manage.py new file mode 100755 index 000000000..858bddeca --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/manage.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +try: + import settings # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. 
It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) + sys.exit(1) + +if __name__ == "__main__": + execute_manager(settings) diff --git a/src/lib/Bcfg2/Server/Reports/nisauth.py b/src/lib/Bcfg2/Server/Reports/nisauth.py new file mode 100644 index 000000000..b3e37113b --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/nisauth.py @@ -0,0 +1,44 @@ +import crypt +import nis +from Bcfg2.Server.Reports.settings import AUTHORIZED_GROUP + +"""Checks with NIS to see if the current user is in the support group""" + + +class NISAUTHError(Exception): + """NISAUTHError is raised when somehting goes boom.""" + pass + + +class nisauth(object): + group_test = False + samAcctName = None + distinguishedName = None + sAMAccountName = None + telephoneNumber = None + title = None + memberOf = None + department = None # this will be a list + mail = None + extensionAttribute1 = None # badgenumber + badge_no = None + uid = None + + def __init__(self, login, passwd=None): + """get user profile from NIS""" + try: + p = nis.match(login, 'passwd.byname').split(":") + print(p) + except: + raise NISAUTHError('username') + # check user password using crypt and 2 character salt from passwd file + if p[1] == crypt.crypt(passwd, p[1][:2]): + # check to see if user is in valid support groups + # will have to include these groups in a settings file eventually + if not login in nis.match(AUTHORIZED_GROUP, + 'group.byname').split(':')[-1].split(','): + raise NISAUTHError('group') + self.uid = p[2] + print(self.uid) + else: + raise NISAUTHError('password') diff --git a/src/lib/Bcfg2/Server/Reports/reports/__init__.py b/src/lib/Bcfg2/Server/Reports/reports/__init__.py new file mode 100644 index 000000000..ccdce8943 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/__init__.py @@ -0,0 +1 @@ +__all__ = ['templatetags'] diff --git a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml b/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml new file mode 100644 index 000000000..bde236989 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml @@ -0,0 +1,43 @@ +<?xml version='1.0' encoding='utf-8' ?> +<django-objects version="1.0"> + <object pk="1" model="reports.internaldatabaseversion"> + <field type="IntegerField" name="version">0</field> + <field type="DateTimeField" name="updated">2008-08-05 11:03:50</field> + </object> + <object pk="2" model="reports.internaldatabaseversion"> + <field type="IntegerField" name="version">1</field> + <field type="DateTimeField" name="updated">2008-08-05 11:04:10</field> + </object> + <object pk="3" model="reports.internaldatabaseversion"> + <field type="IntegerField" name="version">2</field> + <field type="DateTimeField" name="updated">2008-08-05 13:37:19</field> + </object> + <object pk="4" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>3</field> + <field type='DateTimeField' name='updated'>2008-08-11 08:44:36</field> + </object> + <object pk="5" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>10</field> + <field type='DateTimeField' name='updated'>2008-08-22 11:28:50</field> + </object> + <object pk="5" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>11</field> + <field type='DateTimeField' name='updated'>2009-01-13 12:26:10</field> + </object> + <object pk="6" 
model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>16</field> + <field type='DateTimeField' name='updated'>2010-06-01 12:26:10</field> + </object> + <object pk="7" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>17</field> + <field type='DateTimeField' name='updated'>2010-07-02 00:00:00</field> + </object> + <object pk="8" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>18</field> + <field type='DateTimeField' name='updated'>2011-06-30 00:00:00</field> + </object> + <object pk="8" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>19</field> + <field type='DateTimeField' name='updated'>2012-03-28 00:00:00</field> + </object> +</django-objects> diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py new file mode 100644 index 000000000..0438ea133 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/models.py @@ -0,0 +1,343 @@ +"""Django models for Bcfg2 reports.""" +from django.db import models +from django.db import connection, transaction +from django.db.models import Q +from datetime import datetime, timedelta +from time import strptime + +KIND_CHOICES = ( + #These are the kinds of config elements + ('Package', 'Package'), + ('Path', 'directory'), + ('Path', 'file'), + ('Path', 'permissions'), + ('Path', 'symlink'), + ('Service', 'Service'), +) +PING_CHOICES = ( + #These are possible ping states + ('Up (Y)', 'Y'), + ('Down (N)', 'N') +) +TYPE_BAD = 1 +TYPE_MODIFIED = 2 +TYPE_EXTRA = 3 + +TYPE_CHOICES = ( + (TYPE_BAD, 'Bad'), + (TYPE_MODIFIED, 'Modified'), + (TYPE_EXTRA, 'Extra'), +) + + +def convert_entry_type_to_id(type_name): + """Convert a entry type to its entry id""" + for e_id, e_name in TYPE_CHOICES: + if e_name.lower() == type_name.lower(): + return e_id + return -1 + + +class ClientManager(models.Manager): + """Extended client manager functions.""" + def active(self, timestamp=None): + """returns a set of clients that have been created and have not + yet been expired as of optional timestmamp argument. 
Timestamp + should be a datetime object.""" + + if timestamp == None: + timestamp = datetime.now() + elif not isinstance(timestamp, datetime): + raise ValueError('Expected a datetime object') + else: + try: + timestamp = datetime(*strptime(timestamp, + "%Y-%m-%d %H:%M:%S")[0:6]) + except ValueError: + return self.none() + + return self.filter(Q(expiration__gt=timestamp) | Q(expiration__isnull=True), + creation__lt=timestamp) + + +class Client(models.Model): + """Object representing every client we have seen stats for.""" + creation = models.DateTimeField(auto_now_add=True) + name = models.CharField(max_length=128,) + current_interaction = models.ForeignKey('Interaction', + null=True, blank=True, + related_name="parent_client") + expiration = models.DateTimeField(blank=True, null=True) + + def __str__(self): + return self.name + + objects = ClientManager() + + class Admin: + pass + + +class Ping(models.Model): + """Represents a ping of a client (sparsely).""" + client = models.ForeignKey(Client, related_name="pings") + starttime = models.DateTimeField() + endtime = models.DateTimeField() + status = models.CharField(max_length=4, choices=PING_CHOICES) # up/down + + class Meta: + get_latest_by = 'endtime' + + +class InteractiveManager(models.Manager): + """Manages interactions objects.""" + + def recent_interactions_dict(self, maxdate=None, active_only=True): + """ + Return the most recent interactions for clients as of a date. + + This method uses aggregated queries to return a ValuesQueryDict object. + Faster then raw sql since this is executed as a single query. + """ + + return list(self.values('client').annotate(max_timestamp=Max('timestamp')).values()) + + def interaction_per_client(self, maxdate=None, active_only=True): + """ + Returns the most recent interactions for clients as of a date + + Arguments: + maxdate -- datetime object. Most recent date to pull. (dafault None) + active_only -- Include only active clients (default True) + + """ + + if maxdate and not isinstance(maxdate, datetime): + raise ValueError('Expected a datetime object') + return self.filter(id__in=self.get_interaction_per_client_ids(maxdate, active_only)) + + def get_interaction_per_client_ids(self, maxdate=None, active_only=True): + """ + Returns the ids of most recent interactions for clients as of a date. + + Arguments: + maxdate -- datetime object. Most recent date to pull. 
(dafault None) + active_only -- Include only active clients (default True) + + """ + from django.db import connection + cursor = connection.cursor() + cfilter = "expiration is null" + + sql = 'select reports_interaction.id, x.client_id from (select client_id, MAX(timestamp) ' + \ + 'as timer from reports_interaction' + if maxdate: + if not isinstance(maxdate, datetime): + raise ValueError('Expected a datetime object') + sql = sql + " where timestamp <= '%s' " % maxdate + cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate, maxdate) + sql = sql + ' GROUP BY client_id) x, reports_interaction where ' + \ + 'reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer' + if active_only: + sql = sql + " and x.client_id in (select id from reports_client where %s)" % \ + cfilter + try: + cursor.execute(sql) + return [item[0] for item in cursor.fetchall()] + except: + '''FIXME - really need some error hadling''' + pass + return [] + + +class Interaction(models.Model): + """Models each reconfiguration operation interaction between client and server.""" + client = models.ForeignKey(Client, related_name="interactions",) + timestamp = models.DateTimeField() # Timestamp for this record + state = models.CharField(max_length=32) # good/bad/modified/etc + repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction + goodcount = models.IntegerField() # of good config-items + totalcount = models.IntegerField() # of total config-items + server = models.CharField(max_length=256) # Name of the server used for the interaction + bad_entries = models.IntegerField(default=-1) + modified_entries = models.IntegerField(default=-1) + extra_entries = models.IntegerField(default=-1) + + def __str__(self): + return "With " + self.client.name + " @ " + self.timestamp.isoformat() + + def percentgood(self): + if not self.totalcount == 0: + return (self.goodcount / float(self.totalcount)) * 100 + else: + return 0 + + def percentbad(self): + if not self.totalcount == 0: + return ((self.totalcount - self.goodcount) / (float(self.totalcount))) * 100 + else: + return 0 + + def isclean(self): + if (self.bad_entry_count() == 0 and self.goodcount == self.totalcount): + return True + else: + return False + + def isstale(self): + if (self == self.client.current_interaction): # Is Mostrecent + if(datetime.now() - self.timestamp > timedelta(hours=25)): + return True + else: + return False + else: + #Search for subsequent Interaction for this client + #Check if it happened more than 25 hrs ago. + if (self.client.interactions.filter(timestamp__gt=self.timestamp) + .order_by('timestamp')[0].timestamp - + self.timestamp > timedelta(hours=25)): + return True + else: + return False + + def save(self): + super(Interaction, self).save() # call the real save... + self.client.current_interaction = self.client.interactions.latest() + self.client.save() # save again post update + + def delete(self): + '''Override the default delete. Allows us to remove Performance items''' + pitems = list(self.performance_items.all()) + super(Interaction, self).delete() + for perf in pitems: + if perf.interaction.count() == 0: + perf.delete() + + def badcount(self): + return self.totalcount - self.goodcount + + def bad(self): + return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_BAD) + + def bad_entry_count(self): + """Number of bad entries. 
Store the count in the interaction field to save db queries."""
+        if self.bad_entries < 0:
+            self.bad_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_BAD).count()
+            self.save()
+        return self.bad_entries
+
+    def modified(self):
+        return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_MODIFIED)
+
+    def modified_entry_count(self):
+        """Number of modified entries. Store the count in the interaction field to save db queries."""
+        if self.modified_entries < 0:
+            self.modified_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_MODIFIED).count()
+            self.save()
+        return self.modified_entries
+
+    def extra(self):
+        return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_EXTRA)
+
+    def extra_entry_count(self):
+        """Number of extra entries. Store the count in the interaction field to save db queries."""
+        if self.extra_entries < 0:
+            self.extra_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_EXTRA).count()
+            self.save()
+        return self.extra_entries
+
+    objects = InteractiveManager()
+
+    class Admin:
+        list_display = ('client', 'timestamp', 'state')
+        list_filter = ['client', 'timestamp']
+
+    class Meta:
+        get_latest_by = 'timestamp'
+        ordering = ['-timestamp']
+        unique_together = ("client", "timestamp")
+
+
+class Reason(models.Model):
+    """Reason why a modified or bad entry did not verify, or changed."""
+    owner = models.TextField(max_length=128, blank=True)
+    current_owner = models.TextField(max_length=128, blank=True)
+    group = models.TextField(max_length=128, blank=True)
+    current_group = models.TextField(max_length=128, blank=True)
+    perms = models.TextField(max_length=4, blank=True)  # txt fixes typing issue
+    current_perms = models.TextField(max_length=4, blank=True)
+    status = models.TextField(max_length=3, blank=True)  # on/off/(None)
+    current_status = models.TextField(max_length=1, blank=True)  # on/off/(None)
+    to = models.TextField(max_length=256, blank=True)
+    current_to = models.TextField(max_length=256, blank=True)
+    version = models.TextField(max_length=128, blank=True)
+    current_version = models.TextField(max_length=128, blank=True)
+    current_exists = models.BooleanField()  # False means it's missing. Default True
+    current_diff = models.TextField(max_length=1280, blank=True)
+    is_binary = models.BooleanField(default=False)
+    is_sensitive = models.BooleanField(default=False)
+    unpruned = models.TextField(max_length=1280, blank=True)
+
+    def __str__(self):
+        return "Reason"
+
+    @staticmethod
+    @transaction.commit_on_success
+    def prune_orphans():
+        '''Prune orphaned rows... no good way to use the ORM'''
+        cursor = connection.cursor()
+        cursor.execute('delete from reports_reason where not exists (select rei.id from reports_entries_interactions rei where rei.reason_id = reports_reason.id)')
+        transaction.set_dirty()
+
+
+class Entries(models.Model):
+    """Contains all the entries fed by the client."""
+    name = models.CharField(max_length=128, db_index=True)
+    kind = models.CharField(max_length=16, choices=KIND_CHOICES, db_index=True)
+
+    def __str__(self):
+        return self.name
+
+    @staticmethod
+    @transaction.commit_on_success
+    def prune_orphans():
+        '''Prune orphaned rows... 
no good way to use the ORM''' + cursor = connection.cursor() + cursor.execute('delete from reports_entries where not exists (select rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)') + transaction.set_dirty() + + +class Entries_interactions(models.Model): + """Define the relation between the reason, the interaction and the entry.""" + entry = models.ForeignKey(Entries) + reason = models.ForeignKey(Reason) + interaction = models.ForeignKey(Interaction) + type = models.IntegerField(choices=TYPE_CHOICES) + + +class Performance(models.Model): + """Object representing performance data for any interaction.""" + interaction = models.ManyToManyField(Interaction, related_name="performance_items") + metric = models.CharField(max_length=128) + value = models.DecimalField(max_digits=32, decimal_places=16) + + def __str__(self): + return self.metric + + @staticmethod + @transaction.commit_on_success + def prune_orphans(): + '''Prune oprhaned rows... no good way to use the ORM''' + cursor = connection.cursor() + cursor.execute('delete from reports_performance where not exists (select ri.id from reports_performance_interaction ri where ri.performance_id = reports_performance.id)') + transaction.set_dirty() + + +class InternalDatabaseVersion(models.Model): + """Object that tell us to witch version is the database.""" + version = models.IntegerField() + updated = models.DateTimeField(auto_now_add=True) + + def __str__(self): + return "version %d updated the %s" % (self.version, self.updated.isoformat()) diff --git a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql b/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql new file mode 100644 index 000000000..8c63754c9 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql @@ -0,0 +1,9 @@ +CREATE VIEW reports_current_interactions AS SELECT x.client_id AS client_id, reports_interaction.id AS interaction_id FROM (select client_id, MAX(timestamp) as timer FROM reports_interaction GROUP BY client_id) x, reports_interaction WHERE reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer; + +create index reports_interaction_client_id on reports_interaction (client_id); +create index reports_extra_interactions_client_id on reports_extra_interactions(interaction_id); +create index reports_modified_interactions_client_id on reports_modified_interactions(interaction_id); +create index reports_client_current_interaction_id on reports_client (current_interaction_id); +create index reports_performance_interaction_performance_id on reports_performance_interaction (performance_id); +create index reports_interaction_timestamp on reports_interaction (timestamp); +create index reports_performance_interation_interaction_id on reports_performance_interaction (interaction_id);
\ No newline at end of file diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/404.html b/src/lib/Bcfg2/Server/Reports/reports/templates/404.html new file mode 100644 index 000000000..168bd9fec --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/404.html @@ -0,0 +1,8 @@ +{% extends 'base.html' %} +{% block title %}Bcfg2 - Page not found{% endblock %} +{% block fullcontent %} +<h2>Page not found</h2> +<p> +The page or object requested could not be found. +</p> +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html new file mode 100644 index 000000000..842de36f0 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html @@ -0,0 +1,25 @@ +{% extends "base.html" %} + +{% block timepiece %} +<script type="text/javascript"> +function showCalendar() { + var cal = new CalendarPopup("calendar_div"); + cal.showYearNavigation(); + cal.select(document.forms['cal_form'].cal_date,'cal_link', + 'yyyy/MM/dd' {% if timestamp %}, '{{ timestamp|date:"Y/m/d" }}'{% endif %} ); + return false; +} +function bcfg2_check_date() { + var new_date = document.getElementById('cal_date').value; + if(new_date) { + document.cal_form.submit(); + } +} +document.write(getCalendarStyles()); +</script> +{% if not timestamp %}Rendered at {% now "Y-m-d H:i" %} | {% else %}View as of {{ timestamp|date:"Y-m-d H:i" }} | {% endif %}{% spaceless %} + <a id='cal_link' name='cal_link' href='#' onclick='showCalendar(); return false;' + >[change]</a> + <form method='post' action='{{ path }}' id='cal_form' name='cal_form'><input id='cal_date' name='cal_date' type='hidden' value=''/></form> +{% endspaceless %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html new file mode 100644 index 000000000..f541c0d2b --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html @@ -0,0 +1,95 @@ +{% load bcfg2_tags %} + +<?xml version="1.0"?> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> +<head> +<title>{% block title %}Bcfg2 Reporting System{% endblock %}</title> + +<meta http-equiv="Content-type" content="text/html; charset=utf-8" /> +<meta http-equiv="Content-language" content="en" /> +<meta http-equiv="X-UA-Compatible" content="IE=EmulateIE7" /> +<meta name="robots" content="noindex, nofollow" /> +<meta http-equiv="cache-control" content="no-cache" /> + +<link rel="stylesheet" type="text/css" href="{% to_media_url bcfg2_base.css %}" media="all" /> +<script type="text/javascript" src="{% to_media_url bcfg2.js %}"></script> +<script type="text/javascript" src="{% to_media_url date.js %}"></script> +<script type="text/javascript" src="{% to_media_url AnchorPosition.js %}"></script> +<script type="text/javascript" src="{% to_media_url CalendarPopup.js %}"></script> +<script type="text/javascript" src="{% to_media_url PopupWindow.js %}"></script> +{% block extra_header_info %}{% endblock %} + +</head> +<body onload="{% block body_onload %}{% endblock %}"> + + <div id="header"> + <a href="http://bcfg2.org"><img src='{% to_media_url bcfg2_logo.png %}' + height='115' width='300' alt='Bcfg2' style='float:left; height: 115px' /></a> + </div> + +<div id="document"> + <div id="content"><div id="contentwrapper"> + {% block fullcontent %} + <div class='page_name'> + 
<h1>{% block pagebanner %}Page Banner{% endblock %}</h1> + <div id="timepiece">{% block timepiece %}Rendered at {% now "Y-m-d H:i" %}{% endblock %}</div> + </div> + <div class='detail_wrapper'> + {% block content %}{% endblock %} + </div> + {% endblock %} + </div></div><!-- content --> + <div id="sidemenucontainer"><div id="sidemenu"> + {% block sidemenu %} + <ul class='menu-level1'> + <li>Overview</li> + </ul> + <ul class='menu-level2'> + <li><a href="{% url reports_summary %}">Summary</a></li> + <li><a href="{% url reports_history %}">Recent Interactions</a></li> + <li><a href="{% url reports_timing %}">Timing</a></li> + </ul> + <ul class='menu-level1'> + <li>Clients</li> + </ul> + <ul class='menu-level2'> + <li><a href="{% url reports_grid_view %}">Grid View</a></li> + <li><a href="{% url reports_detailed_list %}">Detailed List</a></li> + <li><a href="{% url reports_client_manage %}">Manage</a></li> + </ul> + <ul class='menu-level1'> + <li>Entries Configured</li> + </ul> + <ul class='menu-level2'> + <li><a href="{% url reports_item_list "bad" %}">Bad</a></li> + <li><a href="{% url reports_item_list "modified" %}">Modified</a></li> + <li><a href="{% url reports_item_list "extra" %}">Extra</a></li> + </ul> +{% comment %} + TODO + <ul class='menu-level1'> + <li>Entry Types</li> + </ul> + <ul class='menu-level2'> + <li><a href="#">Action</a></li> + <li><a href="#">Package</a></li> + <li><a href="#">Path</a></li> + <li><a href="#">Service</a></li> + </ul> +{% endcomment %} + <ul class='menu-level1'> + <li><a href="http://bcfg2.org">Homepage</a></li> + <li><a href="http://docs.bcfg2.org">Documentation</a></li> + </ul> + {% endblock %} + </div></div><!-- sidemenu --> + <div style='clear:both'></div> +</div><!-- document --> + <div id="footer"> + <span>Bcfg2 Version 1.2.2</span> + </div> + +<div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div> +</body> +</html> diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html new file mode 100644 index 000000000..dd4295f21 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html @@ -0,0 +1,127 @@ +{% extends "base.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Client {{client.name}}{% endblock %} + +{% block extra_header_info %} +<style type="text/css"> +.node_data { + border: 1px solid #98DBCC; + margin: 10px; + padding-left: 18px; +} +.node_data td { + padding: 1px 20px 1px 2px; +} +span.history_links { + font-size: 90%; + margin-left: 50px; +} +span.history_links a { + font-size: 90%; +} +</style> +{% endblock %} + +{% block body_onload %}javascript:clientdetailload(){% endblock %} + +{% block pagebanner %}Client Details{% endblock %} + +{% block content %} + <div class='detail_header'> + <h2>{{client.name}}</h2> + <a href='{% url reports_client_manage %}#{{ client.name }}'>[manage]</a> + <span class='history_links'><a href="{% url reports_client_history client.name %}">View History</a> | Jump to + <select id="quick" name="quick" onchange="javascript:pageJump('quick');"> + <option value="" selected="selected">--- Time ---</option> + {% for i in client.interactions.all|slice:":25" %} + <option value="{% url reports_client_detail_pk hostname=client.name, pk=i.id %}">{{i.timestamp}}</option> + {% endfor %} + </select></span> + </div> + + {% if interaction.isstale %} + <div class="warningbox"> + This node did not run within the last 24 hours — it 
may be out of date. + </div> + {% endif %} + <table class='node_data'> + <tr><td>Timestamp</td><td>{{interaction.timestamp}}</td></tr> + {% if interaction.server %} + <tr><td>Served by</td><td>{{interaction.server}}</td></tr> + {% endif %} + {% if interaction.repo_rev_code %} + <tr><td>Revision</td><td>{{interaction.repo_rev_code}}</td></tr> + {% endif %} + <tr><td>State</td><td class='{{interaction.state}}-lineitem'>{{interaction.state|capfirst}}</td></tr> + <tr><td>Managed entries</td><td>{{interaction.totalcount}}</td></tr> + {% if not interaction.isclean %} + <tr><td>Deviation</td><td>{{interaction.percentbad|floatformat:"3"}}%</td></tr> + {% endif %} + </table> + + {% if interaction.bad_entry_count %} + <div class='entry_list'> + <div class='entry_list_head dirty-lineitem' onclick='javascript:toggleMe("bad_table");'> + <h3>Bad Entries — {{ interaction.bad_entry_count }}</h3> + <div class='entry_expand_tab' id='plusminus_bad_table'>[+]</div> + </div> + <table id='bad_table' class='entry_list'> + {% for e in interaction.bad|sortwell %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='entry_list_type'>{{e.entry.kind}}:</td> + <td><a href="{% url reports_item "bad",e.id %}"> + {{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endif %} + + {% if interaction.modified_entry_count %} + <div class='entry_list'> + <div class='entry_list_head modified-lineitem' onclick='javascript:toggleMe("modified_table");'> + <h3>Modified Entries — {{ interaction.modified_entry_count }}</h3> + <div class='entry_expand_tab' id='plusminus_modified_table'>[+]</div> + </div> + <table id='modified_table' class='entry_list'> + {% for e in interaction.modified|sortwell %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='entry_list_type'>{{e.entry.kind}}:</td> + <td><a href="{% url reports_item "modified",e.id %}"> + {{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endif %} + + {% if interaction.extra_entry_count %} + <div class='entry_list'> + <div class='entry_list_head extra-lineitem' onclick='javascript:toggleMe("extra_table");'> + <h3>Extra Entries — {{ interaction.extra_entry_count }}</h3> + <div class='entry_expand_tab' id='plusminus_extra_table'>[+]</div> + </div> + <table id='extra_table' class='entry_list'> + {% for e in interaction.extra|sortwell %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='entry_list_type'>{{e.entry.kind}}:</td> + <td><a href="{% url reports_item "extra",e.id %}">{{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endif %} + + {% if entry_list %} + <div class="entry_list recent_history_wrapper"> + <div class="entry_list_head" style="border-bottom: 2px solid #98DBCC;"> + <h4 style="display: inline"><a href="{% url reports_client_history client.name %}">Recent Interactions</a></h4> + </div> + <div class='recent_history_box'> + {% include "widgets/interaction_list.inc" %} + <div style='padding-left: 5px'><a href="{% url reports_client_history client.name %}">more...</a></div> + </div> + </div> + {% endif %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html new file mode 100644 index 000000000..0c1fae8d5 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html @@ -0,0 +1,46 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Detailed Client Listing{% endblock %} +{% block 
pagebanner %}Clients - Detailed View{% endblock %} + +{% block content %} +<div class='client_list_box'> +{% if entry_list %} + {% filter_navigator %} + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td class='left_column'>Node</td> + <td class='right_column' style='width:75px'>State</td> + <td class='right_column_narrow'>Good</td> + <td class='right_column_narrow'>Bad</td> + <td class='right_column_narrow'>Modified</td> + <td class='right_column_narrow'>Extra</td> + <td class='right_column'>Last Run</td> + <td class='right_column_wide'>Server</td> + </tr> + {% for entry in entry_list %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='left_column'><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=entry.client.name, pk=entry.id %}'>{{ entry.client.name }}</a></td> + <td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}' + {% ifequal entry.state 'dirty' %}class='dirty-lineitem'{% endifequal %}>{{ entry.state }}</a></td> + <td class='right_column_narrow'>{{ entry.goodcount }}</td> + <td class='right_column_narrow'>{{ entry.bad_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.modified_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.extra_entry_count }}</td> + <td class='right_column'><span {% if entry.timestamp|isstale:entry_max %}class='dirty-lineitem'{% endif %}>{{ entry.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</span></td> + <td class='right_column_wide'> + {% if entry.server %} + <a href='{% add_url_filter server=entry.server %}'>{{ entry.server }}</a> + {% else %} + + {% endif %} + </td> + </tr> + {% endfor %} + </table> +{% else %} + <p>No client records are available.</p> +{% endif %} +</div> +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html new file mode 100644 index 000000000..01d4ec2f4 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html @@ -0,0 +1,20 @@ +{% extends "base.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Interaction History{% endblock %} +{% block pagebanner %}Interaction history{% if client %} for {{ client.name }}{% endif %}{% endblock %} + +{% block extra_header_info %} +{% endblock %} + +{% block content %} +<div class='client_list_box'> +{% if entry_list %} + {% filter_navigator %} + {% include "widgets/interaction_list.inc" %} +{% else %} + <p>No client records are available.</p> +{% endif %} +</div> +{% page_navigator %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html new file mode 100644 index 000000000..e0c0d2d7a --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html @@ -0,0 +1,34 @@ +{% extends "base-timeview.html" %} + +{% block extra_header_info %} +{% endblock%} + +{% block title %}Bcfg2 - Client Grid View{% endblock %} + +{% block pagebanner %}Clients - Grid View{% endblock %} + +{% block content %} + +{% if inter_list %} + <table class='grid-view' align='center'> + {% for inter in inter_list %} + {% if forloop.first %}<tr>{% endif %} + <td class="{{inter.state}}-lineitem"> + <a href="{% spaceless %}{% if not timestamp %} + {% url reports_client_detail inter.client.name %} + {% else %} + {% url reports_client_detail_pk inter.client.name,inter.id %} + {% endif %} + {% endspaceless %}">{{ inter.client.name }}</a> + 
</td>
+      {% if forloop.last %}
+        </tr>
+      {% else %}
+        {% if forloop.counter|divisibleby:"4" %}</tr><tr>{% endif %}
+      {% endif %}
+    {% endfor %}
+  </table>
+{% else %}
+    <p>No client records are available.</p>
+{% endif %}
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
new file mode 100644
index 000000000..5725ae577
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
@@ -0,0 +1,45 @@
+{% extends "base.html" %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block title %}Bcfg2 - Manage Clients{% endblock %}
+
+{% block pagebanner %}Clients - Manage{% endblock %}
+
+{% block content %}
+<div class='client_list_box'>
+  {% if message %}
+  <div class="warningbox">{{ message }}</div>
+  {% endif %}
+{% if clients %}
+  <table cellpadding="3">
+    <tr id='table_list_header' class='listview'>
+      <td class='left_column'>Node</td>
+      <td class='right_column'>Expiration</td>
+      <td class='right_column_narrow'>Manage</td>
+    </tr>
+    {% for client in clients %}
+    <tr class='{% cycle listview,listview_alt %}'>
+      <td><span id="{{ client.name }}"> </span>
+          <span id="ttag-{{ client.name }}"> </span>
+          <span id="s-ttag-{{ client.name }}"> </span>
+          <a href="{% url reports_client_detail client.name %}">{{ client.name }}</a></td>
+      <td>{% firstof client.expiration 'Active' %}</td>
+      <td>
+        <form method="post" action="{% url reports_client_manage %}">
+          <div> {# here for no reason other than to validate #}
+          <input type="hidden" name="client_name" value="{{ client.name }}" />
+          <input type="hidden" name="client_action" value="{% if client.expiration %}unexpire{% else %}expire{% endif %}" />
+          <input type="submit" value="{% if client.expiration %}Activate{% else %}Expire Now{% endif %}" />
+          </div>
+        </form>
+      </td>
+    </tr>
+    {% endfor %}
+  </table>
+{% else %}
+  <p>No client records are available.</p>
+{% endif %}
+</div>
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html
new file mode 100644
index 000000000..cadc178a7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html
@@ -0,0 +1,130 @@
+{% extends "base.html" %}
+{% load split %}
+{% load syntax_coloring %}
+
+
+{% block title %}Bcfg2 - Element Details{% endblock %}
+
+
+{% block extra_header_info %}
+<style type="text/css">
+#table_list_header {
+    font-size: 100%;
+}
+table.entry_list {
+    width: auto;
+}
+div.information_wrapper {
+    margin: 15px;
+}
+div.diff_wrapper {
+    overflow: auto;
+}
+div.entry_list h3 {
+    font-size: 90%;
+    padding: 5px;
+}
+</style>
+{% endblock%}
+
+{% block pagebanner %}Element Details{% endblock %}
+
+{% block content %}
+  <div class='detail_header'>
+    <h3>{{mod_or_bad|capfirst}} {{item.entry.kind}}: {{item.entry.name}}</h3>
+  </div>
+
+  <div class="information_wrapper">
+
+  {% if isextra %}
+  <p>This item exists on the host but is not defined in the configuration.</p>
+  {% endif %}
+
+  {% if not item.reason.current_exists %}
+  <div class="warning">This item does not currently exist on the host but is specified to exist in the configuration.</div>
+  {% endif %}
+
+  {% if item.reason.current_owner or item.reason.current_group or item.reason.current_perms or item.reason.current_status or item.reason.current_to or item.reason.current_version %}
+  <table class='entry_list'>
+    <tr id='table_list_header'>
+      <td style='text-align: right;'>Problem Type</td><td>Expected</td><td style='border-bottom: 1px solid #98DBCC;'>Found</td></tr>
+    {% if item.reason.current_owner %}
+    <tr><td style='text-align: right'><b>Owner</b></td><td>{{item.reason.owner}}</td>
+        <td>{{item.reason.current_owner}}</td></tr>
+    {% endif %}
+    {% if item.reason.current_group %}
+    <tr><td style='text-align: right'><b>Group</b></td><td>{{item.reason.group}}</td>
+        <td>{{item.reason.current_group}}</td></tr>
+    {% endif %}
+    {% if item.reason.current_perms %}
+    <tr><td style='text-align: right'><b>Permissions</b></td><td>{{item.reason.perms}}</td>
+        <td>{{item.reason.current_perms}}</td></tr>
+    {% endif %}
+    {% if item.reason.current_status %}
+    <tr><td style='text-align: right'><b>Status</b></td><td>{{item.reason.status}}</td>
+        <td>{{item.reason.current_status}}</td></tr>
+    {% endif %}
+    {% if item.reason.current_to %}
+    <tr><td style='text-align: right'><b>Symlink Target</b></td><td>{{item.reason.to}}</td>
+        <td>{{item.reason.current_to}}</td></tr>
+    {% endif %}
+    {% if item.reason.current_version %}
+    <tr><td style='text-align: right'><b>Package Version</b></td><td>{{item.reason.version|cut:"("|cut:")"}}</td>
+        <td>{{item.reason.current_version|cut:"("|cut:")"}}</td></tr>
+    {% endif %}
+  </table>
+  {% endif %}
+
+  {% if item.reason.current_diff or item.reason.is_sensitive %}
+  <div class='entry_list'>
+    <div class='entry_list_head'>
+      {% if item.reason.is_sensitive %}
+      <h3>File contents unavailable, as they might contain sensitive data.</h3>
+      {% else %}
+      <h3>Incorrect file contents</h3>
+      {% endif %}
+    </div>
+    {% if not item.reason.is_sensitive %}
+    <div class='diff_wrapper'>
+      {{ item.reason.current_diff|syntaxhilight }}
+    </div>
+    {% endif %}
+  </div>
+  {% endif %}
+
+  <!-- display extra directory entries -->
+  {% if item.reason.unpruned %}
+  <div class='entry_list'>
+    <div class='entry_list_head'>
+      <h3>Extra entries found</h3>
+    </div>
+    <table class='entry_list' cellpadding='3'>
+      {% for unpruned_item in item.reason.unpruned|split %}
+      <tr><td>{{ unpruned_item }}</td></tr>
+      {% endfor %}
+    </table>
+  </div>
+  {% endif %}
+
+
+  <div class='entry_list'>
+    <div class='entry_list_head'>
+      <h3>Occurrences on {{ timestamp|date:"Y-m-d" }}</h3>
+    </div>
+    {% if associated_list %}
+    <table class="entry_list" cellpadding="3">
+      {% for inter in associated_list %}
+      <tr><td><a href="{% url reports_client_detail inter.client.name %}"
+             >{{inter.client.name}}</a></td>
+          <td><a href="{% url reports_client_detail_pk hostname=inter.client.name,pk=inter.id %}"
+             >{{inter.timestamp}}</a></td>
+      </tr>
+      {% endfor %}
+    </table>
+    {% else %}
+    <p>Missing client list</p>
+    {% endif %}
+  </div>
+
+  </div><!-- information_wrapper -->
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
new file mode 100644
index 000000000..9b1026a08
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
@@ -0,0 +1,33 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Element Listing{% endblock %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block pagebanner %}{{mod_or_bad|capfirst}} Element Listing{% endblock %}
+
+{% block content %}
+{% if item_list_dict %}
+  {% for kind, entries in item_list_dict.items %}
+
+  <div class='entry_list'>
+    <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{
kind }}");'> + <h3>{{ kind }} — {{ entries|length }}</h3> + <div class='entry_expand_tab' id='plusminus_table_{{ kind }}'>[–]</div> + </div> + + <table id='table_{{ kind }}' class='entry_list'> + {% for e in entries %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a href="{% url reports_item type=mod_or_bad,pk=e.id %}">{{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endfor %} +{% else %} + <p>There are currently no inconsistent configuration entries.</p> +{% endif %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html b/src/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html new file mode 100644 index 000000000..b9847cf96 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html @@ -0,0 +1,42 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Client Summary{% endblock %} +{% block pagebanner %}Clients - Summary{% endblock %} + +{% block body_onload %}javascript:hide_table_array(hide_tables){% endblock %} + +{% block extra_header_info %} +<script type="text/javascript"> +var hide_tables = new Array({{ summary_data|length }}); +{% for summary in summary_data %} +hide_tables[{{ forloop.counter0 }}] = "table_{{ summary.name }}"; +{% endfor %} +</script> +{% endblock%} + +{% block content %} + <div class='detail_header'> + <h2>{{ node_count }} nodes reporting in</h2> + </div> +{% if summary_data %} + {% for summary in summary_data %} + <div class='entry_list'> + <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ summary.name }}");'> + <h3>{{ summary.nodes|length }} {{ summary.label }}</h3> + <div class='entry_expand_tab' id='plusminus_table_{{ summary.name }}'>[+]</div> + </div> + + <table id='table_{{ summary.name }}' class='entry_list'> + {% for node in summary.nodes|sort_interactions_by_name %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a href="{% url reports_client_detail_pk hostname=node.client.name,pk=node.id %}">{{ node.client.name }}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endfor %} +{% else %} + <p>No data to report on</p> +{% endif %} +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html b/src/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html new file mode 100644 index 000000000..ff775ded5 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html @@ -0,0 +1,38 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Performance Metrics{% endblock %} +{% block pagebanner %}Performance Metrics{% endblock %} + + +{% block extra_header_info %} +{% endblock%} + +{% block content %} +<div class='client_list_box'> + {% if metrics %} + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td>Name</td> + <td>Parse</td> + <td>Probe</td> + <td>Inventory</td> + <td>Install</td> + <td>Config</td> + <td>Total</td> + </tr> + {% for metric in metrics|dictsort:"name" %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a style='font-size: 100%' + href="{% url reports_client_detail hostname=metric.name %}">{{ metric.name }}</a></td> + {% for mitem in metric|build_metric_list %} + <td>{{ mitem }}</td> + {% endfor %} + </tr> + {% endfor %} + </table> + {% else %} + <p>No metric data available</p> + {% endif %} +</div> +{% endblock %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html 
b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html new file mode 100644 index 000000000..6fbe585ab --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html @@ -0,0 +1,13 @@ +{% spaceless %} +{% if filters %} +{% for filter, filter_url in filters %} + {% if forloop.first %} + <div class="filter_bar">Active filters (click to remove): + {% endif %} + <a href='{{ filter_url }}'>{{ filter|capfirst }}</a>{% if not forloop.last %}, {% endif %} + {% if forloop.last %} + </div> + {% endif %} +{% endfor %} +{% endif %} +{% endspaceless %} diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc new file mode 100644 index 000000000..8f2dec1dc --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc @@ -0,0 +1,38 @@ +{% load bcfg2_tags %} +<div class='interaction_history_widget'> + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td class='left_column'>Timestamp</td> + {% if not client %} + <td class='right_column_wide'>Client</td> + {% endif %} + <td class='right_column' style='width:75px'>State</td> + <td class='right_column_narrow'>Good</td> + <td class='right_column_narrow'>Bad</td> + <td class='right_column_narrow'>Modified</td> + <td class='right_column_narrow'>Extra</td> + <td class='right_column_wide'>Server</td> + </tr> + {% for entry in entry_list %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='left_column'><a href='{% url reports_client_detail_pk hostname=entry.client.name, pk=entry.id %}'>{{ entry.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</a></td> + {% if not client %} + <td class='right_column_wide'><a href='{% add_url_filter hostname=entry.client.name %}'>{{ entry.client.name }}</a></td> + {% endif %} + <td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}' + {% ifequal entry.state 'dirty' %}class='dirty-lineitem'{% endifequal %}>{{ entry.state }}</a></td> + <td class='right_column_narrow'>{{ entry.goodcount }}</td> + <td class='right_column_narrow'>{{ entry.bad_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.modified_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.extra_entry_count }}</td> + <td class='right_column_wide'> + {% if entry.server %} + <a href='{% add_url_filter server=entry.server %}'>{{ entry.server }}</a> + {% else %} + + {% endif %} + </td> + </tr> + {% endfor %} + </table> +</div> diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html new file mode 100644 index 000000000..aa0def83e --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html @@ -0,0 +1,23 @@ +{% spaceless %} +{% for page, page_url in pager %} + {% if forloop.first %} + <div class="page_bar"> + {% if prev_page %}<a href="{{ prev_page }}">< Prev</a><span> </span>{% endif %} + {% if first_page %}<a href="{{ first_page }}">1</a><span> ... </span>{% endif %} + {% endif %} + {% ifequal page current_page %} + <span class='nav_bar_current'>{{ page }}</span> + {% else %} + <a href="{{ page_url }}">{{ page }}</a> + {% endifequal %} + {% if forloop.last %} + {% if last_page %}<span> ... 
</span><a href="{{ last_page }}">{{ total_pages }}</a><span> </span>{% endif %}
+    {% if next_page %}<a href="{{ next_page }}">Next ></a><span> </span>{% endif %}
+    |{% for limit, limit_url in page_limits %} <a href="{{ limit_url }}">{{ limit }}</a>{% endfor %}
+    </div>
+  {% else %}
+    <span> </span>
+  {% endif %}
+{% endfor %}
+{% endspaceless %}
+<!-- {{ path }} -->
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
new file mode 100644
index 000000000..f738f7bdd
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
@@ -0,0 +1,276 @@
+import sys
+
+from django import template
+from django.core.urlresolvers import resolve, reverse, Resolver404, NoReverseMatch
+from django.utils.encoding import smart_unicode, smart_str
+from datetime import datetime, timedelta
+from Bcfg2.Server.Reports.utils import filter_list
+
+register = template.Library()
+
+__PAGE_NAV_LIMITS__ = (10, 25, 50, 100)
+
+@register.inclusion_tag('widgets/page_bar.html', takes_context=True)
+def page_navigator(context):
+    """
+    Creates paginated links.
+
+    Expects the context to be a RequestContext and views.prepare_paginated_list()
+    to have populated page information.
+    """
+    fragment = dict()
+    try:
+        path = context['request'].META['PATH_INFO']
+        total_pages = int(context['total_pages'])
+        records_per_page = int(context['records_per_page'])
+    except KeyError:
+        return fragment
+    except ValueError:
+        return fragment
+
+    if total_pages < 2:
+        return {}
+
+    try:
+        view, args, kwargs = resolve(path)
+        current_page = int(kwargs.get('page_number', 1))
+        fragment['current_page'] = current_page
+        fragment['page_number'] = current_page
+        fragment['total_pages'] = total_pages
+        fragment['records_per_page'] = records_per_page
+        if current_page > 1:
+            kwargs['page_number'] = current_page - 1
+            fragment['prev_page'] = reverse(view, args=args, kwargs=kwargs)
+        if current_page < total_pages:
+            kwargs['page_number'] = current_page + 1
+            fragment['next_page'] = reverse(view, args=args, kwargs=kwargs)
+
+        view_range = 5
+        if total_pages > view_range:
+            pager_start = current_page - 2
+            pager_end = current_page + 2
+            if pager_start < 1:
+                pager_end += (1 - pager_start)
+                pager_start = 1
+            if pager_end > total_pages:
+                pager_start -= (pager_end - total_pages)
+                pager_end = total_pages
+        else:
+            pager_start = 1
+            pager_end = total_pages
+
+        if pager_start > 1:
+            kwargs['page_number'] = 1
+            fragment['first_page'] = reverse(view, args=args, kwargs=kwargs)
+        if pager_end < total_pages:
+            kwargs['page_number'] = total_pages
+            fragment['last_page'] = reverse(view, args=args, kwargs=kwargs)
+
+        pager = []
+        for page in range(pager_start, int(pager_end) + 1):
+            kwargs['page_number'] = page
+            pager.append( (page, reverse(view, args=args, kwargs=kwargs)) )
+
+        kwargs['page_number'] = 1
+        page_limits = []
+        for limit in __PAGE_NAV_LIMITS__:
+            kwargs['page_limit'] = limit
+            page_limits.append( (limit, reverse(view, args=args, kwargs=kwargs)) )
+        # resolver doesn't like this
+        del kwargs['page_number']
+        del kwargs['page_limit']
+        page_limits.append( ('all', reverse(view, args=args, kwargs=kwargs) + "|all") )
+
+        fragment['pager'] = pager
+        fragment['page_limits'] = page_limits
+
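+        # The pager is a five-page window centered on current_page and
+        # clamped to [1, total_pages]; e.g. with total_pages=12 and
+        # current_page=7 the window covers pages 5..9, and first_page /
+        # last_page links to pages 1 and 12 appear because the window is
+        # clipped on both sides.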
except Resolver404: + path = "404" + except NoReverseMatch: + nr = sys.exc_info()[1] + path = "NoReverseMatch: %s" % nr + except ValueError: + path = "ValueError" + #FIXME - Handle these + + fragment['path'] = path + return fragment + +@register.inclusion_tag('widgets/filter_bar.html', takes_context=True) +def filter_navigator(context): + try: + path = context['request'].META['PATH_INFO'] + view, args, kwargs = resolve(path) + + # Strip any page limits and numbers + if 'page_number' in kwargs: + del kwargs['page_number'] + if 'page_limit' in kwargs: + del kwargs['page_limit'] + + filters = [] + for filter in filter_list: + if filter in kwargs: + myargs = kwargs.copy() + del myargs[filter] + filters.append( (filter, reverse(view, args=args, kwargs=myargs) ) ) + filters.sort(lambda x,y: cmp(x[0], y[0])) + return { 'filters': filters } + except (Resolver404, NoReverseMatch, ValueError, KeyError): + pass + return dict() + +def _subtract_or_na(mdict, x, y): + """ + Shortcut for build_metric_list + """ + try: + return round(mdict[x] - mdict[y], 4) + except: + return "n/a" + +@register.filter +def build_metric_list(mdict): + """ + Create a list of metric table entries + + Moving this here it simplify the view. Should really handle the case where these + are missing... + """ + td_list = [] + # parse + td_list.append( _subtract_or_na(mdict, 'config_parse', 'config_download')) + #probe + td_list.append( _subtract_or_na(mdict, 'probe_upload', 'start')) + #inventory + td_list.append( _subtract_or_na(mdict, 'inventory', 'initialization')) + #install + td_list.append( _subtract_or_na(mdict, 'install', 'inventory')) + #cfg download & parse + td_list.append( _subtract_or_na(mdict, 'config_parse', 'probe_upload')) + #total + td_list.append( _subtract_or_na(mdict, 'finished', 'start')) + return td_list + +@register.filter +def isstale(timestamp, entry_max=None): + """ + Check for a stale timestamp + + Compares two timestamps and returns True if the + difference is greater then 24 hours. + """ + if not entry_max: + entry_max = datetime.now() + return entry_max - timestamp > timedelta(hours=24) + +@register.filter +def sort_interactions_by_name(value): + """ + Sort an interaction list by client name + """ + inters = list(value) + inters.sort(lambda a,b: cmp(a.client.name, b.client.name)) + return inters + +class AddUrlFilter(template.Node): + def __init__(self, filter_name, filter_value): + self.filter_name = filter_name + self.filter_value = filter_value + self.fallback_view = 'Bcfg2.Server.Reports.reports.views.render_history_view' + + def render(self, context): + link = '#' + try: + path = context['request'].META['PATH_INFO'] + view, args, kwargs = resolve(path) + filter_value = self.filter_value.resolve(context, True) + if filter_value: + filter_name = smart_str(self.filter_name) + filter_value = smart_unicode(filter_value) + kwargs[filter_name] = filter_value + # These two don't make sense + if filter_name == 'server' and 'hostname' in kwargs: + del kwargs['hostname'] + elif filter_name == 'hostname' and 'server' in kwargs: + del kwargs['server'] + try: + link = reverse(view, args=args, kwargs=kwargs) + except NoReverseMatch: + link = reverse(self.fallback_view, args=None, + kwargs={ filter_name: filter_value }) + except NoReverseMatch: + rm = sys.exc_info()[1] + raise rm + except (Resolver404, ValueError): + pass + return link + +@register.tag +def add_url_filter(parser, token): + """ + Return a url with the filter added to the current view. 
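+    For example, rendering {% add_url_filter state=entry.state %} while
+    serving /history/ resolves back to the history view with a state
+    filter added to its url kwargs.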
+ + Takes a new filter and resolves the current view with the new filter + applied. Resolves to Bcfg2.Server.Reports.reports.views.client_history + by default. + + {% add_url_filter server=interaction.server %} + """ + try: + tag_name, filter_pair = token.split_contents() + filter_name, filter_value = filter_pair.split('=', 1) + filter_name = filter_name.strip() + filter_value = parser.compile_filter(filter_value) + except ValueError: + raise template.TemplateSyntaxError("%r tag requires exactly one argument" % token.contents.split()[0]) + if not filter_name or not filter_value: + raise template.TemplateSyntaxError("argument should be a filter=value pair") + + return AddUrlFilter(filter_name, filter_value) + +@register.filter +def sortwell(value): + """ + Sorts a list(or evaluates queryset to list) of bad, extra, or modified items in the best + way for presentation + """ + + configItems = list(value) + configItems.sort(lambda x,y: cmp(x.entry.name, y.entry.name)) + configItems.sort(lambda x,y: cmp(x.entry.kind, y.entry.kind)) + return configItems + +class MediaTag(template.Node): + def __init__(self, filter_value): + self.filter_value = filter_value + + def render(self, context): + base = context['MEDIA_URL'] + try: + request = context['request'] + try: + base = request.environ['bcfg2.media_url'] + except: + if request.path != request.META['PATH_INFO']: + offset = request.path.find(request.META['PATH_INFO']) + if offset > 0: + base = "%s/%s" % (request.path[:offset], \ + context['MEDIA_URL'].strip('/')) + except: + pass + return "%s/%s" % (base, self.filter_value) + +@register.tag +def to_media_url(parser, token): + """ + Return a url relative to the media_url. + + {% to_media_url /bcfg2.css %} + """ + try: + tag_name, filter_value = token.split_contents() + filter_value = parser.compile_filter(filter_value) + except ValueError: + raise template.TemplateSyntaxError("%r tag requires exactly one argument" % token.contents.split()[0]) + + return MediaTag(filter_value) + diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/split.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/split.py new file mode 100644 index 000000000..a9b4f0371 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/split.py @@ -0,0 +1,8 @@ +from django import template +register = template.Library() + + +@register.filter +def split(s): + """split by newlines""" + return s.split('\n') diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py new file mode 100644 index 000000000..2e30125f9 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py @@ -0,0 +1,49 @@ +import sys +from django import template +from django.utils.encoding import smart_unicode, smart_str +from django.utils.html import conditional_escape +from django.utils.safestring import mark_safe + +register = template.Library() + +try: + from pygments import highlight + from pygments.lexers import get_lexer_by_name + from pygments.formatters import HtmlFormatter + colorize = True + +except: + colorize = False + +# py3k compatibility +def u_str(string): + if sys.hexversion >= 0x03000000: + return string + else: + return unicode(string) + +@register.filter +def syntaxhilight(value, arg="diff", autoescape=None): + """ + Returns a syntax-hilighted version of Code; requires code/language arguments + """ + + if autoescape: + value = conditional_escape(value) + arg = conditional_escape(arg) + + if colorize: + try: + 
output = u_str('<style type="text/css">') \ + + smart_unicode(HtmlFormatter().get_style_defs('.highlight')) \ + + u_str('</style>') + + lexer = get_lexer_by_name(arg) + output += highlight(value, lexer, HtmlFormatter()) + return mark_safe(output) + except: + return value + else: + return mark_safe(u_str('<div class="note-box">Tip: Install pygments for highlighting</div><pre>%s</pre>') % value) +syntaxhilight.needs_autoescape = True + diff --git a/src/lib/Bcfg2/Server/Reports/reports/urls.py b/src/lib/Bcfg2/Server/Reports/reports/urls.py new file mode 100644 index 000000000..434ce07b7 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/urls.py @@ -0,0 +1,55 @@ +from django.conf.urls.defaults import * +from django.core.urlresolvers import reverse, NoReverseMatch +from django.http import HttpResponsePermanentRedirect +from Bcfg2.Server.Reports.utils import filteredUrls, paginatedUrls, timeviewUrls + +def newRoot(request): + try: + grid_view = reverse('reports_grid_view') + except NoReverseMatch: + grid_view = '/grid' + return HttpResponsePermanentRedirect(grid_view) + +urlpatterns = patterns('Bcfg2.Server.Reports.reports', + (r'^$', newRoot), + + url(r'^manage/?$', 'views.client_manage', name='reports_client_manage'), + url(r'^client/(?P<hostname>[^/]+)/(?P<pk>\d+)/?$', 'views.client_detail', name='reports_client_detail_pk'), + url(r'^client/(?P<hostname>[^/]+)/?$', 'views.client_detail', name='reports_client_detail'), + url(r'^elements/(?P<type>\w+)/(?P<pk>\d+)/?$', 'views.config_item', name='reports_item'), +) + +urlpatterns += patterns('Bcfg2.Server.Reports.reports', + *timeviewUrls( + (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'), + (r'^summary/?$', 'views.display_summary', None, 'reports_summary'), + (r'^timing/?$', 'views.display_timing', None, 'reports_timing'), + (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'), +)) + +urlpatterns += patterns('Bcfg2.Server.Reports.reports', + *filteredUrls(*timeviewUrls( + (r'^detailed/?$', + 'views.client_detailed_list', None, 'reports_detailed_list') +))) + +urlpatterns += patterns('Bcfg2.Server.Reports.reports', + *paginatedUrls( *filteredUrls( + (r'^history/?$', + 'views.render_history_view', None, 'reports_history'), + (r'^history/(?P<hostname>[^/|]+)/?$', + 'views.render_history_view', None, 'reports_client_history'), +))) + + # Uncomment this for admin: + #(r'^admin/', include('django.contrib.admin.urls')), + + +## Uncomment this section if using authentication +#urlpatterns += patterns('', +# (r'^login/$', 'django.contrib.auth.views.login', +# {'template_name': 'auth/login.html'}), +# (r'^logout/$', 'django.contrib.auth.views.logout', +# {'template_name': 'auth/logout.html'}) +# ) + diff --git a/src/lib/Bcfg2/Server/Reports/reports/views.py b/src/lib/Bcfg2/Server/Reports/reports/views.py new file mode 100644 index 000000000..ccd71a60e --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/reports/views.py @@ -0,0 +1,415 @@ +""" +Report views + +Functions to handle all of the reporting views. 
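+Most of the views are wrapped by the timeview decorator so they can be
+rendered for an arbitrary point in time, and the list views share their
+pagination and filter handling through render_history_view().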
+""" +from datetime import datetime, timedelta +import sys +from time import strptime + +from django.template import Context, RequestContext +from django.http import \ + HttpResponse, HttpResponseRedirect, HttpResponseServerError, Http404 +from django.shortcuts import render_to_response, get_object_or_404 +from django.core.urlresolvers import \ + resolve, reverse, Resolver404, NoReverseMatch +from django.db import connection + +from Bcfg2.Server.Reports.reports.models import * + + +class PaginationError(Exception): + """This error is raised when pagination cannot be completed.""" + pass + + +def server_error(request): + """ + 500 error handler. + + For now always return the debug response. Mailing isn't appropriate here. + + """ + from django.views import debug + return debug.technical_500_response(request, *sys.exc_info()) + + +def timeview(fn): + """ + Setup a timeview view + + Handles backend posts from the calendar and converts date pieces + into a 'timestamp' parameter + + """ + def _handle_timeview(request, **kwargs): + """Send any posts back.""" + if request.method == 'POST': + cal_date = request.POST['cal_date'] + try: + fmt = "%Y/%m/%d" + if cal_date.find(' ') > -1: + fmt += " %H:%M" + timestamp = datetime(*strptime(cal_date, fmt)[0:6]) + view, args, kw = resolve(request.META['PATH_INFO']) + kw['year'] = "%0.4d" % timestamp.year + kw['month'] = "%02.d" % timestamp.month + kw['day'] = "%02.d" % timestamp.day + if cal_date.find(' ') > -1: + kw['hour'] = timestamp.hour + kw['minute'] = timestamp.minute + return HttpResponseRedirect(reverse(view, + args=args, + kwargs=kw)) + except KeyError: + pass + except: + pass + # FIXME - Handle this + + """Extract timestamp from args.""" + timestamp = None + try: + timestamp = datetime(int(kwargs.pop('year')), + int(kwargs.pop('month')), + int(kwargs.pop('day')), int(kwargs.pop('hour', 0)), + int(kwargs.pop('minute', 0)), 0) + kwargs['timestamp'] = timestamp + except KeyError: + pass + except: + raise + return fn(request, **kwargs) + + return _handle_timeview + + +def config_item(request, pk, type="bad"): + """ + Display a single entry. + + Dispalys information about a single entry. 
+
+    """
+    item = get_object_or_404(Entries_interactions, id=pk)
+    timestamp = item.interaction.timestamp
+    time_start = item.interaction.timestamp.replace(hour=0,
+                                                    minute=0,
+                                                    second=0,
+                                                    microsecond=0)
+    time_end = time_start + timedelta(days=1)
+
+    todays_data = Interaction.objects.filter(timestamp__gte=time_start,
+                                             timestamp__lt=time_end)
+    shared_entries = Entries_interactions.objects.filter(entry=item.entry,
+                                                         reason=item.reason,
+                                                         type=item.type,
+                                                         interaction__in=[x['id']\
+                                                             for x in todays_data.values('id')])
+
+    associated_list = Interaction.objects.filter(id__in=[x['interaction']\
+        for x in shared_entries.values('interaction')])\
+        .order_by('client__name', 'timestamp').select_related().all()
+
+    return render_to_response('config_items/item.html',
+                              {'item': item,
+                               'isextra': item.type == TYPE_EXTRA,
+                               'mod_or_bad': type,
+                               'associated_list': associated_list,
+                               'timestamp': timestamp},
+                              context_instance=RequestContext(request))
+
+
+@timeview
+def config_item_list(request, type, timestamp=None):
+    """Render a listing of affected elements"""
+    mod_or_bad = type.lower()
+    type = convert_entry_type_to_id(type)
+    if type < 0:
+        raise Http404
+
+    current_clients = Interaction.objects.get_interaction_per_client_ids(timestamp)
+    item_list_dict = {}
+    seen = dict()
+    for x in Entries_interactions.objects.filter(interaction__in=current_clients,
+                                                 type=type).select_related():
+        if (x.entry, x.reason) in seen:
+            continue
+        seen[(x.entry, x.reason)] = 1
+        if item_list_dict.get(x.entry.kind, None):
+            item_list_dict[x.entry.kind].append(x)
+        else:
+            item_list_dict[x.entry.kind] = [x]
+
+    for kind in item_list_dict:
+        item_list_dict[kind].sort(lambda a, b: cmp(a.entry.name, b.entry.name))
+
+    return render_to_response('config_items/listing.html',
+                              {'item_list_dict': item_list_dict,
+                               'mod_or_bad': mod_or_bad,
+                               'timestamp': timestamp},
+                              context_instance=RequestContext(request))
+
+
+@timeview
+def client_index(request, timestamp=None):
+    """
+    Render a grid view of active clients.
+
+    Keyword parameters:
+    timestamp -- datetime object to render from
+
+    """
+    inter_list = Interaction.objects.interaction_per_client(timestamp).select_related()\
+                 .order_by("client__name").all()
+
+    return render_to_response('clients/index.html',
+                              {'inter_list': inter_list,
+                               'timestamp': timestamp},
+                              context_instance=RequestContext(request))
+
+
+@timeview
+def client_detailed_list(request, timestamp=None, **kwargs):
+    """
+    Provides a more detailed list view of the clients.  Allows for extra
+    filters to be passed in.
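+    The real work happens in render_history_view(), which is called with
+    page_limit=0 so that every matching interaction lands on one page.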
+ + """ + + kwargs['interaction_base'] = Interaction.objects.interaction_per_client(timestamp).select_related() + kwargs['orderby'] = "client__name" + kwargs['page_limit'] = 0 + return render_history_view(request, 'clients/detailed-list.html', **kwargs) + + +def client_detail(request, hostname=None, pk=None): + context = dict() + client = get_object_or_404(Client, name=hostname) + if(pk == None): + context['interaction'] = client.current_interaction + return render_history_view(request, 'clients/detail.html', page_limit=5, + client=client, context=context) + else: + context['interaction'] = client.interactions.get(pk=pk) + return render_history_view(request, 'clients/detail.html', page_limit=5, + client=client, maxdate=context['interaction'].timestamp, context=context) + + +def client_manage(request): + """Manage client expiration""" + message = '' + if request.method == 'POST': + try: + client_name = request.POST.get('client_name', None) + client_action = request.POST.get('client_action', None) + client = Client.objects.get(name=client_name) + if client_action == 'expire': + client.expiration = datetime.now() + client.save() + message = "Expiration for %s set to %s." % \ + (client_name, client.expiration.strftime("%Y-%m-%d %H:%M:%S")) + elif client_action == 'unexpire': + client.expiration = None + client.save() + message = "%s is now active." % client_name + else: + message = "Missing action" + except Client.DoesNotExist: + if not client_name: + client_name = "<none>" + message = "Couldn't find client \"%s\"" % client_name + + return render_to_response('clients/manage.html', + {'clients': Client.objects.order_by('name').all(), 'message': message}, + context_instance=RequestContext(request)) + + +@timeview +def display_summary(request, timestamp=None): + """ + Display a summary of the bcfg2 world + """ + query = Interaction.objects.interaction_per_client(timestamp).select_related() + node_count = query.count() + recent_data = query.all() + if not timestamp: + timestamp = datetime.now() + + collected_data = dict(clean=[], + bad=[], + modified=[], + extra=[], + stale=[], + pings=[]) + for node in recent_data: + if timestamp - node.timestamp > timedelta(hours=24): + collected_data['stale'].append(node) + # If stale check for uptime + try: + if node.client.pings.latest().status == 'N': + collected_data['pings'].append(node) + except Ping.DoesNotExist: + collected_data['pings'].append(node) + continue + if node.bad_entry_count() > 0: + collected_data['bad'].append(node) + else: + collected_data['clean'].append(node) + if node.modified_entry_count() > 0: + collected_data['modified'].append(node) + if node.extra_entry_count() > 0: + collected_data['extra'].append(node) + + # label, header_text, node_list + summary_data = [] + get_dict = lambda name, label: {'name': name, + 'nodes': collected_data[name], + 'label': label} + if len(collected_data['clean']) > 0: + summary_data.append(get_dict('clean', + 'nodes are clean.')) + if len(collected_data['bad']) > 0: + summary_data.append(get_dict('bad', + 'nodes are bad.')) + if len(collected_data['modified']) > 0: + summary_data.append(get_dict('modified', + 'nodes were modified.')) + if len(collected_data['extra']) > 0: + summary_data.append(get_dict('extra', + 'nodes have extra configurations.')) + if len(collected_data['stale']) > 0: + summary_data.append(get_dict('stale', + 'nodes did not run within the last 24 hours.')) + if len(collected_data['pings']) > 0: + summary_data.append(get_dict('pings', + 'are down.')) + + return 
render_to_response('displays/summary.html', + {'summary_data': summary_data, 'node_count': node_count, + 'timestamp': timestamp}, + context_instance=RequestContext(request)) + + +@timeview +def display_timing(request, timestamp=None): + mdict = dict() + inters = Interaction.objects.interaction_per_client(timestamp).select_related().all() + [mdict.__setitem__(inter, {'name': inter.client.name}) \ + for inter in inters] + for metric in Performance.objects.filter(interaction__in=list(mdict.keys())).all(): + for i in metric.interaction.all(): + mdict[i][metric.metric] = metric.value + return render_to_response('displays/timing.html', + {'metrics': list(mdict.values()), + 'timestamp': timestamp}, + context_instance=RequestContext(request)) + + +def render_history_view(request, template='clients/history.html', **kwargs): + """ + Provides a detailed history of a clients interactions. + + Renders a detailed history of a clients interactions. Allows for various + filters and settings. Automatically sets pagination data into the context. + + Keyword arguments: + interaction_base -- Interaction QuerySet to build on + (default Interaction.objects) + context -- Additional context data to render with + page_number -- Page to display (default 1) + page_limit -- Number of results per page, if 0 show all (default 25) + client -- Client object to render + hostname -- Client hostname to lookup and render. Returns a 404 if + not found + server -- Filter interactions by server + state -- Filter interactions by state + entry_max -- Most recent interaction to display + orderby -- Sort results using this field + + """ + + context = kwargs.get('context', dict()) + max_results = int(kwargs.get('page_limit', 25)) + page = int(kwargs.get('page_number', 1)) + + client = kwargs.get('client', None) + if not client and 'hostname' in kwargs: + client = get_object_or_404(Client, name=kwargs['hostname']) + if client: + context['client'] = client + + entry_max = kwargs.get('maxdate', None) + context['entry_max'] = entry_max + + # Either filter by client or limit by clients + iquery = kwargs.get('interaction_base', Interaction.objects) + if client: + iquery = iquery.filter(client__exact=client).select_related() + + if 'orderby' in kwargs and kwargs['orderby']: + iquery = iquery.order_by(kwargs['orderby']) + + if 'state' in kwargs and kwargs['state']: + iquery = iquery.filter(state__exact=kwargs['state']) + if 'server' in kwargs and kwargs['server']: + iquery = iquery.filter(server__exact=kwargs['server']) + + if entry_max: + iquery = iquery.filter(timestamp__lte=entry_max) + + if max_results < 0: + max_results = 1 + entry_list = [] + if max_results > 0: + try: + rec_start, rec_end = prepare_paginated_list(request, + context, + iquery, + page, + max_results) + except PaginationError: + page_error = sys.exc_info()[1] + if isinstance(page_error[0], HttpResponse): + return page_error[0] + return HttpResponseServerError(page_error) + context['entry_list'] = iquery.all()[rec_start:rec_end] + else: + context['entry_list'] = iquery.all() + + return render_to_response(template, context, + context_instance=RequestContext(request)) + + +def prepare_paginated_list(request, context, paged_list, page=1, max_results=25): + """ + Prepare context and slice an object for pagination. 
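+    Raises PaginationError if the arguments are invalid; when the request
+    points past the last page the error wraps an HttpResponseRedirect to
+    the final page, which the caller can return directly.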
+ """ + if max_results < 1: + raise PaginationError("Max results less then 1") + if paged_list == None: + raise PaginationError("Invalid object") + + try: + nitems = paged_list.count() + except TypeError: + nitems = len(paged_list) + + rec_start = (page - 1) * int(max_results) + try: + total_pages = (nitems / int(max_results)) + 1 + except: + total_pages = 1 + if page > total_pages: + # If we passed beyond the end send back + try: + view, args, kwargs = resolve(request.META['PATH_INFO']) + kwargs['page_number'] = total_pages + raise PaginationError(HttpResponseRedirect(reverse(view, + kwards=kwargs))) + except (Resolver404, NoReverseMatch, ValueError): + raise "Accessing beyond last page. Unable to resolve redirect." + + context['total_pages'] = total_pages + context['records_per_page'] = max_results + return (rec_start, rec_start + int(max_results)) diff --git a/src/lib/Bcfg2/Server/Reports/settings.py b/src/lib/Bcfg2/Server/Reports/settings.py new file mode 100644 index 000000000..4d567f1a2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/settings.py @@ -0,0 +1,161 @@ +import django +import sys + +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser +# Django settings for bcfg2 reports project. +c = ConfigParser.ConfigParser() +if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0: + raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists " + "and is readable by your web server.") + +try: + DEBUG = c.getboolean('statistics', 'web_debug') +except: + DEBUG = False + +if DEBUG: + print("Warning: Setting web_debug to True causes extraordinary memory " + "leaks. Only use this setting if you know what you're doing.") + +TEMPLATE_DEBUG = DEBUG + +ADMINS = ( + ('Root', 'root'), +) + +MANAGERS = ADMINS +try: + db_engine = c.get('statistics', 'database_engine') +except ConfigParser.NoSectionError: + e = sys.exc_info()[1] + raise ImportError("Failed to determine database engine: %s" % e) +db_name = '' +if c.has_option('statistics', 'database_name'): + db_name = c.get('statistics', 'database_name') +if db_engine == 'sqlite3' and db_name == '': + db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository') + +DATABASES = { + 'default': { + 'ENGINE': "django.db.backends.%s" % db_engine, + 'NAME': db_name + } +} + +if db_engine != 'sqlite3': + DATABASES['default']['USER'] = c.get('statistics', 'database_user') + DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password') + DATABASES['default']['HOST'] = c.get('statistics', 'database_host') + try: + DATABASES['default']['PORT'] = c.get('statistics', 'database_port') + except: # An empty string tells Django to use the default port. + DATABASES['default']['PORT'] = '' + +if django.VERSION[0] == 1 and django.VERSION[1] < 2: + DATABASE_ENGINE = db_engine + DATABASE_NAME = DATABASES['default']['NAME'] + if DATABASE_ENGINE != 'sqlite3': + DATABASE_USER = DATABASES['default']['USER'] + DATABASE_PASSWORD = DATABASES['default']['PASSWORD'] + DATABASE_HOST = DATABASES['default']['HOST'] + DATABASE_PORT = DATABASES['default']['PORT'] + + +# Local time zone for this installation. All choices can be found here: +# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone +if django.VERSION[0] == 1 and django.VERSION[1] > 2: + try: + TIME_ZONE = c.get('statistics', 'time_zone') + except: + TIME_ZONE = None + +# Language code for this installation. 
All choices can be found here: +# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes +# http://blogs.law.harvard.edu/tech/stories/storyReader$15 +LANGUAGE_CODE = 'en-us' + +SITE_ID = 1 + +# Absolute path to the directory that holds media. +# Example: "/home/media/media.lawrence.com/" +MEDIA_ROOT = '' + +# URL that handles the media served from MEDIA_ROOT. +# Example: "http://media.lawrence.com" +MEDIA_URL = '/site_media' +if c.has_option('statistics', 'web_prefix'): + MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL + +# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a +# trailing slash. +# Examples: "http://foo.com/media/", "/media/". +ADMIN_MEDIA_PREFIX = '/media/' + +# Make this unique, and don't share it with anybody. +SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7' + +# List of callables that know how to import templates from various sources. +TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', +) + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.middleware.doc.XViewMiddleware', +) + +ROOT_URLCONF = 'Bcfg2.Server.Reports.urls' + +# Authentication Settings +# Use NIS authentication backend defined in backends.py +AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', + 'Bcfg2.Server.Reports.backends.NISBackend') +# The NIS group authorized to login to BCFG2's reportinvg system +AUTHORIZED_GROUP = '' +#create login url area: +try: + import django.contrib.auth +except ImportError: + raise ImportError('Import of Django module failed. Is Django installed?') +django.contrib.auth.LOGIN_URL = '/login' + +SESSION_EXPIRE_AT_BROWSER_CLOSE = True + + + +TEMPLATE_DIRS = ( + # Put strings here, like "/home/html/django_templates". + # Always use forward slashes, even on Windows. 
+ '/usr/share/python-support/python-django/django/contrib/admin/templates/', + 'Bcfg2.Server.Reports.reports' +) + +if django.VERSION[0] == 1 and django.VERSION[1] < 2: + TEMPLATE_CONTEXT_PROCESSORS = ( + 'django.core.context_processors.auth', + 'django.core.context_processors.debug', + 'django.core.context_processors.i18n', + 'django.core.context_processors.media', + 'django.core.context_processors.request' + ) +else: + TEMPLATE_CONTEXT_PROCESSORS = ( + 'django.contrib.auth.context_processors.auth', + 'django.core.context_processors.debug', + 'django.core.context_processors.i18n', + 'django.core.context_processors.media', + 'django.core.context_processors.request' + ) + +INSTALLED_APPS = ( + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.admin', + 'Bcfg2.Server.Reports.reports' +) diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py new file mode 100644 index 000000000..39fc10b56 --- /dev/null +++ b/src/lib/Bcfg2/Server/Reports/updatefix.py @@ -0,0 +1,265 @@ +import Bcfg2.Server.Reports.settings + +from django.db import connection, DatabaseError +import django.core.management +import logging +import sys +import traceback +from Bcfg2.Server.Reports.reports.models import InternalDatabaseVersion, \ + TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA +logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix') + + +# all update function should go here +def _merge_database_table_entries(): + cursor = connection.cursor() + insert_cursor = connection.cursor() + find_cursor = connection.cursor() + cursor.execute(""" + Select name, kind from reports_bad + union + select name, kind from reports_modified + union + select name, kind from reports_extra + """) + # this fetch could be better done + entries_map = {} + for row in cursor.fetchall(): + insert_cursor.execute("insert into reports_entries (name, kind) \ + values (%s, %s)", (row[0], row[1])) + entries_map[(row[0], row[1])] = insert_cursor.lastrowid + + cursor.execute(""" + Select name, kind, reason_id, interaction_id, 1 from reports_bad + inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id + union + Select name, kind, reason_id, interaction_id, 2 from reports_modified + inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id + union + Select name, kind, reason_id, interaction_id, 3 from reports_extra + inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id + """) + for row in cursor.fetchall(): + key = (row[0], row[1]) + if entries_map.get(key, None): + entry_id = entries_map[key] + else: + find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key) + rowe = find_cursor.fetchone() + entry_id = rowe[0] + insert_cursor.execute("insert into reports_entries_interactions \ + (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4])) + + +def _interactions_constraint_or_idx(): + """sqlite doesn't support alter tables.. 
or constraints""" + cursor = connection.cursor() + try: + cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)') + except: + cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)') + + +def _remove_table_column(tbl, col): + """sqlite doesn't support deleting a column via alter table""" + cursor = connection.cursor() + try: + cursor.execute('alter table %s ' + 'drop column %s;' % (tbl, col)) + except DatabaseError: + # sqlite wants us to create a new table containing the columns we want + # and copy into it http://www.sqlite.org/faq.html#q11 + + tmptbl_name = "t_backup" + _tmptbl_create = \ +"""create temporary table "%s" ( + "id" integer NOT NULL PRIMARY KEY, + "client_id" integer NOT NULL REFERENCES "reports_client" ("id"), + "timestamp" datetime NOT NULL, + "state" varchar(32) NOT NULL, + "repo_rev_code" varchar(64) NOT NULL, + "goodcount" integer NOT NULL, + "totalcount" integer NOT NULL, + "server" varchar(256) NOT NULL, + "bad_entries" integer NOT NULL, + "modified_entries" integer NOT NULL, + "extra_entries" integer NOT NULL, + UNIQUE ("client_id", "timestamp") +);""" % tmptbl_name + _newtbl_create = \ +"""create table "%s" ( + "id" integer NOT NULL PRIMARY KEY, + "client_id" integer NOT NULL REFERENCES "reports_client" ("id"), + "timestamp" datetime NOT NULL, + "state" varchar(32) NOT NULL, + "repo_rev_code" varchar(64) NOT NULL, + "goodcount" integer NOT NULL, + "totalcount" integer NOT NULL, + "server" varchar(256) NOT NULL, + "bad_entries" integer NOT NULL, + "modified_entries" integer NOT NULL, + "extra_entries" integer NOT NULL, + UNIQUE ("client_id", "timestamp") +);""" % tbl + new_cols = "id,\ + client_id,\ + timestamp,\ + state,\ + repo_rev_code,\ + goodcount,\ + totalcount,\ + server,\ + bad_entries,\ + modified_entries,\ + extra_entries" + + delete_col = [_tmptbl_create, + "insert into %s select %s from %s;" % (tmptbl_name, new_cols, tbl), + "drop table %s" % tbl, + _newtbl_create, + "create index reports_interaction_client_id on %s (client_id);" % tbl, + "insert into %s select %s from %s;" % (tbl, new_cols, + tmptbl_name), + "drop table %s;" % tmptbl_name] + + for sql in delete_col: + cursor.execute(sql) + + +def _populate_interaction_entry_counts(): + '''Populate up the type totals for the interaction table''' + cursor = connection.cursor() + count_field = {TYPE_BAD: 'bad_entries', + TYPE_MODIFIED: 'modified_entries', + TYPE_EXTRA: 'extra_entries'} + + for type in list(count_field.keys()): + cursor.execute("select count(type), interaction_id " + + "from reports_entries_interactions where type = %s group by interaction_id" % type) + updates = [] + for row in cursor.fetchall(): + updates.append(row) + try: + cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates) + except Exception: + e = sys.exc_info()[1] + print(e) + cursor.close() + + +# be sure to test your upgrade query before reflecting the change in the models +# the list of function and sql command to do should go here +_fixes = [_merge_database_table_entries, + # this will remove unused tables + "drop table reports_bad;", + "drop table reports_bad_interactions;", + "drop table reports_extra;", + "drop table reports_extra_interactions;", + "drop table reports_modified;", + "drop table reports_modified_interactions;", + "drop table reports_repository;", + "drop table reports_metadata;", + "alter table reports_interaction add server varchar(256) 
not null default 'N/A';",
+    # fix revision data type to support $VCS hashes
+    "alter table reports_interaction add repo_rev_code varchar(64) default '';",
+    # Performance enhancements for large sites
+    'alter table reports_interaction add column bad_entries integer not null default -1;',
+    'alter table reports_interaction add column modified_entries integer not null default -1;',
+    'alter table reports_interaction add column extra_entries integer not null default -1;',
+    _populate_interaction_entry_counts,
+    _interactions_constraint_or_idx,
+    'alter table reports_reason add is_binary bool NOT NULL default False;',
+    'alter table reports_reason add is_sensitive bool NOT NULL default False;',
+    # wrapped in a lambda so the column drop runs when rollupdate() reaches
+    # this step instead of executing at import time
+    lambda: _remove_table_column('reports_interaction', 'client_version'),
+    "alter table reports_reason add unpruned varchar(1280) not null default 'N/A';",
+]
+
+# this will calculate the last possible version of the database
+lastversion = len(_fixes)
+
+
+def rollupdate(current_version):
+    """Coordinate all the updates; current_version must be an integer."""
+    ret = None
+    if current_version < lastversion:
+        for i in range(current_version, lastversion):
+            try:
+                if type(_fixes[i]) == str:
+                    connection.cursor().execute(_fixes[i])
+                else:
+                    _fixes[i]()
+            except:
+                logger.error("Failed to perform db update %s" % (_fixes[i]),
+                             exc_info=1)
+            # since the array starts at 0 but version
+            # starts at 1 we add 1 to the normal count
+            ret = InternalDatabaseVersion.objects.create(version=i + 1)
+        return ret
+    else:
+        return None
+
+
+def dosync():
+    """Function to do the synchronisation for the models"""
+    # try to detect if it's a fresh new database
+    try:
+        cursor = connection.cursor()
+        # If this table goes missing,
+        # don't forget to change it to the new one
+        cursor.execute("Select * from reports_client")
+        # if we get here with no error then the database has existing tables
+        fresh = False
+    except:
+        logger.debug("there was an error while detecting "
+                     "the freshness of the database")
+        #we should get here if the database is new
+        fresh = True
+
+    # ensure database connections are closed
+    # so that the management can do its job right
+    try:
+        cursor.close()
+        connection.close()
+    except:
+        # ignore any errors from missing/invalid dbs
+        pass
+    # Do the syncdb according to the django version
+    if "call_command" in dir(django.core.management):
+        # this is available since django 1.0 alpha.
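+        # call_command("syncdb", ...) is the programmatic equivalent of
+        # running "manage.py syncdb" and creates any missing tables.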
+
+
+def generateUrls(fn):
+ """
+ Parse url tuples and send to functions.
+
+ Decorator for url generators: handles url tuple parsing
+ before the actual function is called.
+ """
+ def url_gen(*urls):
+ results = []
+ for url_tuple in urls:
+ if isinstance(url_tuple, (list, tuple)):
+ results += fn(*url_tuple)
+ else:
+ raise ValueError("Unable to handle compiled urls")
+ return results
+ return url_gen
+
+
+@generateUrls
+def paginatedUrls(pattern, view, kwargs=None, name=None):
+ """
+ Takes a url tuple and adds paginated urls.
+
+ Extends a url tuple to include paginated urls.
+ Currently doesn't handle url() compiled patterns.
+ """
+ results = [(pattern, view, kwargs, name)]
+ tail = ''
+ mtail = re.search(r'(/+\+?\*?\??\$?)$', pattern)
+ if mtail:
+ tail = mtail.group(1)
+ pattern = pattern[:len(pattern) - len(tail)]
+ results += [(pattern + r"/(?P<page_number>\d+)" + tail, view, kwargs)]
+ results += [(pattern + r"/(?P<page_number>\d+)\|(?P<page_limit>\d+)" +
+ tail, view, kwargs)]
+ if not kwargs:
+ kwargs = dict()
+ kwargs['page_limit'] = 0
+ results += [(pattern + r"/?\|(?P<page_limit>all)" + tail, view, kwargs)]
+ return results
+
+
+@generateUrls
+def filteredUrls(pattern, view, kwargs=None, name=None):
+ """
+ Takes a url and adds filtered urls.
+
+ Extends a url tuple to include filtered view urls. Currently doesn't
+ handle url() compiled patterns.
+ """
+ results = [(pattern, view, kwargs, name)]
+ tail = ''
+ mtail = re.search(r'(/+\+?\*?\??\$?)$', pattern)
+ if mtail:
+ tail = mtail.group(1)
+ pattern = pattern[:len(pattern) - len(tail)]
+ for fltr in (r'/state/(?P<state>\w+)',
+ r'/server/(?P<server>[\w\-\.]+)',
+ r'/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'):
+ results += [(pattern + fltr + tail, view, kwargs)]
+ return results
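+
+
+# Illustrative example (not part of the original file): given
+#     paginatedUrls(r'^clients/$', 'views.client_index')
+# the trailing '/$' is peeled off and re-appended, yielding extra patterns
+# equivalent to r'^clients/(?P<page_number>\d+)/$' and
+# r'^clients/(?P<page_number>\d+)\|(?P<page_limit>\d+)/$', so a request
+# for /clients/2|50/ renders page 2 with 50 entries per page.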
+ """ + def url_gen(*urls): + results = [] + for url_tuple in urls: + if isinstance(url_tuple, (list, tuple)): + results += fn(*url_tuple) + else: + raise ValueError("Unable to handle compiled urls") + return results + return url_gen + + +@generateUrls +def paginatedUrls(pattern, view, kwargs=None, name=None): + """ + Takes a group of url tuples and adds paginated urls. + + Extends a url tuple to include paginated urls. + Currently doesn't handle url() compiled patterns. + + """ + results = [(pattern, view, kwargs, name)] + tail = '' + mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) + if mtail: + tail = mtail.group(1) + pattern = pattern[:len(pattern) - len(tail)] + results += [(pattern + "/(?P<page_number>\d+)" + tail, view, kwargs)] + results += [(pattern + "/(?P<page_number>\d+)\|(?P<page_limit>\d+)" + + tail, view, kwargs)] + if not kwargs: + kwargs = dict() + kwargs['page_limit'] = 0 + results += [(pattern + "/?\|(?P<page_limit>all)" + tail, view, kwargs)] + return results + + +@generateUrls +def filteredUrls(pattern, view, kwargs=None, name=None): + """ + Takes a url and adds filtered urls. + + Extends a url tuple to include filtered view urls. Currently doesn't + handle url() compiled patterns. + """ + results = [(pattern, view, kwargs, name)] + tail = '' + mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) + if mtail: + tail = mtail.group(1) + pattern = pattern[:len(pattern) - len(tail)] + for filter in ('/state/(?P<state>\w+)', + '/server/(?P<server>[\w\-\.]+)', + '/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'): + results += [(pattern + filter + tail, view, kwargs)] + return results + + +@generateUrls +def timeviewUrls(pattern, view, kwargs=None, name=None): + """ + Takes a url and adds timeview urls + + Extends a url tuple to include filtered view urls. Currently doesn't + handle url() compiled patterns. 
+ """ + results = [(pattern, view, kwargs, name)] + tail = '' + mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) + if mtail: + tail = mtail.group(1) + pattern = pattern[:len(pattern) - len(tail)] + for filter in ('/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})/' + \ + '(?P<hour>\d\d)-(?P<minute>\d\d)', + '/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'): + results += [(pattern + filter + tail, view, kwargs)] + return results diff --git a/src/lib/Bcfg2/Server/Snapshots/__init__.py b/src/lib/Bcfg2/Server/Snapshots/__init__.py new file mode 100644 index 000000000..7c901adb2 --- /dev/null +++ b/src/lib/Bcfg2/Server/Snapshots/__init__.py @@ -0,0 +1,31 @@ +__all__ = ['models', 'db_from_config', 'setup_session'] + +import sqlalchemy +import sqlalchemy.orm +# Compatibility import +from Bcfg2.Bcfg2Py3k import ConfigParser + + +def db_from_config(cfile): + cp = ConfigParser.ConfigParser() + cp.read([cfile]) + driver = cp.get('snapshots', 'driver') + if driver == 'sqlite': + path = cp.get('snapshots', 'database') + return 'sqlite:///%s' % path + elif driver in ['mysql', 'postgres']: + user = cp.get('snapshots', 'user') + password = cp.get('snapshots', 'password') + host = cp.get('snapshots', 'host') + db = cp.get('snapshots', 'database') + return '%s://%s:%s@%s/%s' % (driver, user, password, host, db) + else: + raise Exception("unsupported db driver %s" % driver) + + +def setup_session(cfile, debug=False): + engine = sqlalchemy.create_engine(db_from_config(cfile), + echo=debug) + Session = sqlalchemy.orm.sessionmaker() + Session.configure(bind=engine) + return Session() diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py new file mode 100644 index 000000000..5d7973c16 --- /dev/null +++ b/src/lib/Bcfg2/Server/Snapshots/model.py @@ -0,0 +1,329 @@ +import sys +from sqlalchemy import Table, Column, Integer, Unicode, ForeignKey, Boolean, \ + DateTime, UnicodeText, desc +import datetime +import sqlalchemy.exceptions +from sqlalchemy.orm import relation, backref +from sqlalchemy.ext.declarative import declarative_base + + +# py3k compatibility +def u_str(string): + if sys.hexversion >= 0x03000000: + return string + else: + return unicode(string) + + +class Uniquer(object): + force_rt = True + + @classmethod + def by_value(cls, session, **kwargs): + if cls.force_rt: + try: + return session.query(cls).filter_by(**kwargs).one() + except sqlalchemy.exceptions.InvalidRequestError: + return cls(**kwargs) + else: + return cls(**kwargs) + + @classmethod + def from_record(cls, session, data): + return cls.by_value(session, **data) + +Base = declarative_base() + + +class Administrator(Uniquer, Base): + __tablename__ = 'administrator' + id = Column(Integer, primary_key=True) + name = Column(Unicode(20), unique=True) + email = Column(Unicode(64)) + +admin_client = Table('admin_client', Base.metadata, + Column('admin_id', + Integer, + ForeignKey('administrator.id')), + Column('client_id', + Integer, + ForeignKey('client.id'))) + +admin_group = Table('admin_group', Base.metadata, + Column('admin_id', + Integer, + ForeignKey('administrator.id')), + Column('group_id', + Integer, + ForeignKey('group.id'))) + + +class Client(Uniquer, Base): + __tablename__ = 'client' + id = Column(Integer, primary_key=True) + name = Column(Unicode(64), unique=True) + admins = relation("Administrator", secondary=admin_client, + backref='clients') + active = Column(Boolean, default=True) + online = Column(Boolean, default=True) + online_ts = Column(DateTime) + + +class Group(Uniquer, Base): + 
diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py
new file mode 100644
index 000000000..5d7973c16
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Snapshots/model.py
@@ -0,0 +1,329 @@
+import sys
+from sqlalchemy import Table, Column, Integer, Unicode, ForeignKey, Boolean, \
+ DateTime, UnicodeText, desc
+import datetime
+import sqlalchemy.exceptions
+from sqlalchemy.orm import relation, backref
+from sqlalchemy.ext.declarative import declarative_base
+
+
+# py3k compatibility
+def u_str(string):
+ if sys.hexversion >= 0x03000000:
+ return string
+ else:
+ return unicode(string)
+
+
+class Uniquer(object):
+ force_rt = True
+
+ @classmethod
+ def by_value(cls, session, **kwargs):
+ """Return the existing row matching kwargs, or a new unsaved
+ instance if there is none."""
+ if cls.force_rt:
+ try:
+ return session.query(cls).filter_by(**kwargs).one()
+ except sqlalchemy.exceptions.InvalidRequestError:
+ return cls(**kwargs)
+ else:
+ return cls(**kwargs)
+
+ @classmethod
+ def from_record(cls, session, data):
+ return cls.by_value(session, **data)
+
+Base = declarative_base()
+
+
+class Administrator(Uniquer, Base):
+ __tablename__ = 'administrator'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(20), unique=True)
+ email = Column(Unicode(64))
+
+admin_client = Table('admin_client', Base.metadata,
+ Column('admin_id',
+ Integer,
+ ForeignKey('administrator.id')),
+ Column('client_id',
+ Integer,
+ ForeignKey('client.id')))
+
+admin_group = Table('admin_group', Base.metadata,
+ Column('admin_id',
+ Integer,
+ ForeignKey('administrator.id')),
+ Column('group_id',
+ Integer,
+ ForeignKey('group.id')))
+
+
+class Client(Uniquer, Base):
+ __tablename__ = 'client'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(64), unique=True)
+ admins = relation("Administrator", secondary=admin_client,
+ backref='clients')
+ active = Column(Boolean, default=True)
+ online = Column(Boolean, default=True)
+ online_ts = Column(DateTime)
+
+
+class Group(Uniquer, Base):
+ __tablename__ = 'group'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(32), unique=True)
+ admins = relation("Administrator", secondary=admin_group,
+ backref='groups')
+
+
+class ConnectorKeyVal(Uniquer, Base):
+ __tablename__ = 'connkeyval'
+ id = Column(Integer, primary_key=True)
+ connector = Column(Unicode(16))
+ key = Column(Unicode(32))
+ value = Column(UnicodeText)
+
+meta_group = Table('meta_group', Base.metadata,
+ Column('metadata_id',
+ Integer,
+ ForeignKey('metadata.id')),
+ Column('group_id',
+ Integer,
+ ForeignKey('group.id')))
+
+meta_conn = Table('meta_conn', Base.metadata,
+ Column('metadata_id',
+ Integer,
+ ForeignKey('metadata.id')),
+ Column('connkeyval_id',
+ Integer,
+ ForeignKey('connkeyval.id')))
+
+
+class Metadata(Base):
+ __tablename__ = 'metadata'
+ id = Column(Integer, primary_key=True)
+ client_id = Column(Integer, ForeignKey('client.id'))
+ client = relation(Client)
+ groups = relation("Group", secondary=meta_group)
+ keyvals = relation(ConnectorKeyVal, secondary=meta_conn)
+ timestamp = Column(DateTime)
+
+ @classmethod
+ def from_metadata(cls, mysession, mymetadata):
+ client = Client.by_value(mysession, name=u_str(mymetadata.hostname))
+ m = cls(client=client)
+ for group in mymetadata.groups:
+ m.groups.append(Group.by_value(mysession, name=u_str(group)))
+ for connector in mymetadata.connectors:
+ data = getattr(mymetadata, connector)
+ if not isinstance(data, dict):
+ continue
+ for key, value in list(data.items()):
+ if not isinstance(value, str):
+ continue
+ m.keyvals.append(ConnectorKeyVal.by_value(mysession,
+ connector=u_str(connector),
+ key=u_str(key),
+ value=u_str(value)))
+ return m
+
+
+class Package(Base, Uniquer):
+ __tablename__ = 'package'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(24))
+ type = Column(Unicode(16))
+ version = Column(Unicode(16))
+ verification_status = Column(Boolean)
+
+
+class CorrespondenceType(object):
+ mtype = Package
+
+ @classmethod
+ def from_record(cls, mysession, record):
+ """Build a start/end pair from a (modified, correct, name,
+ start_dict, end_dict) record."""
+ (mod, corr, name, s_dict, e_dict) = record
+ if not s_dict:
+ start = None
+ else:
+ start = cls.mtype.by_value(mysession, name=name, **s_dict)
+ if s_dict != e_dict:
+ end = cls.mtype.by_value(mysession, name=name, **e_dict)
+ else:
+ end = start
+ return cls(start=start, end=end, modified=mod, correct=corr)
+
+
+class PackageCorrespondence(Base, CorrespondenceType):
+ mtype = Package
+ __tablename__ = 'package_pair'
+ id = Column(Integer, primary_key=True)
+ start_id = Column(Integer, ForeignKey('package.id'))
+ start = relation(Package, primaryjoin=start_id == Package.id)
+ end_id = Column(Integer, ForeignKey('package.id'), nullable=True)
+ end = relation(Package, primaryjoin=end_id == Package.id)
+ modified = Column(Boolean)
+ correct = Column(Boolean)
+
+package_snap = Table('package_snap', Base.metadata,
+ Column('ppair_id',
+ Integer,
+ ForeignKey('package_pair.id')),
+ Column('snapshot_id',
+ Integer,
+ ForeignKey('snapshot.id')))
+
+
+class Service(Base, Uniquer):
+ __tablename__ = 'service'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(16))
+ type = Column(Unicode(12))
+ status = Column(Boolean)
+
+
+class ServiceCorrespondence(Base, CorrespondenceType):
+ mtype = Service
+ __tablename__ = 'service_pair'
+ id = Column(Integer, primary_key=True)
+ start_id = Column(Integer, ForeignKey('service.id'))
+ start = relation(Service, primaryjoin=start_id == Service.id)
+ end_id = Column(Integer, ForeignKey('service.id'), nullable=True)
+ end = relation(Service, primaryjoin=end_id == Service.id)
+ modified = Column(Boolean)
+ correct = Column(Boolean)
+
+service_snap = Table('service_snap', Base.metadata,
+ Column('spair_id',
+ Integer,
+ ForeignKey('service_pair.id')),
+ Column('snapshot_id',
+ Integer,
+ ForeignKey('snapshot.id')))
+
+
+class File(Base, Uniquer):
+ __tablename__ = 'file'
+ id = Column(Integer, primary_key=True)
+ name = Column(UnicodeText)
+ type = Column(Unicode(12))
+ owner = Column(Unicode(12))
+ group = Column(Unicode(16))
+ perms = Column(Integer)
+ contents = Column(UnicodeText)
+
+
+class FileCorrespondence(Base, CorrespondenceType):
+ mtype = File
+ __tablename__ = 'file_pair'
+ id = Column(Integer, primary_key=True)
+ start_id = Column(Integer, ForeignKey('file.id'))
+ start = relation(File, primaryjoin=start_id == File.id)
+ end_id = Column(Integer, ForeignKey('file.id'), nullable=True)
+ end = relation(File, primaryjoin=end_id == File.id)
+ modified = Column(Boolean)
+ correct = Column(Boolean)
+
+file_snap = Table('file_snap', Base.metadata,
+ Column('fpair_id',
+ Integer,
+ ForeignKey('file_pair.id')),
+ Column('snapshot_id',
+ Integer,
+ ForeignKey('snapshot.id')))
+
+extra_pkg_snap = Table('extra_pkg_snap', Base.metadata,
+ Column('package_id',
+ Integer,
+ ForeignKey('package.id')),
+ Column('snapshot_id',
+ Integer,
+ ForeignKey('snapshot.id')))
+
+extra_file_snap = Table('extra_file_snap', Base.metadata,
+ Column('file_id',
+ Integer,
+ ForeignKey('file.id')),
+ Column('snapshot_id',
+ Integer,
+ ForeignKey('snapshot.id')))
+
+extra_service_snap = Table('extra_service_snap', Base.metadata,
+ Column('service_id',
+ Integer,
+ ForeignKey('service.id')),
+ Column('snapshot_id',
+ Integer,
+ ForeignKey('snapshot.id')))
+
+
+class Action(Base):
+ __tablename__ = 'action'
+ id = Column(Integer, primary_key=True)
+ command = Column(UnicodeText)
+ return_code = Column(Integer)
+ output = Column(UnicodeText)
+
+action_snap = Table('action_snap', Base.metadata,
+ Column('action_id', Integer, ForeignKey('action.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
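+
+# Illustrative sketch (not part of the original file): recording a snapshot
+# with the Snapshot class defined below, assuming `meta` is a client
+# metadata object and the entries/extra dicts are keyed by tag as expected
+# by c_dispatch/e_dispatch:
+#
+#     from Bcfg2.Server.Snapshots import setup_session
+#     session = setup_session('/etc/bcfg2.conf')
+#     snap = Snapshot.from_data(session, correct=True, revision=u'abc123',
+#                               metadata=meta, entries=entries, extra=extra)
+#     session.add(snap)
+#     session.commit()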
+
+
+class Snapshot(Base):
+ __tablename__ = 'snapshot'
+ id = Column(Integer, primary_key=True)
+ correct = Column(Boolean)
+ revision = Column(Unicode(36))
+ metadata_id = Column(Integer, ForeignKey('metadata.id'))
+ client_metadata = relation(Metadata, primaryjoin=metadata_id == Metadata.id)
+ timestamp = Column(DateTime, default=datetime.datetime.now)
+ client_id = Column(Integer, ForeignKey('client.id'))
+ client = relation(Client, backref=backref('snapshots'))
+ packages = relation(PackageCorrespondence, secondary=package_snap)
+ services = relation(ServiceCorrespondence, secondary=service_snap)
+ files = relation(FileCorrespondence, secondary=file_snap)
+ actions = relation(Action, secondary=action_snap)
+ extra_packages = relation(Package, secondary=extra_pkg_snap)
+ extra_services = relation(Service, secondary=extra_service_snap)
+ extra_files = relation(File, secondary=extra_file_snap)
+
+ c_dispatch = dict([('Package', ('packages', PackageCorrespondence)),
+ ('Service', ('services', ServiceCorrespondence)),
+ ('Path', ('files', FileCorrespondence))])
+ e_dispatch = dict([('Package', ('extra_packages', Package)),
+ ('Service', ('extra_services', Service)),
+ ('Path', ('extra_files', File))])
+
+ @classmethod
+ def from_data(cls, session, correct, revision, metadata, entries, extra):
+ dbm = Metadata.from_metadata(session, metadata)
+ snap = cls(correct=correct, client_metadata=dbm, revision=revision,
+ timestamp=datetime.datetime.now(), client=dbm.client)
+ for (dispatch, data) in [(cls.c_dispatch, entries),
+ (cls.e_dispatch, extra)]:
+ for key in dispatch:
+ dest, ecls = dispatch[key]
+ for edata in list(data[key].values()):
+ getattr(snap, dest).append(ecls.from_record(session, edata))
+ return snap
+
+ @classmethod
+ def by_client(cls, session, clientname):
+ return session.query(cls).join(cls.client_metadata,
+ Metadata.client).filter(Client.name == clientname)
+
+ @classmethod
+ def get_current(cls, session, clientname):
+ return session.query(Snapshot).join(Snapshot.client_metadata,
+ Metadata.client).filter(Client.name == clientname).order_by(desc(Snapshot.timestamp)).first()
+
+ @classmethod
+ def get_by_date(cls, session, clientname, timestamp):
+ return session.query(Snapshot)\
+ .join(Snapshot.client_metadata, Metadata.client)\
+ .filter(Snapshot.timestamp < timestamp)\
+ .filter(Client.name == clientname)\
+ .order_by(desc(Snapshot.timestamp))\
+ .first()
diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py
new file mode 100644
index 000000000..96777b0bf
--- /dev/null
+++ b/src/lib/Bcfg2/Server/__init__.py
@@ -0,0 +1,4 @@
+"""This is the set of modules for Bcfg2.Server."""
+
+__all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins",
+ "Hostbase", "Reports", "Snapshots"]
diff --git a/src/lib/Bcfg2/Statistics.py b/src/lib/Bcfg2/Statistics.py
new file mode 100644
index 000000000..a0cb8f39b
--- /dev/null
+++ b/src/lib/Bcfg2/Statistics.py
@@ -0,0 +1,32 @@
+"""Track runtime statistics (min/max/mean/count) for named values."""
+
+class Statistic(object):
+ """A single named value with a running min, max, and mean."""
+ def __init__(self, name, initial_value):
+ self.name = name
+ self.min = float(initial_value)
+ self.max = float(initial_value)
+ self.ave = float(initial_value)
+ self.count = 1
+
+ def add_value(self, value):
+ if value < self.min:
+ self.min = value
+ if value > self.max:
+ self.max = value
+ self.count += 1
+ # incremental update of the running mean
+ self.ave = (((self.ave * (self.count - 1)) + value) / self.count)
+
+ def get_value(self):
+ return (self.name, (self.min, self.max, self.ave, self.count))
+
+class Statistics(object):
+ """A dictionary of named Statistic objects."""
+ def __init__(self):
+ self.data = dict()
+
+ def add_value(self, name, value):
+ if name not in self.data:
+ self.data[name] = Statistic(name, value)
+ else:
+ self.data[name].add_value(value)
+
+ def display(self):
+ return dict([value.get_value() for value in list(self.data.values())])
diff --git a/src/lib/Bcfg2/__init__.py b/src/lib/Bcfg2/__init__.py
new file mode 100644
index 000000000..357f66f6d
--- /dev/null
+++ b/src/lib/Bcfg2/__init__.py
@@ -0,0 +1,3 @@
+"""Base modules definition."""
+
+__all__ = ['Server', 'Client', 'Component', 'Logger', 'Options', 'Proxy', 'Statistics']
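
# Usage sketch for Bcfg2/Statistics.py above (illustrative, not part of the
# commit): each add_value() folds a sample into a named running
# min/max/mean/count; the "probe-run" name is just an example.
#
#     stats = Statistics()
#     for elapsed in (1.0, 2.0, 3.0):
#         stats.add_value("probe-run", elapsed)
#     stats.display()  # -> {'probe-run': (1.0, 3.0, 2.0, 3)}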