diff options
Diffstat (limited to 'build/lib/Bcfg2/Server')
109 files changed, 13765 insertions, 0 deletions
diff --git a/build/lib/Bcfg2/Server/Admin/Backup.py b/build/lib/Bcfg2/Server/Admin/Backup.py new file mode 100644 index 000000000..d6458f97d --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Backup.py @@ -0,0 +1,33 @@ +import glob +import os +import sys +import time +import tarfile +import Bcfg2.Server.Admin +import Bcfg2.Options + +class Backup(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Make a backup of the Bcfg2 repository." + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin backup") + #"\n\nbcfg2-admin backup restore") + __usage__ = ("bcfg2-admin backup") + + def __init__(self, configfile): + Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile, + self.__usage__) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + # Get Bcfg2 repo directory + opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY} + setup = Bcfg2.Options.OptionParser(opts) + setup.parse(sys.argv[1:]) + self.datastore = setup['repo'] + timestamp = time.strftime('%Y%m%d%H%M%S') + format = 'gz' + mode = 'w:' + format + filename = timestamp + '.tar' + '.' 
+ format + out = tarfile.open(self.datastore + '/' + filename, mode=mode) + out.add(self.datastore, os.path.basename(self.datastore)) + out.close() + print "Archive %s was stored under %s" % (filename, self.datastore) diff --git a/build/lib/Bcfg2/Server/Admin/Bundle.py b/build/lib/Bcfg2/Server/Admin/Bundle.py new file mode 100644 index 000000000..41cd5727e --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Bundle.py @@ -0,0 +1,100 @@ +import lxml.etree +import glob +import sys +import re +import Bcfg2.Server.Admin +import Bcfg2.Options +from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError + +class Bundle(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Create or delete bundle entries" + __longhelp__ = (__shorthelp__ + #"\n\nbcfg2-admin bundle add <bundle> " + #"\n\nbcfg2-admin bundle del <bundle>" + "\n\nbcfg2-admin bundle list-xml" + "\n\nbcfg2-admin bundle list-genshi" + "\n\nbcfg2-admin bundle show") + __usage__ = ("bcfg2-admin bundle [options] [add|del] [group]") + + def __init__(self, configfile): + Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile, + self.__usage__) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + reg='((?:[a-z][a-z\\.\\d\\-]+)\\.(?:[a-z][a-z\\-]+))(?![\\w\\.])' + + #Get all bundles out of the Bundle/ directory + opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY} + setup = Bcfg2.Options.OptionParser(opts) + setup.parse(sys.argv[1:]) + repo = setup['repo'] + xml_list = glob.glob("%s/Bundler/*.xml" % repo) + genshi_list = glob.glob("%s/Bundler/*.genshi" % repo) + + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin bundle help for usage.") +# if args[0] == 'add': +# try: +# self.metadata.add_bundle(args[1]) +# except MetadataConsistencyError: +# print "Error in adding bundle." 
+# raise SystemExit(1) +# elif args[0] in ['delete', 'remove', 'del', 'rm']: +# try: +# self.metadata.remove_bundle(args[1]) +# except MetadataConsistencyError: +# print "Error in deleting bundle." +# raise SystemExit(1) + #Lists all available xml bundles + elif args[0] in ['list-xml', 'ls-xml']: + bundle_name = [] + for bundle_path in xml_list: + rg = re.compile(reg,re.IGNORECASE|re.DOTALL) + bundle_name.append(rg.search(bundle_path).group(1)) + for bundle in bundle_name: + print bundle.split('.')[0] + #Lists all available genshi bundles + elif args[0] in ['list-genshi', 'ls-gen']: + bundle_name = [] + for bundle_path in genshi_list: + rg = re.compile(reg,re.IGNORECASE|re.DOTALL) + bundle_name.append(rg.search(bundle_path).group(1)) + for bundle in bundle_name: + print bundle.split('.')[0] + #Shows a list of all available bundles and prints bundle + #details after the user choose one bundle. + #FIXME: Add support for detailed output of genshi bundles + elif args[0] in ['show']: + bundle_name = [] + bundle_list = xml_list + genshi_list + for bundle_path in bundle_list: + rg = re.compile(reg,re.IGNORECASE|re.DOTALL) + bundle_name.append(rg.search(bundle_path).group(1)) + text = "Available bundles (Number of bundles: %s)" % \ + (len(bundle_list)) + print text + print "%s" % (len(text) * "-") + for i in range(len(bundle_list)): + print "[%i]\t%s" % (i, bundle_name[i]) + print "Enter the line number of a bundle for details:", + lineno = raw_input() + if int(lineno) >= int(len(bundle_list)): + print "No line with this number." + else: + if '%s/Bundler/%s' % \ + (repo, bundle_name[int(lineno)]) in genshi_list: + print "Detailed output for *.genshi bundle is not supported." 
+ else: + print 'Details for the "%s" bundle:' % \ + (bundle_name[int(lineno)].split('.')[0]) + tree = lxml.etree.parse(bundle_list[int(lineno)]) + #Prints bundle content + #print lxml.etree.tostring(tree) + names = ['Action', 'Package', 'Path', 'Service'] + for name in names: + for node in tree.findall("//" + name): + print "%s:\t%s" % (name, node.attrib["name"]) + else: + print "No command specified" + raise SystemExit(1) diff --git a/build/lib/Bcfg2/Server/Admin/Client.py b/build/lib/Bcfg2/Server/Admin/Client.py new file mode 100644 index 000000000..0eee22ae4 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Client.py @@ -0,0 +1,64 @@ +import lxml.etree +import Bcfg2.Server.Admin +from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError + +class Client(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Create, delete, or modify client entries" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin client add <client> " + "attr1=val1 attr2=val2\n" + "\n\nbcfg2-admin client update <client> " + "attr1=val1 attr2=val2\n" + "\n\nbcfg2-admin client list\n" + "bcfg2-admin client del <client>") + __usage__ = ("bcfg2-admin client [options] [add|del|update|list] [attr=val]") + + def __init__(self, configfile): + Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile, + self.__usage__) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin client help for usage.") + if args[0] == 'add': + attr_d = {} + for i in args[2:]: + attr, val = i.split('=', 1) + if attr not in ['profile', 'uuid', 'password', + 'location', 'secure', 'address']: + print "Attribute %s unknown" % attr + raise SystemExit(1) + attr_d[attr] = val + try: + self.metadata.add_client(args[1], attr_d) + except MetadataConsistencyError: + print "Error in adding client" + raise SystemExit(1) + elif args[0] in ['update', 'up']: + attr_d = {} + for i in args[2:]: + attr, val = 
i.split('=', 1) + if attr not in ['profile', 'uuid', 'password', + 'location', 'secure', 'address']: + print "Attribute %s unknown" % attr + raise SystemExit(1) + attr_d[attr] = val + try: + self.metadata.update_client(args[1], attr_d) + except MetadataConsistencyError: + print "Error in updating client" + raise SystemExit(1) + elif args[0] in ['delete', 'remove', 'del', 'rm']: + try: + self.metadata.remove_client(args[1]) + except MetadataConsistencyError: + print "Error in deleting client" + raise SystemExit(1) + elif args[0] in ['list', 'ls']: + tree = lxml.etree.parse(self.metadata.data + "/clients.xml") + for node in tree.findall("//Client"): + print node.attrib["name"] + else: + print "No command specified" + raise SystemExit(1) diff --git a/build/lib/Bcfg2/Server/Admin/Compare.py b/build/lib/Bcfg2/Server/Admin/Compare.py new file mode 100644 index 000000000..f97233b0e --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Compare.py @@ -0,0 +1,137 @@ +import lxml.etree, os +import Bcfg2.Server.Admin + +class Compare(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Determine differences between files or " + "directories of client specification instances") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin compare <file1> <file2>" + "\nbcfg2-admin compare -r <dir1> <dir2>") + __usage__ = ("bcfg2-admin compare <old> <new>\n\n" + " -r\trecursive") + + def __init__(self, configfile): + Bcfg2.Server.Admin.Mode.__init__(self, configfile) + self.important = {'Package':['name', 'version'], + 'Service':['name', 'status'], + 'Directory':['name', 'owner', 'group', 'perms'], + 'SymLink':['name', 'to'], + 'ConfigFile':['name', 'owner', 'group', 'perms'], + 'Permissions':['name', 'perms'], + 'PostInstall':['name']} + + def compareStructures(self, new, old): + for child in new.getchildren(): + equiv = old.xpath('%s[@name="%s"]' % + (child.tag, child.get('name'))) + if child.tag in self.important: + print "tag type %s not handled" % (child.tag) + continue + if len(equiv) == 0: + 
print ("didn't find matching %s %s" % + (child.tag, child.get('name'))) + continue + elif len(equiv) >= 1: + if child.tag == 'ConfigFile': + if child.text != equiv[0].text: + print " %s %s contents differ" \ + % (child.tag, child.get('name')) + continue + noattrmatch = [field for field in self.important[child.tag] if \ + child.get(field) != equiv[0].get(field)] + if not noattrmatch: + new.remove(child) + old.remove(equiv[0]) + else: + print " %s %s attributes %s do not match" % \ + (child.tag, child.get('name'), noattrmatch) + if len(old.getchildren()) == 0 and len(new.getchildren()) == 0: + return True + if new.tag == 'Independent': + name = 'Base' + else: + name = new.get('name') + both = [] + oldl = ["%s %s" % (entry.tag, entry.get('name')) for entry in old] + newl = ["%s %s" % (entry.tag, entry.get('name')) for entry in new] + for entry in newl: + if entry in oldl: + both.append(entry) + newl.remove(entry) + oldl.remove(entry) + for entry in both: + print " %s differs (in bundle %s)" % (entry, name) + for entry in oldl: + print " %s only in old configuration (in bundle %s)" % (entry, name) + for entry in newl: + print " %s only in new configuration (in bundle %s)" % (entry, name) + return False + + def compareSpecifications(self, path1, path2): + try: + new = lxml.etree.parse(path1).getroot() + except IOError: + print "Failed to read %s" % (path1) + raise SystemExit(1) + + try: + old = lxml.etree.parse(path2).getroot() + except IOError: + print "Failed to read %s" % (path2) + raise SystemExit(1) + + for src in [new, old]: + for bundle in src.findall('./Bundle'): + if bundle.get('name')[-4:] == '.xml': + bundle.set('name', bundle.get('name')[:-4]) + + rcs = [] + for bundle in new.findall('./Bundle'): + equiv = old.xpath('Bundle[@name="%s"]' % (bundle.get('name'))) + if len(equiv) == 0: + print "couldnt find matching bundle for %s" % bundle.get('name') + continue + if len(equiv) == 1: + if self.compareStructures(bundle, equiv[0]): + new.remove(bundle) + 
old.remove(equiv[0]) + rcs.append(True) + else: + rcs.append(False) + else: + print "Unmatched bundle %s" % (bundle.get('name')) + rcs.append(False) + i1 = new.find('./Independent') + i2 = old.find('./Independent') + if self.compareStructures(i1, i2): + new.remove(i1) + old.remove(i2) + else: + rcs.append(False) + return False not in rcs + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin compare help for usage.") + if '-r' in args: + args = list(args) + args.remove('-r') + (oldd, newd) = args + (old, new) = [os.listdir(spot) for spot in args] + for item in old: + print "Entry:", item + state = self.__call__([oldd + '/' + item, newd + '/' + item]) + new.remove(item) + if state: + print "Entry:", item, "good" + else: + print "Entry:", item, "bad" + if new: + print "new has extra entries", new + return + try: + (old, new) = args + except IndexError: + print self.__call__.__doc__ + raise SystemExit(1) diff --git a/build/lib/Bcfg2/Server/Admin/Examples.py b/build/lib/Bcfg2/Server/Admin/Examples.py new file mode 100644 index 000000000..3335c5e10 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Examples.py @@ -0,0 +1,71 @@ +import dulwich +import time +import tarfile +from subprocess import Popen +import Bcfg2.Server.Admin +from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError + +class Examples(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Pulls in the data from the Bcfg2 sample repository" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin examples pull\n" + "\n\nbcfg2-admin examples update\n" + "bcfg2-admin examples backup") + __usage__ = ("bcfg2-admin examples [options] [add|del|update|list] [attr=val]") + + def __init__(self, configfile): + Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile, + self.__usage__) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + + + 
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin examples help for usage.") + + if args[0] == 'pull': + try: + # FIXME: Repo URL is hardcoded for now + Popen(['git', 'clone', 'https://github.com/solj/bcfg2-repo.git', datastore]) + except MetadataConsistencyError: + print "Error in pulling examples." + raise SystemExit(1) + +#fatal: destination path 'bcfg2-test' already exists and is not an empty directory. + + elif args[0] == 'backup': + try: + self.metadata.add_group(args[1], attr_d) + except MetadataConsistencyError: + print "Error in adding group" + raise SystemExit(1) + + + elif args[0] == 'backup': + try: + self.metadata.add_group(args[1], attr_d) + except MetadataConsistencyError: + print "Error in adding group" + raise SystemExit(1) + + else: + print "No command specified" + raise SystemExit(1) + + def repobackup(): + """Make a backup of the existing files in the Bcfg2 repo directory.""" + if os.path.isdir(datastore): + print 'Backup in progress...' + target = time.strftime('%Y%m%d%H%M%S') + + + out = tarfile.open(filename, w.gz) + else: + logger.error("%s doesn't exist." 
% datastore) + #raise Bcfg2.Server.Plugin.PluginInitError diff --git a/build/lib/Bcfg2/Server/Admin/Group.py b/build/lib/Bcfg2/Server/Admin/Group.py new file mode 100644 index 000000000..6a1c13775 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Group.py @@ -0,0 +1,66 @@ +import lxml.etree +import Bcfg2.Server.Admin +from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError + +class Group(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Create, delete, or modify group entries" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin group add <group> " + "attr1=val1 attr2=val2\n" + "\n\nbcfg2-admin group update <group> " + "attr1=val1 attr2=val2\n" + "\n\nbcfg2-admin group list\n" + "bcfg2-admin group del <group>") + __usage__ = ("bcfg2-admin group [options] [add|del|update|list] [attr=val]") + + def __init__(self, configfile): + Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile, + self.__usage__) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin group help for usage.") + if args[0] == 'add': + attr_d = {} + for i in args[2:]: + attr, val = i.split('=', 1) + if attr not in ['profile', 'public', 'default', + 'name', 'auth', 'toolset', 'category', + 'comment']: + print "Attribute %s unknown" % attr + raise SystemExit(1) + attr_d[attr] = val + try: + self.metadata.add_group(args[1], attr_d) + except MetadataConsistencyError: + print "Error in adding group" + raise SystemExit(1) + elif args[0] in ['update', 'up']: + attr_d = {} + for i in args[2:]: + attr, val = i.split('=', 1) + if attr not in ['profile', 'public', 'default', + 'name', 'auth', 'toolset', 'category', + 'comment']: + print "Attribute %s unknown" % attr + raise SystemExit(1) + attr_d[attr] = val + try: + self.metadata.update_group(args[1], attr_d) + except MetadataConsistencyError: + print "Error in updating group" + raise SystemExit(1) + elif args[0] in ['delete', 
'remove', 'del', 'rm']: + try: + self.metadata.remove_group(args[1]) + except MetadataConsistencyError: + print "Error in deleting group" + raise SystemExit(1) + elif args[0] in ['list', 'ls']: + tree = lxml.etree.parse(self.metadata.data + "/groups.xml") + for node in tree.findall("//Group"): + print node.attrib["name"] + else: + print "No command specified" + raise SystemExit(1) diff --git a/build/lib/Bcfg2/Server/Admin/Init.py b/build/lib/Bcfg2/Server/Admin/Init.py new file mode 100644 index 000000000..c6d1f9e3d --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Init.py @@ -0,0 +1,280 @@ +import getpass +import os +import random +import socket +import string +import subprocess +import Bcfg2.Server.Admin +import Bcfg2.Server.Plugin +import Bcfg2.Options + +# default config file +config = ''' +[server] +repository = %s +plugins = %s + +[statistics] +sendmailpath = %s +database_engine = sqlite3 +# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. +database_name = +# Or path to database file if using sqlite3. +#<repository>/etc/brpt.sqlite is default path if left empty +database_user = +# Not used with sqlite3. +database_password = +# Not used with sqlite3. +database_host = +# Not used with sqlite3. +database_port = +# Set to empty string for default. Not used with sqlite3. 
+web_debug = True + +[communication] +protocol = %s +password = %s +certificate = %s/%s +key = %s/%s +ca = %s/%s + +[components] +bcfg2 = %s +''' + +# Default groups +groups = '''<Groups version='3.0'> + <Group profile='true' public='true' default='true' name='basic'> + <Group name='%s'/> + </Group> + <Group name='ubuntu'/> + <Group name='debian'/> + <Group name='freebsd'/> + <Group name='gentoo'/> + <Group name='redhat'/> + <Group name='suse'/> + <Group name='mandrake'/> + <Group name='solaris'/> +</Groups> +''' + +# Default contents of clients.xml +clients = '''<Clients version="3.0"> + <Client profile="basic" pingable="Y" pingtime="0" name="%s"/> +</Clients> +''' + +# Mapping of operating system names to groups +os_list = [ + ('Red Hat/Fedora/RHEL/RHAS/Centos', 'redhat'), + ('SUSE/SLES', 'suse'), + ('Mandrake', 'mandrake'), + ('Debian', 'debian'), + ('Ubuntu', 'ubuntu'), + ('Gentoo', 'gentoo'), + ('FreeBSD', 'freebsd') + ] + +# Complete list of plugins +plugin_list = ['Account', 'Base', 'Bundler', 'Cfg', + 'Decisions', 'Deps', 'Metadata', 'Packages', + 'Pkgmgr', 'Probes', 'Properties', 'Rules', + 'Snapshots', 'SSHbase', 'Statistics', 'Svcmgr', + 'TCheetah', 'TGenshi'] + +# Default list of plugins to use +default_plugins = ['SSHbase', 'Cfg', 'Pkgmgr', 'Rules', + 'Metadata', 'Base', 'Bundler'] + +def gen_password(length): + """Generates a random alphanumeric password with length characters.""" + chars = string.letters + string.digits + newpasswd = '' + for i in range(length): + newpasswd = newpasswd + random.choice(chars) + return newpasswd + +def create_key(hostname, keypath, certpath): + """Creates a bcfg2.key at the directory specifed by keypath.""" + kcstr = "openssl req -batch -x509 -nodes -subj '/C=US/ST=Illinois/L=Argonne/CN=%s' -days 1000 -newkey rsa:2048 -keyout %s -noout" % (hostname, keypath) + subprocess.call((kcstr), shell=True) + ccstr = "openssl req -batch -new -subj '/C=US/ST=Illinois/L=Argonne/CN=%s' -key %s | openssl x509 -req -days 1000 -signkey 
%s -out %s" % (hostname, keypath, keypath, certpath) + subprocess.call((ccstr), shell=True) + os.chmod(keypath, 0600) + +def create_conf(confpath, confdata): + # don't overwrite existing bcfg2.conf file + if os.path.exists(confpath): + result = raw_input("\nWarning: %s already exists. " + "Overwrite? [y/N]: " % confpath) + if result not in ['Y', 'y']: + print("Leaving %s unchanged" % confpath) + return + try: + open(confpath, "w").write(confdata) + os.chmod(confpath, 0600) + except Exception, e: + print("Error %s occured while trying to write configuration " + "file to '%s'\n" % + (e, confpath)) + raise SystemExit(1) + + +class Init(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Interactively initialize a new repository.") + __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin init" + __usage__ = "bcfg2-admin init" + options = { + 'configfile': Bcfg2.Options.CFILE, + 'plugins' : Bcfg2.Options.SERVER_PLUGINS, + 'proto' : Bcfg2.Options.SERVER_PROTOCOL, + 'repo' : Bcfg2.Options.SERVER_REPOSITORY, + 'sendmail' : Bcfg2.Options.SENDMAIL_PATH, + } + repopath = "" + response = "" + def __init__(self, configfile): + Bcfg2.Server.Admin.Mode.__init__(self, configfile) + + def _set_defaults(self): + """Set default parameters.""" + self.configfile = self.opts['configfile'] + self.repopath = self.opts['repo'] + self.password = gen_password(8) + self.server_uri = "https://%s:6789" % socket.getfqdn() + self.plugins = default_plugins + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + + # Parse options + self.opts = Bcfg2.Options.OptionParser(self.options) + self.opts.parse(args) + self._set_defaults() + + # Prompt the user for input + self._prompt_config() + self._prompt_repopath() + self._prompt_password() + self._prompt_hostname() + self._prompt_server() + self._prompt_groups() + + # Initialize the repository + self.init_repo() + + def _prompt_hostname(self): + """Ask for the server hostname.""" + data = raw_input("What is the server's hostname [%s]: " % 
socket.getfqdn()) + if data != '': + self.shostname = data + else: + self.shostname = socket.getfqdn() + + def _prompt_config(self): + """Ask for the configuration file path.""" + newconfig = raw_input("Store bcfg2 configuration in [%s]: " % + self.configfile) + if newconfig != '': + self.configfile = newconfig + + def _prompt_repopath(self): + """Ask for the repository path.""" + while True: + newrepo = raw_input("Location of bcfg2 repository [%s]: " % + self.repopath) + if newrepo != '': + self.repopath = newrepo + if os.path.isdir(self.repopath): + response = raw_input("Directory %s exists. Overwrite? [y/N]:"\ + % self.repopath) + if response.lower().strip() == 'y': + break + else: + break + + def _prompt_password(self): + """Ask for a password or generate one if none is provided.""" + newpassword = getpass.getpass( + "Input password used for communication verification " + "(without echoing; leave blank for a random): ").strip() + if len(newpassword) != 0: + self.password = newpassword + + def _prompt_server(self): + """Ask for the server name.""" + newserver = raw_input("Input the server location [%s]: " % self.server_uri) + if newserver != '': + self.server_uri = newserver + + def _prompt_groups(self): + """Create the groups.xml file.""" + prompt = '''Input base Operating System for clients:\n''' + for entry in os_list: + prompt += "%d: %s\n" % (os_list.index(entry) + 1, entry[0]) + prompt += ': ' + while True: + try: + self.os_sel = os_list[int(raw_input(prompt))-1][1] + break + except ValueError: + continue + + def _prompt_plugins(self): + default = raw_input("Use default plugins? 
(%s) [Y/n]: " % ''.join(default_plugins)).lower() + if default != 'y' or default != '': + while True: + plugins_are_valid = True + plug_str = raw_input("Specify plugins: ") + plugins = plug_str.split(',') + for plugin in plugins: + plugin = plugin.strip() + if not plugin in plugin_list: + plugins_are_valid = False + print "ERROR: plugin %s not recognized" % plugin + if plugins_are_valid: + break + + def _init_plugins(self): + """Initialize each plugin-specific portion of the repository.""" + for plugin in self.plugins: + if plugin == 'Metadata': + Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(self.repopath, groups, self.os_sel, clients) + else: + try: + module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '', + '', ["Bcfg2.Server.Plugins"]) + cls = getattr(module, plugin) + cls.init_repo(self.repopath) + except Exception, e: + print 'Plugin setup for %s failed: %s\n Check that dependencies are installed?' % (plugin, e) + + def init_repo(self): + """Setup a new repo and create the content of the configuration file.""" + keypath = os.path.dirname(os.path.abspath(self.configfile)) + confdata = config % ( + self.repopath, + ','.join(self.opts['plugins']), + self.opts['sendmail'], + self.opts['proto'], + self.password, + keypath, 'bcfg2.crt', + keypath, 'bcfg2.key', + keypath, 'bcfg2.crt', + self.server_uri + ) + + # Create the configuration file and SSL key + create_conf(self.configfile, confdata) + kpath = keypath + '/bcfg2.key' + cpath = keypath + '/bcfg2.crt' + create_key(self.shostname, kpath, cpath) + + # Create the repository + path = "%s/%s" % (self.repopath, 'etc') + try: + os.makedirs(path) + self._init_plugins() + print "Repository created successfuly in %s" % (self.repopath) + except OSError: + print("Failed to create %s." 
% path) diff --git a/build/lib/Bcfg2/Server/Admin/Minestruct.py b/build/lib/Bcfg2/Server/Admin/Minestruct.py new file mode 100644 index 000000000..02edf2b75 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Minestruct.py @@ -0,0 +1,69 @@ +import getopt +import lxml.etree +import sys + +import Bcfg2.Server.Admin + +class Minestruct(Bcfg2.Server.Admin.StructureMode): + """Pull extra entries out of statistics.""" + __shorthelp__ = "Extract extra entry lists from statistics" + __longhelp__ = (__shorthelp__ + + "\n\nbcfg2-admin minestruct [-f filename] " + "[-g groups] client") + __usage__ = ("bcfg2-admin minestruct [options] <client>\n\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-f <filename>", + "build a particular file", + "-g <groups>", + "only build config for groups")) + + def __init__(self, configfile): + Bcfg2.Server.Admin.StructureMode.__init__(self, configfile, + self.__usage__) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin minestruct help for usage.") + try: + (opts, args) = getopt.getopt(args, 'f:g:h') + except: + self.log.error(self.__shorthelp__) + raise SystemExit(1) + + client = args[0] + output = sys.stdout + groups = [] + + for (opt, optarg) in opts: + if opt == '-f': + try: + output = open(optarg, 'w') + except IOError: + self.log.error("Failed to open file: %s" % (optarg)) + raise SystemExit(1) + elif opt == '-g': + groups = optarg.split(':') + + try: + extra = set() + for source in self.bcore.pull_sources: + for item in source.GetExtra(client): + extra.add(item) + except: + self.log.error("Failed to find extra entry info for client %s" % + client) + raise SystemExit(1) + root = lxml.etree.Element("Base") + self.log.info("Found %d extra entries" % (len(extra))) + add_point = root + for g in groups: + add_point = lxml.etree.SubElement(add_point, "Group", name=g) + for tag, name in extra: + self.log.info("%s: %s" % (tag, name)) + 
lxml.etree.SubElement(add_point, tag, name=name) + + tree = lxml.etree.ElementTree(root) + tree.write(output, pretty_print=True) diff --git a/build/lib/Bcfg2/Server/Admin/Perf.py b/build/lib/Bcfg2/Server/Admin/Perf.py new file mode 100644 index 000000000..6f1cb8dbb --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Perf.py @@ -0,0 +1,37 @@ +import Bcfg2.Options +import Bcfg2.Proxy +import Bcfg2.Server.Admin + +import sys + +class Perf(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("Query server for performance data") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin perf") + __usage__ = ("bcfg2-admin perf") + + def __init__(self, configfile): + Bcfg2.Server.Admin.Mode.__init__(self, configfile) + + def __call__(self, args): + output = [('Name', 'Min', 'Max', 'Mean', 'Count')] + optinfo = { + 'ca': Bcfg2.Options.CLIENT_CA, + 'certificate': Bcfg2.Options.CLIENT_CERT, + 'key': Bcfg2.Options.SERVER_KEY, + 'password': Bcfg2.Options.SERVER_PASSWORD, + 'server': Bcfg2.Options.SERVER_LOCATION, + 'user': Bcfg2.Options.CLIENT_USER, + } + setup = Bcfg2.Options.OptionParser(optinfo) + setup.parse(sys.argv[2:]) + proxy = Bcfg2.Proxy.ComponentProxy(setup['server'], + setup['user'], + setup['password'], + key = setup['key'], + cert = setup['certificate'], + ca = setup['ca']) + data = proxy.get_statistics() + for key, value in data.iteritems(): + data = tuple(["%.06f" % (item) for item in value[:-1]] + [value[-1]]) + output.append((key, ) + data) + self.print_table(output) diff --git a/build/lib/Bcfg2/Server/Admin/Pull.py b/build/lib/Bcfg2/Server/Admin/Pull.py new file mode 100644 index 000000000..aa732c67f --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Pull.py @@ -0,0 +1,138 @@ +import getopt +import sys +import Bcfg2.Server.Admin + +class Pull(Bcfg2.Server.Admin.MetadataCore): + """Pull mode retrieves entries from clients and + integrates the information into the repository. 
+ """ + __shorthelp__ = ("Integrate configuration information " + "from clients into the server repository") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin pull [-v] [-f][-I] [-s] " + "<client> <entry type> <entry name>") + __usage__ = ("bcfg2-admin pull [options] <client> <entry type> " + "<entry name>\n\n" + " %-25s%s\n" + " %-25s%s\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-v", + "be verbose", + "-f", + "force", + "-I", + "interactive", + "-s", + "stdin")) + allowed = ['Metadata', 'BB', "DBStats", "Statistics", "Cfg", "SSHbase"] + + def __init__(self, configfile): + Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile, + self.__usage__) + self.log = False + self.mode = 'interactive' + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + use_stdin = False + try: + opts, gargs = getopt.getopt(args, 'vfIs') + except: + print self.__shorthelp__ + raise SystemExit(1) + for opt in opts: + if opt[0] == '-v': + self.log = True + elif opt[0] == '-f': + self.mode = 'force' + elif opt[0] == '-I': + self.mode == 'interactive' + elif opt[0] == '-s': + use_stdin = True + + if use_stdin: + for line in sys.stdin: + try: + self.PullEntry(*line.split(None, 3)) + except SystemExit: + print " for %s" % line + except: + print "Bad entry: %s" % line.strip() + elif len(gargs) < 3: + print self.__longhelp__ + raise SystemExit(1) + else: + self.PullEntry(gargs[0], gargs[1], gargs[2]) + + def BuildNewEntry(self, client, etype, ename): + """Construct a new full entry for given client/entry from statistics.""" + new_entry = {'type':etype, 'name':ename} + for plugin in self.bcore.pull_sources: + try: + (owner, group, perms, contents) = \ + plugin.GetCurrentEntry(client, etype, ename) + break + except Bcfg2.Server.Plugin.PluginExecutionError: + if plugin == self.bcore.pull_sources[-1]: + print "Pull Source failure; could not fetch current state" + raise SystemExit(1) + + try: + data = {'owner':owner, 'group':group, 'perms':perms, 'text':contents} + except 
UnboundLocalError: + print("Unable to build entry. " + "Do you have a statistics plugin enabled?") + raise SystemExit(1) + for k, v in data.iteritems(): + if v: + new_entry[k] = v + #print new_entry + return new_entry + + def Choose(self, choices): + """Determine where to put pull data.""" + if self.mode == 'interactive': + for choice in choices: + print "Plugin returned choice:" + if id(choice) == id(choices[0]): + print "(current entry)", + if choice.all: + print " => global entry" + elif choice.group: + print (" => group entry: %s (prio %d)" % + (choice.group, choice.prio)) + else: + print " => host entry: %s" % (choice.hostname) + if raw_input("Use this entry? [yN]: ") in ['y', 'Y']: + return choice + return False + else: + # mode == 'force' + if not choices: + return False + return choices[0] + + def PullEntry(self, client, etype, ename): + """Make currently recorded client state correct for entry.""" + new_entry = self.BuildNewEntry(client, etype, ename) + + meta = self.bcore.build_metadata(client) + # find appropriate plugin in bcore + glist = [gen for gen in self.bcore.generators if + ename in gen.Entries.get(etype, {})] + if len(glist) != 1: + self.errExit("Got wrong numbers of matching generators for entry:" \ + + "%s" % ([g.name for g in glist])) + plugin = glist[0] + if not isinstance(plugin, Bcfg2.Server.Plugin.PullTarget): + self.errExit("Configuration upload not supported by plugin %s" \ + % (plugin.name)) + try: + choices = plugin.AcceptChoices(new_entry, meta) + specific = self.Choose(choices) + if specific: + plugin.AcceptPullData(specific, new_entry, self.log) + except Bcfg2.Server.Plugin.PluginExecutionError: + self.errExit("Configuration upload not supported by plugin %s" \ + % (plugin.name)) + # FIXME svn commit if running under svn diff --git a/build/lib/Bcfg2/Server/Admin/Query.py b/build/lib/Bcfg2/Server/Admin/Query.py new file mode 100644 index 000000000..b5af9bad2 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Query.py @@ -0,0 +1,78 @@ 
+import logging +import Bcfg2.Logger +import Bcfg2.Server.Admin + +class Query(Bcfg2.Server.Admin.Mode): + __shorthelp__ = "Query clients" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin query [-n] [-c] " + "[-f filename] g=group p=profile") + __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile>\n\n" + " %-25s%s\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-n", + "query results delimited with newlines", + "-c", + "query results delimited with commas", + "-f filename", + "write query to file")) + + def __init__(self, cfile): + logging.root.setLevel(100) + Bcfg2.Logger.setup_logging(100, to_console=False, to_syslog=False) + Bcfg2.Server.Admin.Mode.__init__(self, cfile) + try: + self.bcore = Bcfg2.Server.Core.Core(self.get_repo_path(), + ['Metadata', 'Probes'], + 'foo', False, 'UTF-8') + except Bcfg2.Server.Core.CoreInitError, msg: + self.errExit("Core load failed because %s" % msg) + self.bcore.fam.handle_events_in_interval(1) + self.meta = self.bcore.metadata + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + clients = self.meta.clients.keys() + filename_arg = False + filename = None + for arg in args: + if filename_arg == True: + filename = arg + filename_arg = False + continue + if arg in ['-n', '-c']: + continue + if arg in ['-f']: + filename_arg = True + continue + try: + k, v = arg.split('=') + except: + print "Unknown argument %s" % arg + continue + if k == 'p': + nc = self.meta.get_client_names_by_profiles(v.split(',')) + elif k == 'g': + nc = self.meta.get_client_names_by_groups(v.split(',')) + # add probed groups (if present) + for conn in self.bcore.connectors: + if isinstance(conn, Bcfg2.Server.Plugins.Probes.Probes): + for c, glist in conn.cgroups.items(): + for g in glist: + if g in v.split(','): + nc.append(c) + else: + print "One of g= or p= must be specified" + raise SystemExit(1) + clients = [c for c in clients if c in nc] + if '-n' in args: + for client in clients: + print client + else: + print ','.join(clients) 
+ if '-f' in args: + f = open(filename, "w") + for client in clients: + f.write(client + "\n") + f.close() + print "Wrote results to %s" % (filename) diff --git a/build/lib/Bcfg2/Server/Admin/Reports.py b/build/lib/Bcfg2/Server/Admin/Reports.py new file mode 100644 index 000000000..a4dd19064 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Reports.py @@ -0,0 +1,357 @@ +'''Admin interface for dynamic reports''' +import Bcfg2.Logger +import Bcfg2.Server.Admin +import ConfigParser +import datetime +import os +import logging +import pickle +import platform +import sys +import traceback +from Bcfg2.Server.Reports.importscript import load_stats +from Bcfg2.Server.Reports.updatefix import update_database +from Bcfg2.Server.Reports.utils import * +from lxml.etree import XML, XMLSyntaxError + +# FIXME: Remove when server python dep is 2.5 or greater +if sys.version_info >= (2, 5): + from hashlib import md5 +else: + from md5 import md5 + +# Load django +import django.core.management + +# FIXME - settings file uses a hardcoded path for /etc/bcfg2.conf +try: + import Bcfg2.Server.Reports.settings +except Exception, e: + sys.stderr.write("Failed to load configuration settings. %s\n" % e) + sys.exit(1) + +project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__) +project_name = os.path.basename(project_directory) +sys.path.append(os.path.join(project_directory, '..')) +project_module = __import__(project_name, '', '', ['']) +sys.path.pop() + +# Set DJANGO_SETTINGS_MODULE appropriately. +os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name +from django.db import connection, transaction + +from Bcfg2.Server.Reports.reports.models import Client, Interaction, Entries, \ + Entries_interactions, Performance, \ + Reason, Ping, TYPE_CHOICES, InternalDatabaseVersion + +def printStats(fn): + """ + Print db stats. + + Decorator for purging. Prints database statistics after a run. 
+ """ + def print_stats(*data): + start_client = Client.objects.count() + start_i = Interaction.objects.count() + start_ei = Entries_interactions.objects.count() + start_perf = Performance.objects.count() + start_ping = Ping.objects.count() + + fn(*data) + + print "Clients removed: %s" % (start_client - Client.objects.count()) + print "Interactions removed: %s" % (start_i - Interaction.objects.count()) + print "Interactions->Entries removed: %s" % \ + (start_ei - Entries_interactions.objects.count()) + print "Metrics removed: %s" % (start_perf - Performance.objects.count()) + print "Ping metrics removed: %s" % (start_ping - Ping.objects.count()) + + return print_stats + +class Reports(Bcfg2.Server.Admin.Mode): + '''Admin interface for dynamic reports''' + __shorthelp__ = "Manage dynamic reports" + __longhelp__ = (__shorthelp__) + __usage__ = ("bcfg2-admin reports [command] [options]\n" + " -v|--verbose Be verbose\n" + " -q|--quiet Print only errors\n" + "\n" + " Commands:\n" + " init Initialize the database\n" + " load_stats Load statistics data\n" + " -s|--stats Path to statistics.xml file\n" + " -c|--clients-file Path to clients.xml file\n" + " -O3 Fast mode. 
Duplicates data!\n" + " purge Purge records\n" + " --client [n] Client to operate on\n" + " --days [n] Records older then n days\n" + " --expired Expired clients only\n" + " scrub Scrub the database for duplicate reasons and orphaned entries\n" + " update Apply any updates to the reporting database\n" + "\n") + + def __init__(self, cfile): + Bcfg2.Server.Admin.Mode.__init__(self, cfile) + self.log.setLevel(logging.INFO) + self.django_commands = [ 'syncdb', 'sqlall', 'validate' ] + self.__usage__ = self.__usage__ + " Django commands:\n " + \ + "\n ".join(self.django_commands) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0 or args[0] == '-h': + print(self.__usage__) + raise SystemExit(0) + + verb = 0 + + if '-v' in args or '--verbose' in args: + self.log.setLevel(logging.DEBUG) + verb = 1 + if '-q' in args or '--quiet' in args: + self.log.setLevel(logging.WARNING) + + # FIXME - dry run + + if args[0] in self.django_commands: + self.django_command_proxy(args[0]) + elif args[0] == 'scrub': + self.scrub() + elif args[0] == 'init': + update_database() + elif args[0] == 'update': + update_database() + elif args[0] == 'load_stats': + quick = '-O3' in args + stats_file=None + clients_file=None + i=1 + while i < len(args): + if args[i] == '-s' or args[i] == '--stats': + stats_file = args[i+1] + if stats_file[0] == '-': + self.errExit("Invalid statistics file: %s" % stats_file) + elif args[i] == '-c' or args[i] == '--clients-file': + clients_file = args[i+1] + if clients_file[0] == '-': + self.errExit("Invalid clients file: %s" % clients_file) + i = i + 1 + self.load_stats(stats_file, clients_file, verb, quick) + elif args[0] == 'purge': + expired=False + client=None + maxdate=None + state=None + i=1 + while i < len(args): + if args[i] == '-c' or args[i] == '--client': + if client: + self.errExit("Only one client per run") + client = args[i+1] + print client + i = i + 1 + elif args[i] == '--days': + if maxdate: + 
self.errExit("Max date specified multiple times") + try: + maxdate = datetime.datetime.now() - datetime.timedelta(days=int(args[i+1])) + except: + self.log.error("Invalid number of days: %s" % args[i+1]) + raise SystemExit, -1 + i = i + 1 + elif args[i] == '--expired': + expired=True + i = i + 1 + if expired: + if state: + self.log.error("--state is not valid with --expired") + raise SystemExit, -1 + self.purge_expired(maxdate) + else: + self.purge(client, maxdate, state) + else: + print "Unknown command: %s" % args[0] + + @transaction.commit_on_success + def scrub(self): + ''' Perform a thorough scrub and cleanup of the database ''' + + # Currently only reasons are a problem + try: + start_count = Reason.objects.count() + except Exception, e: + self.log.error("Failed to load reason objects: %s" % e) + return + dup_reasons = [] + + cmp_reasons = dict() + batch_update = [] + for reason in BatchFetch(Reason.objects): + ''' Loop through each reason and create a key out of the data. \ + This lets us take advantage of a fast hash lookup for \ + comparisons ''' + id = reason.id + reason.id = None + key=md5(pickle.dumps(reason)).hexdigest() + reason.id = id + + if key in cmp_reasons: + self.log.debug("Update interactions from %d to %d" \ + % (reason.id, cmp_reasons[key])) + dup_reasons.append([reason.id]) + batch_update.append([cmp_reasons[key], reason.id]) + else: + cmp_reasons[key] = reason.id + self.log.debug("key %d" % reason.id) + + self.log.debug("Done with updates, deleting dupes") + try: + cursor = connection.cursor() + cursor.executemany('update reports_entries_interactions set reason_id=%s where reason_id=%s', batch_update) + cursor.executemany('delete from reports_reason where id = %s', dup_reasons) + transaction.set_dirty() + except Exception, ex: + self.log.error("Failed to delete reasons: %s" % ex) + raise + + self.log.info("Found %d dupes out of %d" % (len(dup_reasons), start_count)) + + # Cleanup orphans + start_count = Reason.objects.count() + 
Reason.prune_orphans() + self.log.info("Pruned %d Reason records" % (start_count - Reason.objects.count())) + + start_count = Entries.objects.count() + Entries.prune_orphans() + self.log.info("Pruned %d Entries records" % (start_count - Entries.objects.count())) + + def django_command_proxy(self, command): + '''Call a django command''' + if command == 'sqlall': + django.core.management.call_command(command, 'reports') + else: + django.core.management.call_command(command) + + def load_stats(self, stats_file=None, clientspath=None, verb=0, quick=False): + '''Load statistics data into the database''' + location = '' + + if not stats_file: + try: + stats_file = "%s/etc/statistics.xml" % self.cfp.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + self.errExit("Could not read bcfg2.conf; exiting") + try: + statsdata = XML(open(stats_file).read()) + except (IOError, XMLSyntaxError): + self.errExit("StatReports: Failed to parse %s"%(stats_file)) + + if not clientspath: + try: + clientspath = "%s/Metadata/clients.xml" % \ + self.cfp.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + self.errExit("Could not read bcfg2.conf; exiting") + try: + clientsdata = XML(open(clientspath).read()) + except (IOError, XMLSyntaxError): + self.errExit("StatReports: Failed to parse %s"%(clientspath)) + + try: + load_stats(clientsdata, statsdata, verb, self.log, quick=quick, location=platform.node()) + except: + pass + + @printStats + def purge(self, client=None, maxdate=None, state=None): + '''Purge historical data from the database''' + + filtered = False # indicates whether or not a client should be deleted + + if not client and not maxdate and not state: + self.errExit("Reports.prune: Refusing to prune all data") + + ipurge = Interaction.objects + if client: + try: + cobj = Client.objects.get(name=client) + ipurge = ipurge.filter(client=cobj) + except Client.DoesNotExist: + 
self.log.error("Client %s not in database" % client) + raise SystemExit, -1 + self.log.debug("Filtering by client: %s" % client) + + if maxdate: + filtered = True + if not isinstance(maxdate, datetime.datetime): + raise TypeError, "maxdate is not a DateTime object" + self.log.debug("Filtering by maxdate: %s" % maxdate) + ipurge = ipurge.filter(timestamp__lt=maxdate) + + # Handle ping data as well + ping = Ping.objects.filter(endtime__lt=maxdate) + if client: + ping = ping.filter(client=cobj) + ping.delete() + + if state: + filtered = True + if state not in ('dirty','clean','modified'): + raise TypeError, "state is not one of the following values " + \ + "('dirty','clean','modified')" + self.log.debug("Filtering by state: %s" % state) + ipurge = ipurge.filter(state=state) + + count = ipurge.count() + rnum = 0 + try: + while rnum < count: + grp = list(ipurge[:1000].values("id")) + # just in case... + if not grp: + break + Interaction.objects.filter(id__in=[x['id'] for x in grp]).delete() + rnum += len(grp) + self.log.debug("Deleted %s of %s" % (rnum, count)) + except: + self.log.error("Failed to remove interactions") + (a, b, c) = sys.exc_info() + msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1] + del a, b, c + self.log.error(msg) + + # bulk operations bypass the Interaction.delete method + self.log.debug("Pruning orphan Performance objects") + Performance.prune_orphans() + + if client and not filtered: + '''Delete the client, ping data is automatic''' + try: + self.log.debug("Purging client %s" % client) + cobj.delete() + except: + self.log.error("Failed to delete client %s" % client) + (a, b, c) = sys.exc_info() + msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1] + del a, b, c + self.log.error(msg) + + @printStats + def purge_expired(self, maxdate=None): + '''Purge expired clients from the database''' + + if maxdate: + if not isinstance(maxdate, datetime.datetime): + raise TypeError, "maxdate is not a DateTime object" + 
self.log.debug("Filtering by maxdate: %s" % maxdate) + clients = Client.objects.filter(expiration__lt=maxdate) + else: + clients = Client.objects.filter(expiration__isnull=False) + + for client in clients: + self.log.debug("Purging client %s" % client) + Interaction.objects.filter(client=client).delete() + client.delete() + self.log.debug("Pruning orphan Performance objects") + Performance.prune_orphans() + diff --git a/build/lib/Bcfg2/Server/Admin/Snapshots.py b/build/lib/Bcfg2/Server/Admin/Snapshots.py new file mode 100644 index 000000000..004de0ddb --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Snapshots.py @@ -0,0 +1,163 @@ +from datetime import date +import sys + +# prereq issues can be signaled with ImportError, so no try needed +import sqlalchemy, sqlalchemy.orm +import Bcfg2.Server.Admin +import Bcfg2.Server.Snapshots +import Bcfg2.Server.Snapshots.model +from Bcfg2.Server.Snapshots.model import Snapshot, Client, Metadata, Base, \ + File, Group, Package, Service + +class Snapshots(Bcfg2.Server.Admin.Mode): + __shorthelp__ = "Interact with the Snapshots system" + __longhelp__ = (__shorthelp__) + __usage__ = ("bcfg2-admin snapshots [init|query qtype]") + + q_dispatch = {'client':Client, + 'group':Group, + 'metadata':Metadata, + 'package':Package, + 'snapshot':Snapshot} + + def __init__(self, configfile): + Bcfg2.Server.Admin.Mode.__init__(self, configfile) + #self.session = Bcfg2.Server.Snapshots.setup_session(debug=True) + self.session = Bcfg2.Server.Snapshots.setup_session(configfile) + self.cfile = configfile + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + if len(args) == 0 or args[0] == '-h': + print(self.__usage__) + raise SystemExit(0) + + if args[0] == 'query': + if args[1] in self.q_dispatch: + q_obj = self.q_dispatch[args[1]] + if q_obj == Client: + rows = [] + labels = ('Client', 'Active') + for host in \ + self.session.query(q_obj).filter(q_obj.active == False): + rows.append([host.name, 'No']) + for host in \ + 
self.session.query(q_obj).filter(q_obj.active == True): + rows.append([host.name, 'Yes']) + self.print_table([labels]+rows, + justify='left', + hdr=True, + vdelim=" ", + padding=1) + elif q_obj == Group: + print("Groups:") + for group in self.session.query(q_obj).all(): + print(" %s" % group.name) + else: + results = self.session.query(q_obj).all() + else: + print('error') + raise SystemExit(1) + elif args[0] == 'init': + # Initialize the Snapshots database + dbpath = Bcfg2.Server.Snapshots.db_from_config(self.cfile) + engine = sqlalchemy.create_engine(dbpath, echo=True) + metadata = Base.metadata + metadata.create_all(engine) + Session = sqlalchemy.orm.sessionmaker() + Session.configure(bind=engine) + session = Session() + session.commit() + elif args[0] == 'dump': + client = args[1] + snap = Snapshot.get_current(self.session, unicode(client)) + if not snap: + print("Current snapshot for %s not found" % client) + sys.exit(1) + print("Client %s last run at %s" % (client, snap.timestamp)) + for pkg in snap.packages: + print("C:", pkg.correct, 'M:', pkg.modified) + print("start", pkg.start.name, pkg.start.version) + print("end", pkg.end.name, pkg.end.version) + elif args[0] == 'reports': + # bcfg2-admin reporting interface for Snapshots + if '-a' in args[1:]: + # Query all hosts for Name, Status, Revision, Timestamp + q = self.session.query(Client.name, + Snapshot.correct, + Snapshot.revision, + Snapshot.timestamp)\ + .filter(Client.id==Snapshot.client_id)\ + .group_by(Client.id) + rows = [] + labels = ('Client', 'Correct', 'Revision', 'Time') + for item in q.all(): + cli, cor, time, rev = item + rows.append([cli, cor, time, rev]) + self.print_table([labels]+rows, + justify='left', + hdr=True, vdelim=" ", + padding=1) + elif '-b' in args[1:]: + # Query a single host for bad entries + if len(args) < 3: + print("Usage: bcfg2-admin snapshots -b <client>") + return + client = args[2] + snap = Snapshot.get_current(self.session, unicode(client)) + if not snap: + 
print("Current snapshot for %s not found" % client) + sys.exit(1) + print("Bad entries:") + bad_pkgs = [self.session.query(Package) + .filter(Package.id==p.start_id).one().name \ + for p in snap.packages if p.correct == False] + for p in bad_pkgs: + print(" Package:%s" % p) + bad_files = [self.session.query(File) + .filter(File.id==f.start_id).one().name \ + for f in snap.files if f.correct == False] + for filename in bad_files: + print(" File:%s" % filename) + bad_svcs = [self.session.query(Service) + .filter(Service.id==s.start_id).one().name \ + for s in snap.services if s.correct == False] + for svc in bad_svcs: + print(" Service:%s" % svc) + elif '-e' in args[1:]: + # Query a single host for extra entries + client = args[2] + snap = Snapshot.get_current(self.session, unicode(client)) + if not snap: + print("Current snapshot for %s not found" % client) + sys.exit(1) + print("Extra entries:") + for pkg in snap.extra_packages: + print(" Package:%s" % pkg.name) + # FIXME: Do we know about extra files yet? 
+ for f in snap.extra_files: + print(" File:%s" % f.name) + for svc in snap.extra_services: + print(" Service:%s" % svc.name) + elif '--date' in args[1:]: + year, month, day = args[2:] + timestamp = date(int(year), int(month), int(day)) + snaps = [] + for client in self.session.query(Client).filter(Client.active == True): + snaps.append(Snapshot.get_by_date(self.session, + client.name, + timestamp)) + rows = [] + labels = ('Client', 'Correct', 'Revision', 'Time') + for snap in snaps: + rows.append([snap.client.name, + snap.correct, + snap.revision, + snap.timestamp]) + self.print_table([labels]+rows, + justify='left', + hdr=True, + vdelim=" ", + padding=1) + else: + print("Unknown options: ", args[1:]) diff --git a/build/lib/Bcfg2/Server/Admin/Tidy.py b/build/lib/Bcfg2/Server/Admin/Tidy.py new file mode 100644 index 000000000..c02ddf110 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Tidy.py @@ -0,0 +1,66 @@ +import os +import re +import socket + +import Bcfg2.Server.Admin + +class Tidy(Bcfg2.Server.Admin.Mode): + __shorthelp__ = "Clean up useless files in the repo" + __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin tidy [-f] [-I]" + __usage__ = ("bcfg2-admin tidy [options]\n\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-f", + "force", + "-I", + "interactive")) + + def __init__(self, cfile): + Bcfg2.Server.Admin.Mode.__init__(self, cfile) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + badfiles = self.buildTidyList() + if '-f' in args or '-I' in args: + if '-I' in args: + for name in badfiles[:]: + answer = raw_input("Unlink file %s? 
[yN] " % name) + if answer not in ['y', 'Y']: + badfiles.remove(name) + for name in badfiles: + try: + os.unlink(name) + except IOError: + print "Failed to unlink %s" % name + else: + for name in badfiles: + print name + + def buildTidyList(self): + """Clean up unused or unusable files from the repository.""" + hostmatcher = re.compile('.*\.H_(\S+)$') + to_remove = [] + good = [] + bad = [] + + # clean up unresolvable hosts in SSHbase + for name in os.listdir("%s/SSHbase" % (self.get_repo_path())): + if hostmatcher.match(name): + hostname = hostmatcher.match(name).group(1) + if hostname in good + bad: + continue + try: + socket.gethostbyname(hostname) + good.append(hostname) + except: + bad.append(hostname) + for name in os.listdir("%s/SSHbase" % (self.get_repo_path())): + if not hostmatcher.match(name): + to_remove.append("%s/SSHbase/%s" % (self.get_repo_path(), name)) + else: + if hostmatcher.match(name).group(1) in bad: + to_remove.append("%s/SSHbase/%s" % + (self.get_repo_path(), name)) + # clean up file~ + # clean up files without parsable names in Cfg + return to_remove diff --git a/build/lib/Bcfg2/Server/Admin/Viz.py b/build/lib/Bcfg2/Server/Admin/Viz.py new file mode 100644 index 000000000..245ca8398 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Viz.py @@ -0,0 +1,101 @@ +import getopt +from subprocess import Popen, PIPE +import Bcfg2.Server.Admin + +class Viz(Bcfg2.Server.Admin.MetadataCore): + __shorthelp__ = "Produce graphviz diagrams of metadata structures" + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin viz [--includehosts] " + "[--includebundles] [--includekey] " + "[-o output.png] [--raw]") + __usage__ = ("bcfg2-admin viz [options]\n\n" + " %-25s%s\n" + " %-25s%s\n" + " %-25s%s\n" + " %-25s%s\n" % + ("-H, --includehosts", + "include hosts in the viz output", + "-b, --includebundles", + "include bundles in the viz output", + "-k, --includekey", + "show a key for different digraph shapes", + "-o, --outfile <file>", + "write viz output to an output 
file")) + + colors = ['steelblue1', 'chartreuse', 'gold', 'magenta', + 'indianred1', 'limegreen', 'orange1', 'lightblue2', + 'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66'] + plugin_blacklist = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr', 'Packages', + 'Rules', 'Account', 'Decisions', 'Deps', 'Git', 'Svn', + 'Fossil', 'Bzr', 'Bundler', 'TGenshi', 'SGenshi', 'Base'] + + def __init__(self, cfile): + + Bcfg2.Server.Admin.MetadataCore.__init__(self, cfile, + self.__usage__, + pblacklist=self.plugin_blacklist) + + def __call__(self, args): + Bcfg2.Server.Admin.MetadataCore.__call__(self, args) + # First get options to the 'viz' subcommand + try: + opts, args = getopt.getopt(args, 'Hbko:', + ['includehosts', 'includebundles', + 'includekey', 'outfile=']) + except getopt.GetoptError, msg: + print msg + + #FIXME: is this for --raw? + #rset = False + hset = False + bset = False + kset = False + outputfile = False + for opt, arg in opts: + if opt in ("-H", "--includehosts"): + hset = True + elif opt in ("-b", "--includebundles"): + bset = True + elif opt in ("-k", "--includekey"): + kset = True + elif opt in ("-o", "--outfile"): + outputfile = arg + + data = self.Visualize(self.get_repo_path(), hset, bset, + kset, outputfile) + print data + raise SystemExit, 0 + + def Visualize(self, repopath, hosts=False, + bundles=False, key=False, output=False): + """Build visualization of groups file.""" + if output: + format = output.split('.')[-1] + else: + format = 'png' + + cmd = "dot -T%s" % (format) + if output: + cmd += " -o %s" % output + dotpipe = Popen(cmd, shell=True, stdin=PIPE, + stdout=PIPE, close_fds=True) + try: + dotpipe.stdin.write("digraph groups {\n") + except: + print "write to dot process failed. Is graphviz installed?" 
+ raise SystemExit(1) + dotpipe.stdin.write('\trankdir="LR";\n') + dotpipe.stdin.write(self.metadata.viz(hosts, bundles, + key, self.colors)) + if key: + dotpipe.stdin.write("\tsubgraph cluster_key {\n") + dotpipe.stdin.write('''\tstyle="filled";\n''') + dotpipe.stdin.write('''\tcolor="lightblue";\n''') + dotpipe.stdin.write('''\tBundle [ shape="septagon" ];\n''') + dotpipe.stdin.write('''\tGroup [shape="ellipse"];\n''') + dotpipe.stdin.write('''\tProfile [style="bold", shape="ellipse"];\n''') + dotpipe.stdin.write('''\tHblock [label="Host1|Host2|Host3", shape="record"];\n''') + dotpipe.stdin.write('''\tlabel="Key";\n''') + dotpipe.stdin.write("\t}\n") + dotpipe.stdin.write("}\n") + dotpipe.stdin.close() + return dotpipe.stdout.read() diff --git a/build/lib/Bcfg2/Server/Admin/Web.py b/build/lib/Bcfg2/Server/Admin/Web.py new file mode 100644 index 000000000..5ad14f2b9 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/Web.py @@ -0,0 +1,79 @@ +import os +import sys +import BaseHTTPServer +import SimpleHTTPServer +import daemon +import Bcfg2.Server.Admin +import Bcfg2.Options + +# For debugging output only +import logging +logger = logging.getLogger('Bcfg2.Server.Admin.Web') + +class Web(Bcfg2.Server.Admin.Mode): + __shorthelp__ = "A simple webserver to display the content of the Bcfg2 repos." 
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin web start\n" + "\n\nbcfg2-admin web stop") + __usage__ = ("bcfg2-admin web [start|stop]") + + def __init__(self, configfile): + Bcfg2.Server.Admin.Mode.__init__(self, configfile) + + def __call__(self, args): + Bcfg2.Server.Admin.Mode.__call__(self, args) + opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY} + setup = Bcfg2.Options.OptionParser(opts) + setup.parse(sys.argv[1:]) + repo = setup['repo'] + + if len(args) == 0 or args[0] == '-h': + print(self.__usage__) + raise SystemExit(0) + + if len(args) == 0: + self.errExit("No argument specified.\n" + "Please see bcfg2-admin web help for usage.") + + if args[0] in ['start', 'up']: + # Change directory to the Bcfg2 repo + if not os.path.exists(repo): + #print "Path '%s' doesn't exisit" % repo + logger.error("%s doesn't exist" % repo) + else: + os.chdir(repo) + self.start_web() + + elif args[0] in ['stop', 'down']: + self.stop_web() + + else: + print "No command specified" + raise SystemExit(1) + + # The web server part with hardcoded port number + def start_web(self, port=6788): + """Starts the webserver for directory listing of the Bcfg2 repo.""" + try: + server_class = BaseHTTPServer.HTTPServer + handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler + server_address = ('', port) + server = server_class(server_address, handler_class) + #server.serve_forever() + # Make the context manager for becoming a daemon process + daemon_context = daemon.DaemonContext() + daemon_context.files_preserve = [server.fileno()] + + # Become a daemon process + with daemon_context: + server.serve_forever() + except: + logger.error("Failed to start webserver") + #raise Bcfg2.Server.Admin.AdminInitError + + def stop_web(self): + """Stops the webserver.""" +# self.shutdown = 1 + self.shutdown() + # self.stopped = True +# self.serve_forever() + diff --git a/build/lib/Bcfg2/Server/Admin/Xcmd.py b/build/lib/Bcfg2/Server/Admin/Xcmd.py new file mode 100644 index 000000000..80d5cfb25 --- 
/dev/null +++ b/build/lib/Bcfg2/Server/Admin/Xcmd.py @@ -0,0 +1,49 @@ +import Bcfg2.Options +import Bcfg2.Proxy +import Bcfg2.Server.Admin + +import sys +import xmlrpclib + +class Xcmd(Bcfg2.Server.Admin.Mode): + __shorthelp__ = ("XML-RPC Command Interface") + __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin xcmd command") + __usage__ = ("bcfg2-admin xcmd <command>") + + def __call__(self, args): + optinfo = { + 'server': Bcfg2.Options.SERVER_LOCATION, + 'user': Bcfg2.Options.CLIENT_USER, + 'password': Bcfg2.Options.SERVER_PASSWORD, + 'key': Bcfg2.Options.SERVER_KEY, + 'certificate' : Bcfg2.Options.CLIENT_CERT, + 'ca' : Bcfg2.Options.CLIENT_CA + } + setup = Bcfg2.Options.OptionParser(optinfo) + setup.parse(sys.argv[2:]) + Bcfg2.Proxy.RetryMethod.max_retries = 1 + proxy = Bcfg2.Proxy.ComponentProxy(setup['server'], + setup['user'], + setup['password'], + key = setup['key'], + cert = setup['certificate'], + ca = setup['ca'], timeout=180) + if len(setup['args']) == 0: + print("Usage: xcmd <xmlrpc method> <optional arguments>") + return + cmd = setup['args'][0] + args = () + if len(setup['args']) > 1: + args = tuple(setup['args'][1:]) + try: + data = apply(getattr(proxy, cmd), args) + except xmlrpclib.Fault, flt: + if flt.faultCode == 7: + print("Unknown method %s" % cmd) + return + elif flt.faultCode == 20: + return + else: + raise + if data != None: + print data diff --git a/build/lib/Bcfg2/Server/Admin/__init__.py b/build/lib/Bcfg2/Server/Admin/__init__.py new file mode 100644 index 000000000..bb5c41895 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/__init__.py @@ -0,0 +1,114 @@ +__revision__ = '$Revision$' + +__all__ = ['Mode', 'Client', 'Compare', 'Init', 'Minestruct', 'Perf', + 'Pull', 'Query', 'Reports', 'Snapshots', 'Tidy', 'Viz', + 'Xcmd', 'Group', 'Backup'] + +import ConfigParser +import logging +import lxml.etree +import sys + +import Bcfg2.Server.Core +import Bcfg2.Options + +class ModeOperationError(Exception): + pass + +class Mode(object): + """Help 
message has not yet been added for mode.""" + __shorthelp__ = 'Shorthelp not defined yet' + __longhelp__ = 'Longhelp not defined yet' + __args__ = [] + def __init__(self, configfile): + self.configfile = configfile + self.__cfp = False + self.log = logging.getLogger('Bcfg2.Server.Admin.Mode') + + def getCFP(self): + if not self.__cfp: + self.__cfp = ConfigParser.ConfigParser() + self.__cfp.read(self.configfile) + return self.__cfp + + cfp = property(getCFP) + + def __call__(self, args): + if len(args) > 0 and args[0] == 'help': + print self.__longhelp__ + raise SystemExit(0) + + def errExit(self, emsg): + print emsg + raise SystemExit(1) + + def get_repo_path(self): + """Return repository path""" + return self.cfp.get('server', 'repository') + + def load_stats(self, client): + stats = lxml.etree.parse("%s/etc/statistics.xml" % + (self.get_repo_path())) + hostent = stats.xpath('//Node[@name="%s"]' % client) + if not hostent: + self.errExit("Could not find stats for client %s" % (client)) + return hostent[0] + + def print_table(self, rows, justify='left', hdr=True, vdelim=" ", padding=1): + """Pretty print a table + + rows - list of rows ([[row 1], [row 2], ..., [row n]]) + hdr - if True the first row is treated as a table header + vdelim - vertical delimiter between columns + padding - # of spaces around the longest element in the column + justify - may be left,center,right + + """ + hdelim = "=" + justify = {'left':str.ljust, + 'center':str.center, + 'right':str.rjust}[justify.lower()] + + """ + Calculate column widths (longest item in each column + plus padding on both sides) + + """ + cols = list(zip(*rows)) + colWidths = [max([len(str(item))+2*padding for \ + item in col]) for col in cols] + borderline = vdelim.join([w*hdelim for w in colWidths]) + + # print out the table + print(borderline) + for row in rows: + print(vdelim.join([justify(str(item), width) for \ + (item, width) in zip(row, colWidths)])) + if hdr: + print(borderline) + hdr = False + +class 
MetadataCore(Mode): + """Base class for admin-modes that handle metadata.""" + def __init__(self, configfile, usage, pwhitelist=None, pblacklist=None): + Mode.__init__(self, configfile) + options = {'plugins': Bcfg2.Options.SERVER_PLUGINS, + 'configfile': Bcfg2.Options.CFILE} + setup = Bcfg2.Options.OptionParser(options) + setup.hm = usage + setup.parse(sys.argv[1:]) + if pwhitelist is not None: + setup['plugins'] = [x for x in setup['plugins'] if x in pwhitelist] + elif pblacklist is not None: + setup['plugins'] = [x for x in setup['plugins'] if x not in pblacklist] + try: + self.bcore = Bcfg2.Server.Core.Core(self.get_repo_path(), + setup['plugins'], + 'foo', 'UTF-8') + except Bcfg2.Server.Core.CoreInitError, msg: + self.errExit("Core load failed because %s" % msg) + self.bcore.fam.handle_events_in_interval(5) + self.metadata = self.bcore.metadata + +class StructureMode(MetadataCore): + pass diff --git a/build/lib/Bcfg2/Server/Admin/test.py b/build/lib/Bcfg2/Server/Admin/test.py new file mode 100644 index 000000000..06271b186 --- /dev/null +++ b/build/lib/Bcfg2/Server/Admin/test.py @@ -0,0 +1,73 @@ +import os +import time +import tarfile +import sys +datastore = '/var/lib/bcfg2' + +#Popen(['git', 'clone', 'https://github.com/solj/bcfg2-repo.git', datastore]) +#timestamp = time.strftime('%Y%m%d%H%M%S') +#format = 'gz' +#mode = 'w:' + format +#filename = timestamp + '.tar' + '.' 
+ format +#out = tarfile.open('/home/fab/' + filename, mode=mode) + + +#content = os.listdir(os.getcwd()) +#for item in content: +# out.add(item) +#out.close() +#print "Archive %s was stored.\nLocation: %s" % (filename, datastore) + +#print os.getcwd() +#print os.listdir(os.getcwd()) + +#import shlex +#args = shlex.split('env LC_ALL=C git clone https://github.com/solj/bcfg2-repo.git datastore') +#print args + +#Popen("env LC_ALL=C git clone https://github.com/solj/bcfg2-repo.git datastore") + +#timestamp = time.strftime('%Y%m%d%H%M%S') +#format = 'gz' +#mode = 'w:' + format +#filename = timestamp + '.tar' + '.' + format +#out = tarfile.open(name = filename, mode = mode) +##content = os.listdir(datastore) +##for item in content: +## out.add(item) +##out.close() + +###t = tarfile.open(name = destination, mode = 'w:gz') +#out.add(datastore, os.path.basename(datastore)) +#out.close() + +#print datastore, os.path.basename(datastore) + +#content = os.listdir(datastore) +#for item in content: +# #out.add(item) +# print item + +#timestamp = time.strftime('%Y%m%d%H%M%S') +#format = 'gz' +#mode = 'w:' + format +#filename = timestamp + '.tar' + '.' 
+ format + +if len(sys.argv) == 0: + destination = datastore + '/' +else: + destination = sys.argv[1] + +print destination +#out = tarfile.open(destination + filename, mode=mode) +#out.add(self.datastore, os.path.basename(self.datastore)) +#out.close() +#print "Archive %s was stored at %s" % (filename, destination) + +#print 'Die Kommandozeilenparameter sind:' +##for i in sys.argv: +## print i + +#print sys.argv[0] +#print sys.argv[1] +##print sys.argv[2] diff --git a/build/lib/Bcfg2/Server/Core.py b/build/lib/Bcfg2/Server/Core.py new file mode 100644 index 000000000..ac67b8a69 --- /dev/null +++ b/build/lib/Bcfg2/Server/Core.py @@ -0,0 +1,439 @@ +"""Bcfg2.Server.Core provides the runtime support for Bcfg2 modules.""" +__revision__ = '$Revision$' + +import atexit +import logging +import lxml.etree +import select +import threading +import time +import xmlrpclib + +from Bcfg2.Component import Component, exposed +from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError +import Bcfg2.Server.FileMonitor +import Bcfg2.Server.Plugins.Metadata + +logger = logging.getLogger('Bcfg2.Server.Core') + +def critical_error(operation): + """Log and err, traceback and return an xmlrpc fault to client.""" + logger.error(operation, exc_info=1) + raise xmlrpclib.Fault(7, "Critical unexpected failure: %s" % (operation)) + +try: + import psyco + psyco.full() +except: + pass + +class CoreInitError(Exception): + """This error is raised when the core cannot be initialized.""" + pass + +class Core(Component): + """The Core object is the container for all Bcfg2 Server logic and modules.""" + name = 'bcfg2-server' + implementation = 'bcfg2-server' + + def __init__(self, repo, plugins, password, encoding, + cfile='/etc/bcfg2.conf', ca=None, + filemonitor='default', start_fam_thread=False): + Component.__init__(self) + self.datastore = repo + if filemonitor not in Bcfg2.Server.FileMonitor.available: + logger.error("File monitor driver %s not available; forcing to default" % 
filemonitor) + filemonitor = 'default' + try: + self.fam = Bcfg2.Server.FileMonitor.available[filemonitor]() + except IOError: + logger.error("Failed to instantiate fam driver %s" % filemonitor, + exc_info=1) + raise CoreInitError, "failed to instantiate fam driver (used %s)" % \ + filemonitor + self.pubspace = {} + self.cfile = cfile + self.cron = {} + self.plugins = {} + self.plugin_blacklist = {} + self.revision = '-1' + self.password = password + self.encoding = encoding + atexit.register(self.shutdown) + # Create an event to signal worker threads to shutdown + self.terminate = threading.Event() + + if '' in plugins: + plugins.remove('') + + for plugin in plugins: + if not plugin in self.plugins: + self.init_plugins(plugin) + # Remove blacklisted plugins + for p, bl in self.plugin_blacklist.items(): + if len(bl) > 0: + logger.error("The following plugins conflict with %s;" + "Unloading %s" % (p, bl)) + for plug in bl: + del self.plugins[plug] + # This section loads the experimental plugins + expl = [plug for (name, plug) in self.plugins.iteritems() + if plug.experimental] + if expl: + logger.info("Loading experimental plugin(s): %s" % \ + (" ".join([x.name for x in expl]))) + logger.info("NOTE: Interfaces subject to change") + depr = [plug for (name, plug) in self.plugins.iteritems() + if plug.deprecated] + # This section loads the deprecated plugins + if depr: + logger.info("Loading deprecated plugin(s): %s" % \ + (" ".join([x.name for x in depr]))) + + + mlist = [p for p in self.plugins.values() if \ + isinstance(p, Bcfg2.Server.Plugin.Metadata)] + if len(mlist) == 1: + self.metadata = mlist[0] + else: + logger.error("No Metadata Plugin loaded; failed to instantiate Core") + raise CoreInitError, "No Metadata Plugin" + self.statistics = [plugin for plugin in self.plugins.values() if \ + isinstance(plugin, Bcfg2.Server.Plugin.Statistics)] + self.pull_sources = [plugin for plugin in self.statistics if \ + isinstance(plugin, Bcfg2.Server.Plugin.PullSource)] + 
self.generators = [plugin for plugin in self.plugins.values() if \ + isinstance(plugin, Bcfg2.Server.Plugin.Generator)] + self.structures = [plugin for plugin in self.plugins.values() if \ + isinstance(plugin, Bcfg2.Server.Plugin.Structure)] + self.connectors = [plugin for plugin in self.plugins.values() if \ + isinstance(plugin, Bcfg2.Server.Plugin.Connector)] + self.ca = ca + self.fam_thread = threading.Thread(target=self._file_monitor_thread) + if start_fam_thread: + self.fam_thread.start() + + def _file_monitor_thread(self): + """The thread for monitor the files.""" + famfd = self.fam.fileno() + terminate = self.terminate + while not terminate.isSet(): + try: + if famfd: + select.select([famfd], [], [], 2) + else: + if not self.fam.pending(): + terminate.wait(15) + self.fam.handle_event_set(self.lock) + except: + continue + # VCS plugin periodic updates + for plugin in self.plugins.values(): + if isinstance(plugin, Bcfg2.Server.Plugin.Version): + self.revision = plugin.get_revision() + + def init_plugins(self, plugin): + """Handling for the plugins.""" + try: + mod = getattr(__import__("Bcfg2.Server.Plugins.%s" % + (plugin)).Server.Plugins, plugin) + except ImportError, e: + try: + mod = __import__(plugin) + except: + logger.error("Failed to load plugin %s" % (plugin)) + return + plug = getattr(mod, plugin) + # Blacklist conflicting plugins + cplugs = [conflict for conflict in plug.conflicts + if conflict in self.plugins] + self.plugin_blacklist[plug.name] = cplugs + try: + self.plugins[plugin] = plug(self, self.datastore) + except PluginInitError: + logger.error("Failed to instantiate plugin %s" % (plugin)) + except: + logger.error("Unexpected instantiation failure for plugin %s" % + (plugin), exc_info=1) + + def shutdown(self): + """Shuting down the plugins.""" + if not self.terminate.isSet(): + self.terminate.set() + for plugin in self.plugins.values(): + plugin.shutdown() + + def validate_data(self, metadata, data, base_cls): + """Checks the data 
structure.""" + for plugin in self.plugins.values(): + if isinstance(plugin, base_cls): + try: + if base_cls == Bcfg2.Server.Plugin.StructureValidator: + plugin.validate_structures(metadata, data) + elif base_cls == Bcfg2.Server.Plugin.GoalValidator: + plugin.validate_goals(metadata, data) + except Bcfg2.Server.Plugin.ValidationError, err: + logger.error("Plugin %s structure validation failed: %s" \ + % (plugin.name, err.message)) + raise + except: + logger.error("Plugin %s: unexpected structure validation failure" \ + % (plugin.name), exc_info=1) + + def GetStructures(self, metadata): + """Get all structures for client specified by metadata.""" + structures = reduce(lambda x, y:x+y, + [struct.BuildStructures(metadata) for struct \ + in self.structures], []) + sbundles = [b.get('name') for b in structures if b.tag == 'Bundle'] + missing = [b for b in metadata.bundles if b not in sbundles] + if missing: + logger.error("Client %s configuration missing bundles: %s" \ + % (metadata.hostname, ':'.join(missing))) + return structures + + def BindStructure(self, structure, metadata): + """Bind a complete structure.""" + for entry in structure.getchildren(): + if entry.tag.startswith("Bound"): + entry.tag = entry.tag[5:] + continue + try: + self.Bind(entry, metadata) + except PluginExecutionError: + if 'failure' not in entry.attrib: + entry.set('failure', 'bind error') + logger.error("Failed to bind entry: %s %s" % \ + (entry.tag, entry.get('name'))) + except: + logger.error("Unexpected failure in BindStructure: %s %s" \ + % (entry.tag, entry.get('name')), exc_info=1) + + def Bind(self, entry, metadata): + """Bind an entry using the appropriate generator.""" + if 'altsrc' in entry.attrib: + oldname = entry.get('name') + entry.set('name', entry.get('altsrc')) + entry.set('realname', oldname) + del entry.attrib['altsrc'] + try: + ret = self.Bind(entry, metadata) + entry.set('name', oldname) + del entry.attrib['realname'] + return ret + except: + entry.set('name', oldname) + 
logger.error("Failed binding entry %s:%s with altsrc %s" \ + % (entry.tag, entry.get('name'), + entry.get('altsrc'))) + logger.error("Falling back to %s:%s" % (entry.tag, + entry.get('name'))) + + glist = [gen for gen in self.generators if + entry.get('name') in gen.Entries.get(entry.tag, {})] + if len(glist) == 1: + return glist[0].Entries[entry.tag][entry.get('name')](entry, metadata) + elif len(glist) > 1: + generators = ", ".join([gen.name for gen in glist]) + logger.error("%s %s served by multiple generators: %s" % \ + (entry.tag, entry.get('name'), generators)) + g2list = [gen for gen in self.generators if + gen.HandlesEntry(entry, metadata)] + if len(g2list) == 1: + return g2list[0].HandleEntry(entry, metadata) + entry.set('failure', 'no matching generator') + raise PluginExecutionError, (entry.tag, entry.get('name')) + + def BuildConfiguration(self, client): + """Build configuration for clients.""" + start = time.time() + config = lxml.etree.Element("Configuration", version='2.0', \ + revision=self.revision) + try: + meta = self.build_metadata(client) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + logger.error("Metadata consistency error for client %s" % client) + return lxml.etree.Element("error", type='metadata error') + + try: + structures = self.GetStructures(meta) + except: + logger.error("error in GetStructures", exc_info=1) + return lxml.etree.Element("error", type='structure error') + + self.validate_data(meta, structures, + Bcfg2.Server.Plugin.StructureValidator) + + # Perform altsrc consistency checking + esrcs = {} + for struct in structures: + for entry in struct: + key = (entry.tag, entry.get('name')) + if key in esrcs: + if esrcs[key] != entry.get('altsrc'): + logger.error("Found inconsistent altsrc mapping for entry %s:%s" % key) + else: + esrcs[key] = entry.get('altsrc', None) + del esrcs + + for astruct in structures: + try: + self.BindStructure(astruct, meta) + config.append(astruct) + except: + logger.error("error in 
BindStructure", exc_info=1) + self.validate_data(meta, config, Bcfg2.Server.Plugin.GoalValidator) + logger.info("Generated config for %s in %.03fs" % \ + (client, time.time() - start)) + return config + + def GetDecisions(self, metadata, mode): + """Get data for the decision list.""" + result = [] + for plugin in self.plugins.values(): + try: + if isinstance(plugin, Bcfg2.Server.Plugin.Decision): + result += plugin.GetDecisions(metadata, mode) + except: + logger.error("Plugin: %s failed to generate decision list" \ + % plugin.name, exc_info=1) + return result + + def build_metadata(self, client_name): + """Build the metadata structure.""" + if not hasattr(self, 'metadata'): + # some threads start before metadata is even loaded + raise Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError + imd = self.metadata.get_initial_metadata(client_name) + for conn in self.connectors: + grps = conn.get_additional_groups(imd) + self.metadata.merge_additional_groups(imd, grps) + for conn in self.connectors: + data = conn.get_additional_data(imd) + self.metadata.merge_additional_data(imd, conn.name, data) + imd.query.by_name = self.build_metadata + return imd + + def process_statistics(self, client_name, statistics): + """Proceed statistics for client.""" + meta = self.build_metadata(client_name) + state = statistics.find(".//Statistics") + if state.get('version') >= '2.0': + for plugin in self.statistics: + try: + plugin.process_statistics(meta, statistics) + except: + logger.error("Plugin %s failed to process stats from %s" \ + % (plugin.name, meta.hostname), + exc_info=1) + + logger.info("Client %s reported state %s" % (client_name, + state.get('state'))) + # XMLRPC handlers start here + + @exposed + def GetProbes(self, address): + """Fetch probes for a particular client.""" + resp = lxml.etree.Element('probes') + try: + name = self.metadata.resolve_client(address) + meta = self.build_metadata(name) + + for plugin in [p for p in list(self.plugins.values()) \ + if isinstance(p, 
Bcfg2.Server.Plugin.Probing)]: + for probe in plugin.GetProbes(meta): + resp.append(probe) + return lxml.etree.tostring(resp, encoding='UTF-8', + xml_declaration=True) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + warning = 'Client metadata resolution error for %s; check server log' % address[0] + self.logger.warning(warning) + raise xmlrpclib.Fault(6, warning) + except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError: + err_msg = 'Metadata system runtime failure' + self.logger.error(err_msg) + raise xmlrpclib.Fault(6, err_msg) + except: + critical_error("Error determining client probes") + + @exposed + def RecvProbeData(self, address, probedata): + """Receive probe data from clients.""" + try: + name = self.metadata.resolve_client(address) + meta = self.build_metadata(name) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + warning = 'Metadata consistency error' + self.logger.warning(warning) + raise xmlrpclib.Fault(6, warning) + # clear dynamic groups + self.metadata.cgroups[meta.hostname] = [] + try: + xpdata = lxml.etree.XML(probedata) + except: + self.logger.error("Failed to parse probe data from client %s" % \ + (address[0])) + return False + + sources = [] + [sources.append(data.get('source')) for data in xpdata + if data.get('source') not in sources] + for source in sources: + if source not in self.plugins: + self.logger.warning("Failed to locate plugin %s" % (source)) + continue + dl = [data for data in xpdata if data.get('source') == source] + try: + self.plugins[source].ReceiveData(meta, dl) + except: + logger.error("Failed to process probe data from client %s" % \ + (address[0]), exc_info=1) + return True + + @exposed + def AssertProfile(self, address, profile): + """Set profile for a client.""" + try: + client = self.metadata.resolve_client(address) + self.metadata.set_profile(client, profile, address) + except (Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError, + 
Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError): + warning = 'Metadata consistency error' + self.logger.warning(warning) + raise xmlrpclib.Fault(6, warning) + return True + + @exposed + def GetConfig(self, address, checksum=False): + """Build config for a client.""" + try: + client = self.metadata.resolve_client(address) + config = self.BuildConfiguration(client) + return lxml.etree.tostring(config, encoding='UTF-8', + xml_declaration=True) + except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError: + self.logger.warning("Metadata consistency failure for %s" % (address)) + raise xmlrpclib.Fault(6, "Metadata consistency failure") + + @exposed + def RecvStats(self, address, stats): + """Act on statistics upload.""" + sdata = lxml.etree.XML(stats) + client = self.metadata.resolve_client(address) + self.process_statistics(client, sdata) + return "<ok/>" + + def authenticate(self, cert, user, password, address): + if self.ca: + acert = cert + else: + # No ca, so no cert validation can be done + acert = None + return self.metadata.AuthenticateConnection(acert, user, password, address) + + @exposed + def GetDecisionList(self, address, mode): + """Get the data of the decision list.""" + client = self.metadata.resolve_client(address) + meta = self.build_metadata(client) + return self.GetDecisions(meta, mode) diff --git a/build/lib/Bcfg2/Server/FileMonitor.py b/build/lib/Bcfg2/Server/FileMonitor.py new file mode 100644 index 000000000..0f09f7751 --- /dev/null +++ b/build/lib/Bcfg2/Server/FileMonitor.py @@ -0,0 +1,307 @@ +"""Bcfg2.Server.FileMonitor provides the support for monitorung files.""" + +import logging +import os +import stat +from time import sleep, time + +logger = logging.getLogger('Bcfg2.Server.FileMonitor') + +def ShouldIgnore(event): + """Test if the event should be suppresed.""" + # FIXME should move event suppression out of the core + if event.filename.split('/')[-1] == '.svn': + return True + if event.filename.endswith('~') or \ + 
event.filename.startswith('#') or event.filename.startswith('.#'): + #logger.error("Suppressing event for file %s" % (event.filename)) + return True + return False + +class Event(object): + def __init__(self, request_id, filename, code): + self.requestID = request_id + self.filename = filename + self.action = code + + def code2str(self): + """return static code for event""" + return self.action + +available = {} +class FileMonitor(object): + """File Monitor baseclass.""" + def __init__(self, debug=False): + object.__init__(self) + self.debug = debug + self.handles = dict() + + def get_event(self): + return None + + def pending(self): + return False + + def fileno(self): + return 0 + + def handle_one_event(self, event): + if ShouldIgnore(event): + return + if event.requestID not in self.handles: + logger.info("Got event for unexpected id %s, file %s" % + (event.requestID, event.filename)) + return + if self.debug: + logger.info("Dispatching event %s %s to obj %s" \ + % (event.code2str(), event.filename, + self.handles[event.requestID])) + try: + self.handles[event.requestID].HandleEvent(event) + except: + logger.error("error in handling of gamin event for %s" % \ + (event.filename), exc_info=1) + + def handle_event_set(self, lock=None): + count = 1 + event = self.get_event() + start = time() + if lock: + lock.acquire() + try: + self.handle_one_event(event) + while self.pending(): + self.handle_one_event(self.get_event()) + count += 1 + except: + pass + if lock: + lock.release() + end = time() + logger.info("Handled %d events in %.03fs" % (count, (end-start))) + + def handle_events_in_interval(self, interval): + end = time() + interval + while time() < end: + if self.pending(): + self.handle_event_set() + end = time() + interval + else: + sleep(0.5) + + +class FamFam(object): + """The fam object is a set of callbacks for file alteration events (FAM support).""" + + def __init__(self): + object.__init__(self) + self.fm = _fam.open() + self.users = {} + self.handles = 
{} + self.debug = False + + def fileno(self): + """Return fam file handle number.""" + return self.fm.fileno() + + def handle_event_set(self, _): + self.Service() + + def handle_events_in_interval(self, interval): + now = time() + while (time() - now) < interval: + if self.Service(): + now = time() + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + mode = os.stat(path)[stat.ST_MODE] + if stat.S_ISDIR(mode): + handle = self.fm.monitorDirectory(path, None) + else: + handle = self.fm.monitorFile(path, None) + self.handles[handle.requestID()] = handle + if obj != None: + self.users[handle.requestID()] = obj + return handle.requestID() + + def Service(self, interval=0.50): + """Handle all fam work.""" + count = 0 + collapsed = 0 + rawevents = [] + start = time() + now = time() + while (time() - now) < interval: + if self.fm.pending(): + while self.fm.pending(): + count += 1 + rawevents.append(self.fm.nextEvent()) + now = time() + unique = [] + bookkeeping = [] + for event in rawevents: + if ShouldIgnore(event): + continue + if event.code2str() != 'changed': + # process all non-change events + unique.append(event) + else: + if (event.filename, event.requestID) not in bookkeeping: + bookkeeping.append((event.filename, event.requestID)) + unique.append(event) + else: + collapsed += 1 + for event in unique: + if event.requestID in self.users: + try: + self.users[event.requestID].HandleEvent(event) + except: + logger.error("handling event for file %s" % (event.filename), exc_info=1) + end = time() + logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" % + (count, (end - start), collapsed)) + return count + + + +class Fam(FileMonitor): + """ + The fam object is a set of callbacks for + file alteration events (FAM support). 
+ """ + + def __init__(self, debug=False): + FileMonitor.__init__(self, debug) + self.fm = _fam.open() + + def fileno(self): + return self.fm.fileno() + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + mode = os.stat(path)[stat.ST_MODE] + if stat.S_ISDIR(mode): + handle = self.fm.monitorDirectory(path, None) + else: + handle = self.fm.monitorFile(path, None) + if obj != None: + self.handles[handle.requestID()] = obj + return handle.requestID() + + def pending(self): + return self.fm.pending() + + def get_event(self): + return self.fm.nextEvent() + +class Pseudo(FileMonitor): + """ + The fam object is a set of callbacks for + file alteration events (static monitor support). + """ + + def __init__(self, debug=False): + FileMonitor.__init__(self, debug=False) + self.pending_events = [] + + def pending(self): + return len(self.pending_events) != 0 + + def get_event(self): + return self.pending_events.pop() + + def AddMonitor(self, path, obj): + """add a monitor to path, installing a callback to obj.HandleEvent""" + handleID = len(self.handles.keys()) + mode = os.stat(path)[stat.ST_MODE] + handle = Event(handleID, path, 'exists') + if stat.S_ISDIR(mode): + dirList = os.listdir(path) + self.pending_events.append(handle) + for includedFile in dirList: + self.pending_events.append(Event(handleID, includedFile, 'exists')) + self.pending_events.append(Event(handleID, path, 'endExist')) + else: + self.pending_events.append(Event(handleID, path, 'exists')) + if obj != None: + self.handles[handleID] = obj + return handleID + + +try: + from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \ + GAMChanged, GAMDeleted, GAMMoved + + class GaminEvent(Event): + """ + This class provides an event analogous to + python-fam events based on gamin sources. 
+ """ + def __init__(self, request_id, filename, code): + Event.__init__(self, request_id, filename, code) + action_map = {GAMCreated: 'created', GAMExists: 'exists', + GAMChanged: 'changed', GAMDeleted: 'deleted', + GAMEndExist: 'endExist', GAMMoved: 'moved'} + if code in action_map: + self.action = action_map[code] + + class Gamin(FileMonitor): + """ + The fam object is a set of callbacks for + file alteration events (Gamin support) + """ + def __init__(self, debug=False): + FileMonitor.__init__(self, debug) + self.mon = WatchMonitor() + self.counter = 0 + self.events = [] + + def fileno(self): + return self.mon.get_fd() + + def queue(self, path, action, request_id): + """queue up the event for later handling""" + self.events.append(GaminEvent(request_id, path, action)) + + def AddMonitor(self, path, obj): + """Add a monitor to path, installing a callback to obj.HandleEvent.""" + handle = self.counter + self.counter += 1 + mode = os.stat(path)[stat.ST_MODE] + + # Flush queued gamin events + while self.mon.event_pending(): + self.mon.handle_one_event() + + if stat.S_ISDIR(mode): + self.mon.watch_directory(path, self.queue, handle) + else: + self.mon.watch_file(path, self.queue, handle) + self.handles[handle] = obj + return handle + + def pending(self): + return len(self.events) > 0 or self.mon.event_pending() + + def get_event(self): + if self.mon.event_pending(): + self.mon.handle_one_event() + return self.events.pop(0) + + available['gamin'] = Gamin +except ImportError: + # fall back to _fam + pass + +try: + import _fam + available['fam'] = FamFam +except ImportError: + pass +available['pseudo'] = Pseudo + +for fdrv in ['gamin', 'fam', 'pseudo']: + if fdrv in available: + available['default'] = available[fdrv] + break diff --git a/build/lib/Bcfg2/Server/Hostbase/__init__.py b/build/lib/Bcfg2/Server/Hostbase/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/__init__.py diff --git 
a/build/lib/Bcfg2/Server/Hostbase/backends.py b/build/lib/Bcfg2/Server/Hostbase/backends.py new file mode 100644 index 000000000..aa822409c --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/backends.py @@ -0,0 +1,68 @@ +from django.contrib.auth.models import User +#from ldapauth import * +from nisauth import * + +__revision__ = '$Revision$' + +## class LDAPBackend(object): + +## def authenticate(self,username=None,password=None): +## try: + +## l = ldapauth(username,password) +## temp_pass = User.objects.make_random_password(100) +## ldap_user = dict(username=l.sAMAccountName, +## ) +## user_session_obj = dict( +## email=l.email, +## first_name=l.name_f, +## last_name=l.name_l, +## uid=l.badge_no +## ) +## #fixme: need to add this user session obj to session +## #print str(ldap_user) +## user,created = User.objects.get_or_create(username=username) +## #print user +## #print "created " + str(created) +## return user + +## except LDAPAUTHError,e: +## #print str(e) +## return None + +## def get_user(self,user_id): +## try: +## return User.objects.get(pk=user_id) +## except User.DoesNotExist, e: +## print str(e) +## return None + + +class NISBackend(object): + + def authenticate(self, username=None, password=None): + try: + n = nisauth(username, password) + temp_pass = User.objects.make_random_password(100) + nis_user = dict(username=username, + ) + + user_session_obj = dict( + email = username + "@mcs.anl.gov", + first_name = None, + last_name = None, + uid = n.uid + ) + user, created = User.objects.get_or_create(username=username) + + return user + + except NISAUTHError, e: + return None + + + def get_user(self, user_id): + try: + return User.objects.get(pk=user_id) + except User.DoesNotExist, e: + return None diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py diff --git 
a/build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py new file mode 100644 index 000000000..70a2233cc --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py @@ -0,0 +1,15 @@ +from django.contrib import admin + +from models import Host, Interface, IP, MX, Name, CName, Nameserver, ZoneAddress, Zone, Log, ZoneLog + +admin.site.register(Host) +admin.site.register(Interface) +admin.site.register(IP) +admin.site.register(MX) +admin.site.register(Name) +admin.site.register(CName) +admin.site.register(Nameserver) +admin.site.register(ZoneAddress) +admin.site.register(Zone) +admin.site.register(Log) +admin.site.register(ZoneLog) diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/models.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/models.py new file mode 100644 index 000000000..3f08a09a0 --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/models.py @@ -0,0 +1,210 @@ +from django.db import models + +# Create your models here. 
+class Host(models.Model): + NETGROUP_CHOICES = ( + ('none', 'none'),('cave', 'cave'),('ccst', 'ccst'),('mcs', 'mcs'), + ('mmlab', 'mmlab'),('sp', 'sp'),('red', 'red'),('virtual', 'virtual'), + ('win', 'win'),('xterm', 'xterm'),('lcrc', 'lcrc'),('anlext', 'anlext'), + ('teragrid', 'teragrid') + ) + STATUS_CHOICES = ( + ('active','active'),('dormant','dormant') + ) + SUPPORT_CHOICES = ( + ('green','green'),('yellow','yellow'),('red','red') + ) + CLASS_CHOICES = ( + ('scientific','scientific'), + ('operations','operations'),('guest','guest'), + ('confidential','confidential'),('public','public') + ) + WHATAMI_CHOICES = ( + ('aix-3', 'aix-3'), ('aix-4', 'aix-4'), + ('aix-5', 'aix-5'), ('baytech', 'baytech'), + ('decserver', 'decserver'), ('dialup', 'dialup'), + ('dos', 'dos'), ('freebsd', 'freebsd'), + ('hpux', 'hpux'), ('irix-5', 'irix-5'), + ('irix-6', 'irix-6'), ('linux', 'linux'), + ('linux-2', 'linux-2'), ('linux-rh73', 'linux-rh73'), + ('linux-rh8', 'linux-rh8'), ('linux-sles8', 'linux-sles8'), + ('linux-sles8-64', 'linux-sles8-64'), ('linux-sles8-ia32', 'linux-sles8-ia32'), + ('linux-sles8-ia64', 'linux-sles8-ia64'), ('mac', 'mac'), + ('network', 'network'), ('next', 'next'), + ('none', 'none'), ('osf', 'osf'), ('printer', 'printer'), + ('robot', 'robot'), ('solaris-2', 'solaris-2'), + ('sun4', 'sun4'), ('unknown', 'unknown'), ('virtual', 'virtual'), + ('win31', 'win31'), ('win95', 'win95'), + ('winNTs', 'winNTs'), ('winNTw', 'winNTw'), + ('win2k', 'win2k'), ('winXP', 'winXP'), ('xterm', 'xterm') + ) + hostname = models.CharField(max_length=64) + whatami = models.CharField(max_length=16) + netgroup = models.CharField(max_length=32, choices=NETGROUP_CHOICES) + security_class = models.CharField('class', max_length=16) + support = models.CharField(max_length=8, choices=SUPPORT_CHOICES) + csi = models.CharField(max_length=32, blank=True) + printq = models.CharField(max_length=32, blank=True) + outbound_smtp = models.BooleanField() + primary_user = 
models.EmailField() + administrator = models.EmailField(blank=True) + location = models.CharField(max_length=16) + comments = models.TextField(blank=True) + expiration_date = models.DateField(null=True, blank=True) + last = models.DateField(auto_now=True, auto_now_add=True) + status = models.CharField(max_length=7, choices=STATUS_CHOICES) + dirty = models.BooleanField() + + class Admin: + list_display = ('hostname', 'last') + search_fields = ['hostname'] + + def __str__(self): + return self.hostname + + def get_logs(self): + """ + Get host's log. + """ + return Log.objects.filter(hostname=self.hostname) + +class Interface(models.Model): + TYPE_CHOICES = ( + ('eth', 'ethernet'), ('wl', 'wireless'), ('virtual', 'virtual'), ('myr', 'myr'), + ('mgmt', 'mgmt'), ('tape', 'tape'), ('fe', 'fe'), ('ge', 'ge'), + ) + # FIXME: The new admin interface has change a lot. + #host = models.ForeignKey(Host, edit_inline=models.TABULAR, num_in_admin=2) + host = models.ForeignKey(Host) + # FIXME: The new admin interface has change a lot. + #mac_addr = models.CharField(max_length=32, core=True) + mac_addr = models.CharField(max_length=32) + hdwr_type = models.CharField('type', max_length=16, choices=TYPE_CHOICES, blank=True) + # FIXME: The new admin interface has change a lot. + # radio_admin=True, blank=True) + dhcp = models.BooleanField() + + def __str__(self): + return self.mac_addr + + class Admin: + list_display = ('mac_addr', 'host') + search_fields = ['mac_addr'] + +class IP(models.Model): + interface = models.ForeignKey(Interface) + # FIXME: The new admin interface has change a lot. + # edit_inline=models.TABULAR, num_in_admin=1) + #ip_addr = models.IPAddressField(core=True) + ip_addr = models.IPAddressField() + + def __str__(self): + return self.ip_addr + + class Admin: + pass + + class Meta: + ordering = ('ip_addr', ) + +class MX(models.Model): + priority = models.IntegerField(blank=True) + # FIXME: The new admin interface has change a lot. 
+ #mx = models.CharField(max_length=64, blank=True, core=True) + mx = models.CharField(max_length=64, blank=True) + + def __str__(self): + return (" ".join([str(self.priority), self.mx])) + + class Admin: + pass + +class Name(models.Model): + DNS_CHOICES = ( + ('global','global'),('internal','ANL internal'), + ('private','private') + ) + # FIXME: The new admin interface has change a lot. + #ip = models.ForeignKey(IP, edit_inline=models.TABULAR, num_in_admin=1) + ip = models.ForeignKey(IP) + # FIXME: The new admin interface has change a lot. + #name = models.CharField(max_length=64, core=True) + name = models.CharField(max_length=64) + dns_view = models.CharField(max_length=16, choices=DNS_CHOICES) + only = models.BooleanField(blank=True) + mxs = models.ManyToManyField(MX) + + def __str__(self): + return self.name + + class Admin: + pass + +class CName(models.Model): + # FIXME: The new admin interface has change a lot. + #name = models.ForeignKey(Name, edit_inline=models.TABULAR, num_in_admin=1) + name = models.ForeignKey(Name) + # FIXME: The new admin interface has change a lot. 
+ #cname = models.CharField(max_length=64, core=True) + cname = models.CharField(max_length=64) + + def __str__(self): + return self.cname + + class Admin: + pass + +class Nameserver(models.Model): + name = models.CharField(max_length=64, blank=True) + + def __str__(self): + return self.name + + class Admin: + pass + +class ZoneAddress(models.Model): + ip_addr = models.IPAddressField(blank=True) + + def __str__(self): + return self.ip_addr + + class Admin: + pass + +class Zone(models.Model): + zone = models.CharField(max_length=64) + serial = models.IntegerField() + admin = models.CharField(max_length=64) + primary_master = models.CharField(max_length=64) + expire = models.IntegerField() + retry = models.IntegerField() + refresh = models.IntegerField() + ttl = models.IntegerField() + nameservers = models.ManyToManyField(Nameserver, blank=True) + mxs = models.ManyToManyField(MX, blank=True) + addresses = models.ManyToManyField(ZoneAddress, blank=True) + aux = models.TextField(blank=True) + + def __str__(self): + return self.zone + + class Admin: + pass + +class Log(models.Model): + # FIXME: Proposal hostname = models.ForeignKey(Host) + hostname = models.CharField(max_length=64) + date = models.DateTimeField(auto_now=True, auto_now_add=True) + log = models.TextField() + + def __str__(self): + return self.hostname + +class ZoneLog(models.Model): + zone = models.CharField(max_length=64) + date = models.DateTimeField(auto_now=True, auto_now_add=True) + log = models.TextField() + + def __str__(self): + return self.zone diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py new file mode 100644 index 000000000..0ee204abe --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +from django.conf.urls.defaults import * +from django.contrib.auth.decorators import login_required +from django.core.urlresolvers import reverse +from django.views.generic.create_update 
import create_object, update_object, delete_object +from django.views.generic.list_detail import object_detail, object_list + +from models import Host, Zone, Log + +host_detail_dict = { + 'queryset':Host.objects.all(), + 'template_name':'host.html', + 'template_object_name':'host', +} + +host_delete_dict = { + 'model':Host, + 'post_delete_redirect':'/', +} + +host_log_detail_dict = host_detail_dict.copy() +host_log_detail_dict['template_name'] = 'logviewer.html' + +host_dns_detail_dict = host_detail_dict.copy() +host_dns_detail_dict['template_name'] = 'dns.html' + +zone_new_dict = { + 'model':Zone, + 'template_name':'zonenew.html', + 'post_save_redirect':'../%(id)s', +} + +zones_list_dict = { + 'queryset':Zone.objects.all(), + 'template_name':'zones.html', + 'template_object_name':'zone', +} + +zone_detail_dict = { + 'queryset':Zone.objects.all(), + 'template_name':'zoneview.html', + 'template_object_name':'zone', +} + +urlpatterns = patterns('', + (r'^(?P<object_id>\d+)/$', object_detail, host_detail_dict, 'host_detail'), + (r'^zones/new/$', login_required(create_object), zone_new_dict, 'zone_new'), + (r'^zones/(?P<object_id>\d+)/edit', login_required(update_object), zone_new_dict, 'zone_edit'), + (r'^zones/$', object_list, zones_list_dict, 'zone_list'), + (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'), + (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'), + (r'^\d+/logs/(?P<object_id>\d+)/', object_detail, { 'queryset':Log.objects.all() }, 'log_detail'), + (r'^(?P<object_id>\d+)/logs/', object_detail, host_log_detail_dict, 'host_log_list'), + (r'^(?P<object_id>\d+)/dns', object_detail, host_dns_detail_dict, 'host_dns_list'), + (r'^(?P<object_id>\d+)/remove', login_required(delete_object), host_delete_dict, 'host_delete'), +) + +urlpatterns += patterns('Bcfg2.Server.Hostbase.hostbase.views', + (r'^$', 'search'), + (r'^(?P<host_id>\d+)/edit', 'edit'), + 
(r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'), + (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/(?P<name_id>\d+)/confirm', 'confirm'), + (r'^(?P<host_id>\d+)/dns/edit', 'dnsedit'), + (r'^new', 'new'), + (r'^(?P<host_id>\d+)/copy', 'copy'), +# (r'^hostinfo', 'hostinfo'), + (r'^zones/(?P<zone_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'), +) diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/views.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/views.py new file mode 100644 index 000000000..ff1d4710d --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/views.py @@ -0,0 +1,972 @@ +"""Views.py +Contains all the views associated with the hostbase app +Also has does form validation +""" +__revision__ = "$Revision: $" + +from django.http import HttpResponse, HttpResponseRedirect + +from django.contrib.auth.decorators import login_required +from django.contrib.auth import logout +from django.template import RequestContext +from Bcfg2.Server.Hostbase.hostbase.models import * +from datetime import date +from django.db import connection +from django.shortcuts import render_to_response +from django import forms +from Bcfg2.Server.Hostbase import settings, regex +import re, copy + +attribs = ['hostname', 'whatami', 'netgroup', 'security_class', 'support', + 'csi', 'printq', 'primary_user', 'administrator', 'location', + 'status', 'comments'] + +zoneattribs = ['zone', 'admin', 'primary_master', 'expire', 'retry', + 'refresh', 'ttl', 'aux'] + +dispatch = {'mac_addr':'i.mac_addr LIKE \'%%%%%s%%%%\'', + 'ip_addr':'p.ip_addr LIKE \'%%%%%s%%%%\'', + 'name':'n.name LIKE \'%%%%%s%%%%\'', +## 'hostname':'n.name LIKE \'%%%%%s%%%%\'', +## 'cname':'n.name LIKE \'%%%%%s%%%%\'', + 'mx':'m.mx LIKE \'%%%%%s%%%%\'', + 'dns_view':'n.dns_view = \'%s\'', + 'hdwr_type':'i.hdwr_type = \'%s\'', + 'dhcp':'i.dhcp = \'%s\''} + +def search(request): + """Search for hosts in the database + If more than one field is entered, logical AND is used + """ + 
if 'sub' in request.GET: + querystring = """SELECT DISTINCT h.hostname, h.id, h.status + FROM (((((hostbase_host h + INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON p.id = n.ip_id) + INNER JOIN hostbase_name_mxs x ON n.id = x.name_id) + INNER JOIN hostbase_mx m ON m.id = x.mx_id) + LEFT JOIN hostbase_cname c ON n.id = c.name_id + WHERE """ + + _and = False + for field in request.POST: + if request.POST[field] and field == 'hostname': + if _and: + querystring += ' AND ' + querystring += 'n.name LIKE \'%%%%%s%%%%\' or c.cname LIKE \'%%%%%s%%%%\'' % (request.POST[field], request.POST[field]) + _and = True + elif request.POST[field] and field in dispatch: + if _and: + querystring += ' AND ' + querystring += dispatch[field] % request.POST[field] + _and = True + elif request.POST[field]: + if _and: + querystring += ' AND ' + querystring += "h.%s LIKE \'%%%%%s%%%%\'" % (field, request.POST[field]) + _and = True + + if not _and: + cursor = connection.cursor() + cursor.execute("""SELECT hostname, id, status + FROM hostbase_host ORDER BY hostname""") + results = cursor.fetchall() + else: + querystring += " ORDER BY h.hostname" + cursor = connection.cursor() + cursor.execute(querystring) + results = cursor.fetchall() + + return render_to_response('results.html', + {'hosts': results, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + else: + return render_to_response('search.html', + {'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'DNS_CHOICES': Name.DNS_CHOICES, + 'yesno': [(1, 'yes'), (0, 'no')], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + + +def gethostdata(host_id, dnsdata=False): + """Grabs the necessary data about a host + Replaces a lot of repeated code""" + hostdata = {} + hostdata['ips'] = {} + hostdata['names'] = {} + hostdata['cnames'] = {} + hostdata['mxs'] = 
{} + hostdata['host'] = Host.objects.get(id=host_id) + hostdata['interfaces'] = hostdata['host'].interface_set.all() + for interface in hostdata['interfaces']: + hostdata['ips'][interface.id] = interface.ip_set.all() + if dnsdata: + for ip in hostdata['ips'][interface.id]: + hostdata['names'][ip.id] = ip.name_set.all() + for name in hostdata['names'][ip.id]: + hostdata['cnames'][name.id] = name.cname_set.all() + hostdata['mxs'][name.id] = name.mxs.all() + return hostdata + +def fill(template, hostdata, dnsdata=False): + """Fills a generic template + Replaces a lot of repeated code""" + if dnsdata: + template.names = hostdata['names'] + template.cnames = hostdata['cnames'] + template.mxs = hostdata['mxs'] + template.host = hostdata['host'] + template.interfaces = hostdata['interfaces'] + template.ips = hostdata['ips'] + return template + +def edit(request, host_id): + """edit general host information""" + manipulator = Host.ChangeManipulator(host_id) + changename = False + if request.method == 'POST': + host = Host.objects.get(id=host_id) + before = host.__dict__.copy() + if request.POST['hostname'] != host.hostname: + oldhostname = host.hostname.split(".")[0] + changename = True + interfaces = host.interface_set.all() + old_interfaces = [interface.__dict__.copy() for interface in interfaces] + + new_data = request.POST.copy() + + errors = manipulator.get_validation_errors(new_data) + if not errors: + + # somehow keep track of multiple interface change manipulators + # as well as multiple ip chnage manipulators??? (add manipulators???) + # change to many-to-many?????? + + # dynamically look up mx records? 
+ text = '' + + for attrib in attribs: + if host.__dict__[attrib] != request.POST[attrib]: + text = do_log(text, attrib, host.__dict__[attrib], request.POST[attrib]) + host.__dict__[attrib] = request.POST[attrib] + + if 'expiration_date' in request.POST: + ymd = request.POST['expiration_date'].split("-") + if date(int(ymd[0]), int(ymd[1]), int(ymd[2])) != host.__dict__['expiration_date']: + text = do_log(text, 'expiration_date', host.__dict__['expiration_date'], + request.POST['expiration_date']) + host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2])) + + for inter in interfaces: + changetype = False + ips = IP.objects.filter(interface=inter.id) + if inter.mac_addr != request.POST['mac_addr%d' % inter.id]: + text = do_log(text, 'mac_addr', inter.mac_addr, request.POST['mac_addr%d' % inter.id]) + inter.mac_addr = request.POST['mac_addr%d' % inter.id].lower().replace('-',':') + if inter.hdwr_type != request.POST['hdwr_type%d' % inter.id]: + oldtype = inter.hdwr_type + text = do_log(text, 'hdwr_type', oldtype, request.POST['hdwr_type%d' % inter.id]) + inter.hdwr_type = request.POST['hdwr_type%d' % inter.id] + changetype = True + if (('dhcp%d' % inter.id) in request.POST and not inter.dhcp or + not ('dhcp%d' % inter.id) in request.POST and inter.dhcp): + text = do_log(text, 'dhcp', inter.dhcp, int(not inter.dhcp)) + inter.dhcp = not inter.dhcp + for ip in ips: + names = ip.name_set.all() + if not ip.ip_addr == request.POST['ip_addr%d' % ip.id]: + oldip = ip.ip_addr + oldsubnet = oldip.split(".")[2] + ip.ip_addr = request.POST['ip_addr%d' % ip.id] + ip.save() + text = do_log(text, 'ip_addr', oldip, ip.ip_addr) + for name in names: + if name.name.split(".")[0].endswith('-%s' % oldsubnet): + name.name = name.name.replace('-%s' % oldsubnet, '-%s' % ip.ip_addr.split(".")[2]) + name.save() + if changetype: + for name in names: + if name.name.split(".")[0].endswith('-%s' % oldtype): + name.name = name.name.replace('-%s' % oldtype, '-%s' % 
inter.hdwr_type) + name.save() + if changename: + for name in names: + if name.name.startswith(oldhostname): + name.name = name.name.replace(oldhostname, host.hostname.split(".")[0]) + name.save() + if request.POST['%dip_addr' % inter.id]: + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_ip = IP(interface=inter, ip_addr=request.POST['%dip_addr' % inter.id]) + new_ip.save() + text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr) + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + inter.save() + if request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr=request.POST['mac_addr_new'].lower().replace('-',':'), + hdwr_type=request.POST['hdwr_type_new'], + dhcp=request.POST['dhcp_new']) + text = do_log(text, '*new*', 'mac_addr', new_inter.mac_addr) + new_inter.save() + if request.POST['mac_addr_new'] and request.POST['ip_addr_new']: + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr) + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']: + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_inter = Interface(host=host, mac_addr="", + hdwr_type=request.POST['hdwr_type_new'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr) + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if text: + log = Log(hostname=host.hostname, log=text) + log.save() + host.save() + return HttpResponseRedirect('/hostbase/%s/' % host.id) + else: + return render_to_response('errors.html', + {'failures': errors, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + else: + host = Host.objects.get(id=host_id) + interfaces = [] + for interface in host.interface_set.all(): + interfaces.append([interface, interface.ip_set.all()]) + return render_to_response('edit.html', + {'host': host, + 'interfaces': interfaces, + 'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def confirm(request, item, item_id, host_id=None, name_id=None, zone_id=None): + """Asks if the user is sure he/she wants to remove an item""" + if 'sub' in request.GET: + if item == 'interface': + for ip in Interface.objects.get(id=item_id).ip_set.all(): + for name in ip.name_set.all(): + name.cname_set.all().delete() + ip.name_set.all().delete() + Interface.objects.get(id=item_id).ip_set.all().delete() + Interface.objects.get(id=item_id).delete() + elif item=='ip': + for name in IP.objects.get(id=item_id).name_set.all(): + name.cname_set.all().delete() + IP.objects.get(id=item_id).name_set.all().delete() + IP.objects.get(id=item_id).delete() + elif item=='cname': + CName.objects.get(id=item_id).delete() + elif item=='mx': + mx = MX.objects.get(id=item_id) + Name.objects.get(id=name_id).mxs.remove(mx) + elif item=='name': + Name.objects.get(id=item_id).cname_set.all().delete() + Name.objects.get(id=item_id).delete() + elif item=='nameserver': + nameserver = Nameserver.objects.get(id=item_id) + 
Zone.objects.get(id=zone_id).nameservers.remove(nameserver) + elif item=='zonemx': + mx = MX.objects.get(id=item_id) + Zone.objects.get(id=zone_id).mxs.remove(mx) + elif item=='address': + address = ZoneAddress.objects.get(id=item_id) + Zone.objects.get(id=zone_id).addresses.remove(address) + if item == 'cname' or item == 'mx' or item == 'name': + return HttpResponseRedirect('/hostbase/%s/dns/edit' % host_id) + elif item == 'nameserver' or item == 'zonemx' or item == 'address': + return HttpResponseRedirect('/hostbase/zones/%s/edit' % zone_id) + else: + return HttpResponseRedirect('/hostbase/%s/edit' % host_id) + else: + interface = None + ips = [] + names = [] + cnames = [] + mxs = [] + zonemx = None + nameserver = None + address = None + if item == 'interface': + interface = Interface.objects.get(id=item_id) + ips = interface.ip_set.all() + for ip in ips: + for name in ip.name_set.all(): + names.append((ip.id, name)) + for cname in name.cname_set.all(): + cnames.append((name.id, cname)) + for mx in name.mxs.all(): + mxs.append((name.id, mx)) + elif item=='ip': + ips = [IP.objects.get(id=item_id)] + for name in ips[0].name_set.all(): + names.append((ips[0].id, name)) + for cname in name.cname_set.all(): + cnames.append((name.id, cname)) + for mx in name.mxs.all(): + mxs.append((name.id, mx)) + elif item=='name': + names = [Name.objects.get(id=item_id)] + for cname in names[0].cname_set.all(): + cnames.append((names[0].id, cname)) + for mx in names[0].mxs.all(): + mxs.append((names[0].id, mx)) + elif item=='cname': + cnames = [CName.objects.get(id=item_id)] + elif item=='mx': + mxs = [MX.objects.get(id=item_id)] + elif item=='zonemx': + zonemx = MX.objects.get(id=item_id) + elif item=='nameserver': + nameserver = Nameserver.objects.get(id=item_id) + elif item=='address': + address = ZoneAddress.objects.get(id=item_id) + return render_to_response('confirm.html', + {'interface': interface, + 'ips': ips, + 'names': names, + 'cnames': cnames, + 'id': item_id, + 'type': 
item, + 'host_id': host_id, + 'mxs': mxs, + 'zonemx': zonemx, + 'nameserver': nameserver, + 'address': address, + 'zone_id': zone_id, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def dnsedit(request, host_id): + """Edits specific DNS information + Data is validated before committed to the database""" + text = '' + if 'sub' in request.GET: + hostdata = gethostdata(host_id, True) + for ip in hostdata['names']: + ipaddr = IP.objects.get(id=ip) + ipaddrstr = ipaddr.__str__() + for name in hostdata['cnames']: + for cname in hostdata['cnames'][name]: + if regex.host.match(request.POST['cname%d' % cname.id]): + text = do_log(text, 'cname', cname.cname, request.POST['cname%d' % cname.id]) + cname.cname = request.POST['cname%d' % cname.id] + cname.save() + for name in hostdata['mxs']: + for mx in hostdata['mxs'][name]: + if (mx.priority != request.POST['priority%d' % mx.id] and mx.mx != request.POST['mx%d' % mx.id]): + text = do_log(text, 'mx', ' '.join([str(mx.priority), str(mx.mx)]), + ' '.join([request.POST['priority%d' % mx.id], request.POST['mx%d' % mx.id]])) + nameobject = Name.objects.get(id=name) + nameobject.mxs.remove(mx) + newmx, created = MX.objects.get_or_create(priority=request.POST['priority%d' % mx.id], mx=request.POST['mx%d' % mx.id]) + if created: + newmx.save() + nameobject.mxs.add(newmx) + nameobject.save() + for name in hostdata['names'][ip]: + name.name = request.POST['name%d' % name.id] + name.dns_view = request.POST['dns_view%d' % name.id] + if (request.POST['%dcname' % name.id] and + regex.host.match(request.POST['%dcname' % name.id])): + cname = CName(name=name, + cname=request.POST['%dcname' % name.id]) + text = do_log(text, '*new*', 'cname', cname.cname) + cname.save() + if (request.POST['%dpriority' % name.id] and + request.POST['%dmx' % name.id]): + mx, created = MX.objects.get_or_create(priority=request.POST['%dpriority' % name.id], + mx=request.POST['%dmx' % name.id]) + if 
created: + mx.save() + text = do_log(text, '*new*', 'mx', + ' '.join([request.POST['%dpriority' % name.id], + request.POST['%dmx' % name.id]])) + name.mxs.add(mx) + name.save() + if request.POST['%sname' % ipaddrstr]: + name = Name(ip=ipaddr, + dns_view=request.POST['%sdns_view' % ipaddrstr], + name=request.POST['%sname' % ipaddrstr], only=False) + text = do_log(text, '*new*', 'name', name.name) + name.save() + if (request.POST['%scname' % ipaddrstr] and + regex.host.match(request.POST['%scname' % ipaddrstr])): + cname = CName(name=name, + cname=request.POST['%scname' % ipaddrstr]) + text = do_log(text, '*new*', 'cname', cname.cname) + cname.save() + if (request.POST['%smx' % ipaddrstr] and + request.POST['%spriority' % ipaddrstr]): + mx, created = MX.objects.get_or_create(priority=request.POST['%spriority' % ipaddrstr], + mx=request.POST['%smx' % ipaddrstr]) + if created: + mx.save() + text = do_log(text, '*new*', 'mx', + ' '.join([request.POST['%spriority' % ipaddrstr], request.POST['%smx' % ipaddrstr]])) + name.mxs.add(mx) + if text: + log = Log(hostname=hostdata['host'].hostname, log=text) + log.save() + return HttpResponseRedirect('/hostbase/%s/dns' % host_id) + else: + host = Host.objects.get(id=host_id) + ips = [] + info = [] + cnames = [] + mxs = [] + interfaces = host.interface_set.all() + for interface in host.interface_set.all(): + ips.extend(interface.ip_set.all()) + for ip in ips: + info.append([ip, ip.name_set.all()]) + for name in ip.name_set.all(): + cnames.extend(name.cname_set.all()) + mxs.append((name.id, name.mxs.all())) + return render_to_response('dnsedit.html', + {'host': host, + 'info': info, + 'cnames': cnames, + 'mxs': mxs, + 'request': request, + 'interfaces': interfaces, + 'DNS_CHOICES': Name.DNS_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def new(request): + """Function for creating a new host in hostbase + Data is validated before committed to the database""" + 
if 'sub' in request.GET: + try: + Host.objects.get(hostname=request.POST['hostname'].lower()) + return render_to_response('errors.html', + {'failures': ['%s already exists in hostbase' % request.POST['hostname']], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + except: + pass + if not validate(request, True): + if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']: + return render_to_response('errors.html', + {'failures': ['ip_addr: You must enter an ip address'], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + host = Host() + # this is the stuff that validate() should take care of + # examine the check boxes for any changes + host.outbound_smtp = 'outbound_smtp' in request.POST + for attrib in attribs: + if attrib in request.POST: + host.__dict__[attrib] = request.POST[attrib].lower() + if 'comments' in request.POST: + host.comments = request.POST['comments'] + if 'expiration_date' in request.POST: +# ymd = request.POST['expiration_date'].split("-") +# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2])) + host.__dict__['expiration_date'] = date(2000, 1, 1) + host.status = 'active' + host.save() + else: + return render_to_response('errors.html', + {'failures': validate(request, True), + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + + if request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new'], + dhcp = 'dhcp_new' in request.POST) + new_inter.save() + if request.POST['mac_addr_new'] and request.POST['ip_addr_new']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) +# Change all this things. Use a "post_save" signal handler for model Host to create all sociate models +# and use a generi view. 
+ new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new2'], + dhcp = 'dhcp_new2' in request.POST) + new_inter.save() + if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new2'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + host.save() + return HttpResponseRedirect('/hostbase/%s/' % host.id) + else: + return render_to_response('new.html', + {'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES, + 'CLASS_CHOICES': Host.CLASS_CHOICES, + 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES, + 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +def copy(request, host_id): + """Function for creating a new host in hostbase + Data is validated before committed to the database""" + if 'sub' in request.GET: + try: + Host.objects.get(hostname=request.POST['hostname'].lower()) + return render_to_response('errors.html', + {'failures': ['%s already exists in hostbase' % request.POST['hostname']], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + except: + pass + if not validate(request, True): + if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']: + return render_to_response('errors.html', + {'failures': ['ip_addr: You must enter an ip address'], + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + host = Host() + # this is the stuff that validate() should take care of + # examine the check boxes for any changes + host.outbound_smtp = 'outbound_smtp' in request.POST + for attrib in attribs: + if attrib in request.POST: + host.__dict__[attrib] = request.POST[attrib].lower() + 
if 'comments' in request.POST: + host.comments = request.POST['comments'] + if 'expiration_date' in request.POST: +# ymd = request.POST['expiration_date'].split("-") +# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2])) + host.__dict__['expiration_date'] = date(2000, 1, 1) + host.status = 'active' + host.save() + else: + return render_to_response('errors.html', + {'failures': validate(request, True), + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + + if request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new'], + dhcp = 'dhcp_new' in request.POST) + new_inter.save() + if request.POST['mac_addr_new'] and request.POST['ip_addr_new']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'), + hdwr_type = request.POST['hdwr_type_new2'], + dhcp = 'dhcp_new2' in request.POST) + new_inter.save() + if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']: + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']: + new_inter = Interface(host=host, + mac_addr="", + hdwr_type=request.POST['hdwr_type_new2'], + dhcp=False) + new_inter.save() + new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2']) + new_ip.save() + mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX) + if created: + mx.save() + new_name = "-".join([host.hostname.split(".")[0], + new_ip.ip_addr.split(".")[2]]) + new_name += "." + host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + new_name = "-".join([host.hostname.split(".")[0], + new_inter.hdwr_type]) + new_name += "." 
+ host.hostname.split(".", 1)[1] + name = Name(ip=new_ip, name=new_name, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + name = Name(ip=new_ip, name=host.hostname, + dns_view='global', only=False) + name.save() + name.mxs.add(mx) + host.save() + return HttpResponseRedirect('/hostbase/%s/' % host.id) + else: + host = Host.objects.get(id=host_id) + return render_to_response('copy.html', + {'host': host, + 'TYPE_CHOICES': Interface.TYPE_CHOICES, + 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES, + 'CLASS_CHOICES': Host.CLASS_CHOICES, + 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES, + 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES, + 'logged_in': request.session.get('_auth_user_id', False)}, + context_instance = RequestContext(request)) + +# FIXME: delete all this things in a signal handler "pre_delete" +#def remove(request, host_id): +# host = Host.objects.get(id=host_id) +# if 'sub' in request: +# for interface in host.interface_set.all(): +# for ip in interface.ip_set.all(): +# for name in ip.name_set.all(): +# name.cname_set.all().delete() +# ip.name_set.all().delete() +# interface.ip_set.all().delete() +# interface.delete() +# host.delete() + +def validate(request, new=False, host_id=None): + """Function for checking form data""" + failures = [] + if (request.POST['expiration_date'] + and regex.date.match(request.POST['expiration_date'])): + try: + (year, month, day) = request.POST['expiration_date'].split("-") + date(int(year), int(month), int(day)) + except (ValueError): + failures.append('expiration_date') + elif request.POST['expiration_date']: + failures.append('expiration_date') + + if not (request.POST['hostname'] + and regex.host.match(request.POST['hostname'])): + failures.append('hostname') + +## if not regex.printq.match(request.POST['printq']) and request.POST['printq']: +## failures.append('printq') + +## if not regex.user.match(request.POST['primary_user']): +## failures.append('primary_user') + +## if (not 
regex.user.match(request.POST['administrator']) +## and request.POST['administrator']): +## failures.append('administrator') + +## if not (request.POST['location'] +## and regex.location.match(request.POST['location'])): +## failures.append('location') + + if new: + if (not regex.macaddr.match(request.POST['mac_addr_new']) + and request.POST['mac_addr_new']): + failures.append('mac_addr (#1)') + if ((request.POST['mac_addr_new'] or request.POST['ip_addr_new']) and + not 'hdwr_type_new' in request.REQUEST): + failures.append('hdwr_type (#1)') + if ((request.POST['mac_addr_new2'] or request.POST['ip_addr_new2']) and + not 'hdwr_type_new2' in request.REQUEST): + failures.append('hdwr_type (#2)') + + if (not regex.macaddr.match(request.POST['mac_addr_new2']) + and request.POST['mac_addr_new2']): + failures.append('mac_addr (#2)') + + if (not regex.ipaddr.match(request.POST['ip_addr_new']) + and request.POST['ip_addr_new']): + failures.append('ip_addr (#1)') + if (not regex. ipaddr.match(request.POST['ip_addr_new2']) + and request.POST['ip_addr_new2']): + failures.append('ip_addr (#2)') + + [failures.append('ip_addr (#1)') for number in + request.POST['ip_addr_new'].split(".") + if number.isdigit() and int(number) > 255 + and 'ip_addr (#1)' not in failures] + [failures.append('ip_addr (#2)') for number in + request.POST['ip_addr_new2'].split(".") + if number.isdigit() and int(number) > 255 + and 'ip_addr (#2)' not in failures] + + elif host_id: + interfaces = Interface.objects.filter(host=host_id) + for interface in interfaces: + if (not regex.macaddr.match(request.POST['mac_addr%d' % interface.id]) + and request.POST['mac_addr%d' % interface.id]): + failures.append('mac_addr (%s)' % request.POST['mac_addr%d' % interface.id]) + for ip in interface.ip_set.all(): + if not regex.ipaddr.match(request.POST['ip_addr%d' % ip.id]): + failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id]) + [failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id]) + for 
number in request.POST['ip_addr%d' % ip.id].split(".") + if (number.isdigit() and int(number) > 255 and + 'ip_addr (%s)' % request.POST['ip_addr%d' % ip.id] not in failures)] + if (request.POST['%dip_addr' % interface.id] + and not regex.ipaddr.match(request.POST['%dip_addr' % interface.id])): + failures.append('ip_addr (%s)' % request.POST['%dip_addr' % interface.id]) + if (request.POST['mac_addr_new'] + and not regex.macaddr.match(request.POST['mac_addr_new'])): + failures.append('mac_addr (%s)' % request.POST['mac_addr_new']) + if (request.POST['ip_addr_new'] + and not regex.ipaddr.match(request.POST['ip_addr_new'])): + failures.append('ip_addr (%s)' % request.POST['ip_addr_new']) + + if not failures: + return 0 + return failures + +def do_log(text, attribute, previous, new): + if previous != new: + text += "%-20s%-20s -> %s\n" % (attribute, previous, new) + return text + +## login required stuff +## uncomment the views below that you would like to restrict access to + +## uncomment the lines below this point to restrict access to pages that modify the database +## anonymous users can still view data in Hostbase + +edit = login_required(edit) +confirm = login_required(confirm) +dnsedit = login_required(dnsedit) +new = login_required(new) +copy = login_required(copy) +#remove = login_required(remove) +#zoneedit = login_required(zoneedit) +#zonenew = login_required(zonenew) + +## uncomment the lines below this point to restrict access to all of hostbase + +## search = login_required(search) +## look = login_required(look) +## dns = login_required(dns) +## zones = login_required(zones) +## zoneview = login_required(zoneview) + diff --git a/build/lib/Bcfg2/Server/Hostbase/ldapauth.py b/build/lib/Bcfg2/Server/Hostbase/ldapauth.py new file mode 100644 index 000000000..f2148181f --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/ldapauth.py @@ -0,0 +1,172 @@ +"""Checks with LDAP (ActiveDirectory) to see if the current user is an LDAP(AD) user, +and returns a subset of 
the user's profile that is needed by Argonne/CIS to +to set user level privleges in Django""" + +__revision__ = '$Revision: 2456 $' + +import os +import ldap + +class LDAPAUTHError(Exception): + """LDAPAUTHError is raised when somehting goes boom.""" + pass + +class ldapauth(object): + group_test = False + check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP'] + securitylevel = 0 + distinguishedName = None + sAMAccountName = None + telephoneNumber = None + title = None + memberOf = None + department = None #this will be a list + mail = None + extensionAttribute1 = None #badgenumber + badge_no = None + + def __init__(self,login,passwd): + """get username (if using ldap as auth the + apache env var REMOTE_USER should be used) + from username get user profile from AD/LDAP + """ + #p = self.user_profile(login,passwd) + d = self.user_dn(login) #success, distname + print d[1] + if d[0] == 'success': + pass + p = self.user_bind(d[1],passwd) + if p[0] == 'success': + #parse results + parsed = self.parse_results(p[2]) + print self.department + self.group_test = self.member_of() + securitylevel = self.security_level() + print "ACCESS LEVEL: " + str(securitylevel) + else: + raise LDAPAUTHError(p[2]) + else: + raise LDAPAUTHError(p[2]) + + def user_profile(self,login,passwd=None): + """NOT USED RIGHT NOW""" + ldap_login = "CN=%s" % login + svc_acct = os.environ['LDAP_SVC_ACCT_NAME'] + svc_pass = os.environ['LDAP_SVC_ACCT_PASS'] + #svc_acct = 'CN=%s,DC=anl,DC=gov' % login + #svc_pass = passwd + + search_pth = os.environ['LDAP_SEARCH_PTH'] + + try: + conn = ldap.initialize(os.environ['LDAP_URI']) + conn.bind(svc_acct,svc_pass,ldap.AUTH_SIMPLE) + result_id = conn.search(search_pth, + ldap.SCOPE_SUBTREE, + ldap_login,None) + result_type,result_data = conn.result(result_id,0) + return ('success','User profile found',result_data,) + except ldap.LDAPError,e: + #connection failed + return ('error','LDAP connect failed',e,) + + def user_bind(self,distinguishedName,passwd): + """Binds to 
LDAP Server""" + search_pth = os.environ['LDAP_SEARCH_PTH'] + try: + conn = ldap.initialize(os.environ['LDAP_URI']) + conn.bind(distinguishedName,passwd,ldap.AUTH_SIMPLE) + cn = distinguishedName.split(",") + result_id = conn.search(search_pth, + ldap.SCOPE_SUBTREE, + cn[0],None) + result_type,result_data = conn.result(result_id,0) + return ('success','User profile found',result_data,) + except ldap.LDAPError,e: + #connection failed + return ('error','LDAP connect failed',e,) + + def user_dn(self,cn): + """Uses Service Account to get distinguishedName""" + ldap_login = "CN=%s" % cn + svc_acct = os.environ['LDAP_SVC_ACCT_NAME'] + svc_pass = os.environ['LDAP_SVC_ACCT_PASS'] + search_pth = os.environ['LDAP_SEARCH_PTH'] + + try: + conn = ldap.initialize(os.environ['LDAP_URI']) + conn.bind(svc_acct,svc_pass,ldap.AUTH_SIMPLE) + result_id = conn.search(search_pth, + ldap.SCOPE_SUBTREE, + ldap_login,None) + result_type,result_data = conn.result(result_id,0) + raw_obj = result_data[0][1] + distinguishedName = raw_obj['distinguishedName'] + return ('success',distinguishedName[0],) + except ldap.LDAPError,e: + #connection failed + return ('error','LDAP connect failed',e,) + + def parse_results(self,user_obj): + """Clean up the huge ugly object handed to us in the LDAP query""" + #user_obj is a list formatted like this: + #[('LDAP_DN',{user_dict},),] + try: + raw_obj = user_obj[0][1] + self.memberOf = raw_obj['memberOf'] + self.sAMAccountName = raw_obj['sAMAccountName'][0] + self.distinguishedName = raw_obj['distinguishedName'][0] + self.telephoneNumber = raw_obj['telephoneNumber'][0] + self.title = raw_obj['title'][0] + self.department = raw_obj['department'][0] + self.mail = raw_obj['mail'][0] + self.badge_no = raw_obj['extensionAttribute1'][0] + self.email = raw_obj['extensionAttribute2'][0] + display_name = raw_obj['displayName'][0].split(",") + self.name_f = raw_obj['givenName'][0] + self.name_l = display_name[0] + self.is_staff = False + self.is_superuser = False + + 
return + except KeyError, e: + raise LDAPAUTHError("Portions of the LDAP User profile not present") + + def member_of(self): + """See if this user is in our group that is allowed to login""" + m = [g for g in self.memberOf if g == self.check_member_of] + #print m + if len(m) == 1: + return True + else: + return False + + def security_level(self): + level = self.securitylevel + + user = os.environ['LDAP_GROUP_USER'] + m = [g for g in self.memberOf if g == user] + if len(m) == 1: + if level < 1: + level = 1 + + cspr = os.environ['LDAP_GROUP_SECURITY_LOW'] + m = [g for g in self.memberOf if g == cspr] + if len(m) == 1: + if level < 2: + level = 2 + + cspo = os.environ['LDAP_GROUP_SECURITY_HIGH'] + m = [g for g in self.memberOf if g == cspo] + if len(m) == 1: + if level < 3: + level = 3 + + admin = os.environ['LDAP_GROUP_ADMIN'] + m = [g for g in self.memberOf if g == admin] + if len(m) == 1: + if level < 4: + level = 4 + + return level + diff --git a/build/lib/Bcfg2/Server/Hostbase/manage.py b/build/lib/Bcfg2/Server/Hostbase/manage.py new file mode 100644 index 000000000..5e78ea979 --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/manage.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +try: + import settings # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. 
"""Checks with NIS to see if the current user is in the support group."""

__revision__ = "$Revision: $"

import os
import crypt, nis
from Bcfg2.Server.Hostbase.settings import AUTHORIZED_GROUP

class NISAUTHError(Exception):
    """NISAUTHError is raised when something goes boom."""
    pass

class nisauth(object):
    """Authenticate a user against the NIS passwd map and require
    membership in AUTHORIZED_GROUP (by member list or primary gid)."""
    group_test = False
#    check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
    samAcctName = None
    distinguishedName = None
    sAMAccountName = None
    telephoneNumber = None
    title = None
    memberOf = None
    department = None  # this will be a list
    mail = None
    extensionAttribute1 = None  # badgenumber
    badge_no = None
    uid = None

    def __init__(self, login, passwd=None):
        """Look up *login* in NIS and verify *passwd* plus group membership.

        Raises NISAUTHError('username'|'password'|'group') on failure.
        """
        try:
            p = nis.match(login, 'passwd.byname').split(":")
        except nis.error:
            # was a bare "except:"; only a failed NIS lookup means
            # "unknown user" -- anything else should propagate
            raise NISAUTHError('username')
        if passwd is None:
            # crypt.crypt() would raise TypeError on None
            raise NISAUTHError('password')
        # check user password using crypt and 2 character salt from passwd file
        if p[1] == crypt.crypt(passwd, p[1][:2]):
            # check to see if user is in valid support groups
            # will have to include these groups in a settings file eventually
            # (single lookup; the original queried the group map twice)
            group_entry = nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')
            members = group_entry[-1].split(',')
            if login not in members and p[3] != group_entry[2]:
                raise NISAUTHError('group')
            self.uid = p[2]
        else:
            raise NISAUTHError('password')
import re

# Form-validation patterns for Hostbase.  Raw strings fix the invalid
# "\." escape sequences the old plain strings produced (a SyntaxWarning
# on modern Python).

# YYYY-MM-DD (syntactic only; calendar validity is checked by the caller)
date = re.compile(r'^[0-9]{4}-[0-9]{2}-[0-9]{2}$')
# fully-qualified hostname: at least two dot-separated labels
host = re.compile(r'^[a-z0-9-_]+(\.[a-z0-9-_]+)+$')
# colon-separated MAC address, or anything starting with "virtual"
macaddr = re.compile(r'^[0-9abcdefABCDEF]{2}(:[0-9abcdefABCDEF]{2}){5}$|virtual')
# dotted-quad IPv4 (the 0-255 octet range is checked by the caller)
ipaddr = re.compile(r'^[0-9]{1,3}(\.[0-9]{1,3}){3}$')
All choices can be found here: +# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone +try: + TIME_ZONE = c.get('statistics', 'time_zone') +except: + TIME_ZONE = None + +# enter the defauly MX record machines will get in Hostbase +# this setting may move elsewhere eventually +DEFAULT_MX = options['default_mx'] +PRIORITY = int(options['priority']) + +SESSION_EXPIRE_AT_BROWSER_CLOSE = True + +# Uncomment a backend below if you would like to use it for authentication +AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', + 'Bcfg2.Server.Hostbase.backends.NISBackend', + #'Bcfg2.Server.Hostbase.backends.LDAPBacken', + ) +# enter an NIS group name you'd like to give access to edit hostbase records +AUTHORIZED_GROUP = options['authorized_group'] + +#create login url area: +import django.contrib.auth +django.contrib.auth.LOGIN_URL = '/login' +# Absolute path to the directory that holds media. +# Example: "/home/media/media.lawrence.com/" +MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') +# Just for development +SERVE_MEDIA = DEBUG + +# Language code for this installation. All choices can be found here: +# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes +# http://blogs.law.harvard.edu/tech/stories/storyReader$15 +LANGUAGE_CODE = 'en-us' +SITE_ID = 1 +# URL that handles the media served from MEDIA_ROOT. +# Example: "http://media.lawrence.com" +MEDIA_URL = '/site_media/' +# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a +# trailing slash. +# Examples: "http://foo.com/media/", "/media/". +ADMIN_MEDIA_PREFIX = '/media/' +# Make this unique, and don't share it with anybody. +SECRET_KEY = '*%=fv=yh9zur&gvt4&*d#84o(cy^-*$ox-v1e9%32pzf2*qu#s' +# List of callables that know how to import templates from various sources. 
+TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', +# 'django.template.loaders.eggs.load_template_source', +) + +TEMPLATE_CONTEXT_PROCESSORS = ( + "django.core.context_processors.auth", + "django.core.context_processors.debug", + "django.core.context_processors.i18n", + "django.core.context_processors.request", + "django.core.context_processors.media", +# Django development version. +# "django.core.context_processors.csrf", +) + + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.locale.LocaleMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.middleware.doc.XViewMiddleware', +) + +ROOT_URLCONF = 'Bcfg2.Server.Hostbase.urls' + +TEMPLATE_DIRS = ( + # Put strings here, like "/home/html/django_templates". + # Always use forward slashes, even on Windows. + '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates', + '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates', + '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/templates', + '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/templates', + '/usr/share/bcfg2/Hostbase/templates', + os.path.join(PROJECT_ROOT, 'templates'), + os.path.join(PROJECT_ROOT, 'hostbase/webtemplates'), +) + +INSTALLED_APPS = ( + 'django.contrib.admin', + 'django.contrib.admindocs', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.humanize', + 'Bcfg2.Server.Hostbase.hostbase', +) + +LOGIN_URL = '/login/' diff --git a/build/lib/Bcfg2/Server/Hostbase/urls.py b/build/lib/Bcfg2/Server/Hostbase/urls.py new file mode 100644 index 000000000..01fe97d4f --- /dev/null +++ b/build/lib/Bcfg2/Server/Hostbase/urls.py @@ -0,0 +1,27 @@ +from django.conf.urls.defaults import * +from django.conf 
import settings +from django.views.generic.simple import direct_to_template +from django.contrib import admin + + +admin.autodiscover() + + +urlpatterns = patterns('', + # Uncomment the admin/doc line below and add 'django.contrib.admindocs' + # to INSTALLED_APPS to enable admin documentation: + (r'^admin/doc/', include('django.contrib.admindocs.urls')), + + # Uncomment the next line to enable the admin: + (r'^admin/', include(admin.site.urls)), + + (r'^$',direct_to_template, {'template':'index.html'}, 'index'), + (r'^hostbase/', include('hostbase.urls')), + (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), + (r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html'}) +) + +if settings.SERVE_MEDIA: + urlpatterns += patterns('', + (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', + dict(document_root=settings.MEDIA_ROOT)),) diff --git a/build/lib/Bcfg2/Server/Plugin.py b/build/lib/Bcfg2/Server/Plugin.py new file mode 100644 index 000000000..95569e3ac --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugin.py @@ -0,0 +1,886 @@ +"""This module provides the baseclass for Bcfg2 Server Plugins.""" +__revision__ = '$Revision$' + +import copy +import logging +import lxml.etree +import os +import pickle +import posixpath +import re +import Queue +import threading + +from lxml.etree import XML, XMLSyntaxError + +import Bcfg2.Options + +# grab default metadata info from bcfg2.conf +opts = {'owner': Bcfg2.Options.MDATA_OWNER, + 'group': Bcfg2.Options.MDATA_GROUP, + 'important': Bcfg2.Options.MDATA_IMPORTANT, + 'perms': Bcfg2.Options.MDATA_PERMS, + 'paranoid': Bcfg2.Options.MDATA_PARANOID} +mdata_setup = Bcfg2.Options.OptionParser(opts) +mdata_setup.parse([]) +del mdata_setup['args'] + +logger = logging.getLogger('Bcfg2.Plugin') + +default_file_metadata = mdata_setup + +info_regex = re.compile( \ + 'encoding:(\s)*(?P<encoding>\w+)|' + + 'group:(\s)*(?P<group>\S+)|' + + 'important:(\s)*(?P<important>\S+)|' + + 
class PluginInitError(Exception):
    """Raised when a plugin fails to initialize."""
    pass

class PluginExecutionError(Exception):
    """Raised when a plugin fails during execution."""
    pass

class Plugin(object):
    """Base class for all Bcfg2 Server plugins.

    Subclasses must define:
      name        -- the name of the plugin
      __version__ -- a version string
      __author__  -- the author/contact for the plugin

    Plugins can provide three basic types of functionality:
      - Structure creation (overloading BuildStructures)
      - Configuration entry binding (overloading HandlesEntry, or loading
        the Entries table)
      - Data collection (overloading GetProbes/ReceiveData)
    """
    name = 'Plugin'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'
    __rmi__ = ['toggle_debug']
    experimental = False
    deprecated = False
    conflicts = []

    def __init__(self, core, datastore):
        object.__init__(self)
        self.Entries = {}
        self.core = core
        # per-plugin data directory inside the repository
        self.data = "%s/%s" % (datastore, self.name)
        self.logger = logging.getLogger('Bcfg2.Plugins.%s' % (self.name))
        self.running = True
        self.debug_flag = False

    def toggle_debug(self):
        """Flip the per-plugin debug flag (exposed over RMI)."""
        self.debug_flag = not self.debug_flag

    def debug_log(self, message, flag=None):
        """Log *message* when debugging is requested.

        An explicit truthy *flag* forces logging; flag=None defers to the
        plugin-wide debug_flag.
        """
        if flag or (flag is None and self.debug_flag):
            self.logger.error(message)

    @classmethod
    def init_repo(cls, repo):
        """Create this plugin's directory inside a new repository."""
        os.makedirs("%s/%s" % (repo, cls.name))

    def shutdown(self):
        """Mark the plugin as stopped."""
        self.running = False
class Structure(object):
    """Structure Plugins contribute to abstract client configurations."""
    def BuildStructures(self, metadata):
        """Return a list of abstract goal structures for client."""
        raise PluginExecutionError

class Metadata(object):
    """Signal metadata capabilities for this plugin"""
    def add_client(self, client_name, attribs):
        """Add client."""
        pass

    def remove_client(self, client_name):
        """Remove client."""
        pass

    def viz(self, hosts, bundles, key, colors):
        """Create viz str for viz admin mode."""
        pass

    def get_initial_metadata(self, client_name):
        raise PluginExecutionError

    def merge_additional_data(self, imd, source, groups, data):
        raise PluginExecutionError

class Connector(object):
    """Connector Plugins augment client metadata instances."""
    def get_additional_groups(self, metadata):
        """Determine additional groups for metadata."""
        return list()

    def get_additional_data(self, metadata):
        """Determine additional data for metadata instances."""
        return dict()

class Probing(object):
    """Signal probe capability for this plugin."""
    def GetProbes(self, _):
        """Return a set of probes for execution on client."""
        return []

    def ReceiveData(self, _, dummy):
        """Receive probe results pertaining to client."""
        pass

class Statistics(object):
    """Signal statistics handling capability."""
    def process_statistics(self, client, xdata):
        pass

class ThreadedStatistics(Statistics,
                         threading.Thread):
    """Threaded statistics handling capability.

    Interactions are enqueued by process_statistics() and consumed by the
    worker thread in run(); unprocessed items are pickled to disk on
    shutdown and re-queued on the next start.
    """
    def __init__(self, core, datastore):
        Statistics.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue.Queue(100000)
        self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
        self.daemon = True
        self.start()

    def save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, data) = self.work_queue.get_nowait()
                try:
                    pending_data.append((metadata.hostname,
                                         lxml.etree.tostring(data)))
                except:
                    # deliberate best-effort: drop an unserializable
                    # interaction rather than lose the whole batch
                    self.logger.warning("Dropping interaction for %s" %
                                        metadata.hostname)
        except Queue.Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            pickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.__class__.__name__)
        except:
            self.logger.warning("Failed to save pending data")

    def load(self):
        """Load any pending data from a file; returns False only when
        shutdown was requested mid-load."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = pickle.load(savefile)
            savefile.close()
        except Exception as e:
            self.logger.warning("Failed to load pending data: %s" % e)
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False
            try:
                # retry until the metadata system is ready
                # NOTE(review): the Bcfg2.Server.Plugins.Metadata names in
                # the except clauses rely on the caller's import graph --
                # confirm they resolve at runtime.
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
                        pass
                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False
                self.work_queue.put_nowait((metadata,
                                            lxml.etree.fromstring(pdata)))
            except Queue.Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError as lxml_error:
                self.logger.error("Unable to load save interaction: %s" %
                                  lxml_error)
            except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
                self.logger.error("Unable to load metadata for save "
                                  "interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except:
            self.logger.error("Failed to unlink save file: %s" %
                              self.pending_file)
        self.logger.info("Loaded pending %s data" % self.__class__.__name__)
        return True

    def run(self):
        if not self.load():
            return
        while not self.terminate.isSet():
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=2)
            except Queue.Empty:
                continue
            except Exception as e:
                self.logger.error("ThreadedStatistics: %s" % e)
                continue
            # BUG FIX: was self.handle_statistic(xdata, client) -- no such
            # method exists; dispatch the (metadata, data) pair that
            # process_statistics() enqueued to handle_statistics().
            self.handle_statistics(metadata, data)
        if not self.work_queue.empty():
            self.save()

    def process_statistics(self, metadata, data):
        """Enqueue an interaction for the worker thread (deep-copied so the
        caller may mutate *data* afterwards)."""
        # NOTE(review): 'warned' resets on every call, so the full-queue
        # warning is emitted per drop, not once -- confirm intent.
        warned = False
        try:
            self.work_queue.put_nowait((metadata, copy.deepcopy(data)))
            warned = False
        except Queue.Full:
            if not warned:
                self.logger.warning("%s: Queue is full.  Dropping interactions."
                                    % self.__class__.__name__)
            warned = True

    def handle_statistics(self, metadata, data):
        """Handle stats here."""
        pass

class PullSource(object):
    def GetExtra(self, client):
        return []

    def GetCurrentEntry(self, client, e_type, e_name):
        raise PluginExecutionError

class PullTarget(object):
    def AcceptChoices(self, entry, metadata):
        raise PluginExecutionError

    def AcceptPullData(self, specific, new_entry, verbose):
        """This is the null per-plugin implementation
        of bcfg2-admin pull."""
        raise PluginExecutionError

class Decision(object):
    """Signal decision handling capability."""
    def GetDecisions(self, metadata, mode):
        return []

class ValidationError(Exception):
    pass

class StructureValidator(object):
    """Validate/modify goal structures."""
    def validate_structures(self, metadata, structures):
        raise ValidationError("not implemented")

class GoalValidator(object):
    """Validate/modify configuration goals."""
    def validate_goals(self, metadata, goals):
        raise ValidationError("not implemented")

class Version(object):
    """Interact with various version control systems."""
    def get_revision(self):
        return []

    def commit_data(self):
        pass
+ """ + + def __init__(self, name): + object.__init__(self) + self.data = '' + self.name = name + + def HandleEvent(self, event=None): + """Read file upon update.""" + if event and event.code2str() not in ['exists', 'changed', 'created']: + return + try: + self.data = file(self.name).read() + self.Index() + except IOError: + logger.error("Failed to read file %s" % (self.name)) + + def Index(self): + """Update local data structures based on current file state""" + pass + +class DirectoryBacked(object): + """This object is a coherent cache for a filesystem hierarchy of files.""" + __child__ = FileBacked + patterns = re.compile('.*') + + def __init__(self, name, fam): + object.__init__(self) + self.name = name + self.fam = fam + self.entries = {} + self.inventory = False + fam.AddMonitor(name, self) + + def __getitem__(self, key): + return self.entries[key] + + def __iter__(self): + return self.entries.iteritems() + + def AddEntry(self, name): + """Add new entry to data structures upon file creation.""" + if name == '': + logger.info("got add for empty name") + elif name in self.entries: + self.entries[name].HandleEvent() + else: + if ((name[-1] == '~') or + (name[:2] == '.#') or + (name[-4:] == '.swp') or + (name in ['SCCS', '.svn'])): + return + if not self.patterns.match(name): + return + self.entries[name] = self.__child__('%s/%s' % (self.name, name)) + self.entries[name].HandleEvent() + + def HandleEvent(self, event): + """Propagate fam events to underlying objects.""" + action = event.code2str() + if event.filename == '': + logger.info("Got event for blank filename") + return + if action == 'exists': + if event.filename != self.name: + self.AddEntry(event.filename) + elif action == 'created': + self.AddEntry(event.filename) + elif action == 'changed': + if event.filename in self.entries: + self.entries[event.filename].HandleEvent(event) + elif action == 'deleted': + if event.filename in self.entries: + del self.entries[event.filename] + elif action in 
['endExist']: + pass + else: + print "Got unknown event %s %s %s" % (event.requestID, + event.code2str(), + event.filename) + +class XMLFileBacked(FileBacked): + """ + This object is a coherent cache for an XML file to be used as a + part of DirectoryBacked. + """ + __identifier__ = 'name' + + def __init__(self, filename): + self.label = "dummy" + self.entries = [] + FileBacked.__init__(self, filename) + + def Index(self): + """Build local data structures.""" + try: + xdata = XML(self.data) + except XMLSyntaxError: + logger.error("Failed to parse %s"%(self.name)) + return + self.label = xdata.attrib[self.__identifier__] + self.entries = xdata.getchildren() + + def __iter__(self): + return iter(self.entries) + +class SingleXMLFileBacked(XMLFileBacked): + """This object is a coherent cache for an independent XML file.""" + def __init__(self, filename, fam): + XMLFileBacked.__init__(self, filename) + fam.AddMonitor(filename, self) + +class StructFile(XMLFileBacked): + """This file contains a set of structure file formatting logic.""" + def __init__(self, name): + XMLFileBacked.__init__(self, name) + self.fragments = {} + + def Index(self): + """Build internal data structures.""" + try: + xdata = lxml.etree.XML(self.data) + except lxml.etree.XMLSyntaxError: + logger.error("Failed to parse file %s" % self.name) + return + self.fragments = {} + work = {lambda x:True: xdata.getchildren()} + while work: + (predicate, worklist) = work.popitem() + self.fragments[predicate] = [item for item in worklist if item.tag != 'Group' + and not isinstance(item, lxml.etree._Comment)] + for group in [item for item in worklist if item.tag == 'Group']: + # if only python had forceable early-binding + if group.get('negate', 'false') in ['true', 'True']: + cmd = "lambda x:'%s' not in x.groups and predicate(x)" + else: + cmd = "lambda x:'%s' in x.groups and predicate(x)" + + newpred = eval(cmd % (group.get('name')), {'predicate':predicate}) + work[newpred] = group.getchildren() + + def 
class INode:
    """
    INodes provide lists of things available at a particular
    group intersection.  Each node carries an eval-compiled lambda
    predicate that tests a client metadata object (hostname/groups)
    and ANDs it with the parent node's predicate.
    """
    # lambda source templates for positive matches
    raw = {'Client': "lambda x:'%s' == x.hostname and predicate(x)",
           'Group': "lambda x:'%s' in x.groups and predicate(x)"}
    # lambda source templates for negated matches
    nraw = {'Client': "lambda x:'%s' != x.hostname and predicate(x)",
            'Group': "lambda x:'%s' not in x.groups and predicate(x)"}
    containers = ['Group', 'Client']
    ignore = []

    def __init__(self, data, idict, parent=None):
        """Build the predicate tree rooted at XML element data.

        data   -- XML element for this node
        idict  -- index dict mapping entry tag -> list of entry names
        parent -- enclosing INode (None for the root, which matches all)
        """
        self.data = data
        self.contents = {}
        if parent is None:
            # the root node matches every client
            self.predicate = lambda x: True
        else:
            predicate = parent.predicate
            if data.get('negate', 'false') in ['true', 'True']:
                psrc = self.nraw
            else:
                psrc = self.raw
            if data.tag in psrc:
                self.predicate = eval(psrc[data.tag] % (data.get('name')),
                                      {'predicate': predicate})
            else:
                # give the failure a message instead of a bare Exception
                raise Exception("unknown container tag %s" % data.tag)
        mytype = self.__class__
        self.children = []
        for item in data.getchildren():
            if item.tag in self.ignore:
                continue
            elif item.tag in self.containers:
                self.children.append(mytype(item, idict, self))
            else:
                try:
                    self.contents[item.tag][item.get('name')] = item.attrib
                except KeyError:
                    self.contents[item.tag] = {item.get('name'): item.attrib}
                if item.text:
                    self.contents[item.tag]['__text__'] = item.text
                try:
                    idict[item.tag].append(item.get('name'))
                except KeyError:
                    idict[item.tag] = [item.get('name')]

    def Match(self, metadata, data):
        """Merge this node's contents into data if the client matches."""
        if self.predicate(metadata):
            for key in self.contents:
                try:
                    data[key].update(self.contents[key])
                except KeyError:
                    # narrowed from a bare except: only a missing key is
                    # expected here (first entry of this tag type)
                    data[key] = {}
                    data[key].update(self.contents[key])
            for child in self.children:
                child.Match(metadata, data)
"""XMLSrc files contain a LNode hierarchy that returns matching entries.""" + __node__ = INode + __cacheobj__ = dict + + def __init__(self, filename, noprio=False): + XMLFileBacked.__init__(self, filename) + self.items = {} + self.cache = None + self.pnode = None + self.priority = -1 + self.noprio = noprio + + def HandleEvent(self, _=None): + """Read file upon update.""" + try: + data = file(self.name).read() + except IOError: + logger.error("Failed to read file %s" % (self.name)) + return + self.items = {} + try: + xdata = lxml.etree.XML(data) + except lxml.etree.XMLSyntaxError: + logger.error("Failed to parse file %s" % (self.name)) + return + self.pnode = self.__node__(xdata, self.items) + self.cache = None + try: + self.priority = int(xdata.get('priority')) + except (ValueError, TypeError): + if not self.noprio: + logger.error("Got bogus priority %s for file %s" % (xdata.get('priority'), self.name)) + del xdata, data + + def Cache(self, metadata): + """Build a package dict for a given host.""" + if self.cache == None or self.cache[0] != metadata: + cache = (metadata, self.__cacheobj__()) + if self.pnode == None: + logger.error("Cache method called early for %s; forcing data load" % (self.name)) + self.HandleEvent() + return + self.pnode.Match(metadata, cache[1]) + self.cache = cache + +class XMLDirectoryBacked(DirectoryBacked): + """Directorybacked for *.xml.""" + patterns = re.compile('.*\.xml') + +class PrioDir(Plugin, Generator, XMLDirectoryBacked): + """This is a generator that handles package assignments.""" + name = 'PrioDir' + __child__ = XMLSrc + + def __init__(self, core, datastore): + Plugin.__init__(self, core, datastore) + Generator.__init__(self) + try: + XMLDirectoryBacked.__init__(self, self.data, self.core.fam) + except OSError: + self.logger.error("Failed to load %s indices" % (self.name)) + raise PluginInitError + + def HandleEvent(self, event): + """Handle events and update dispatch table.""" + XMLDirectoryBacked.HandleEvent(self, event) + 
self.Entries = {} + for src in self.entries.values(): + for itype, children in src.items.iteritems(): + for child in children: + try: + self.Entries[itype][child] = self.BindEntry + except KeyError: + self.Entries[itype] = {child: self.BindEntry} + + def BindEntry(self, entry, metadata): + """Check package lists of package entries.""" + [src.Cache(metadata) for src in self.entries.values()] + name = entry.get('name') + if not src.cache: + self.logger.error("Called before data loaded") + raise PluginExecutionError + matching = [src for src in self.entries.values() + if src.cache and entry.tag in src.cache[1] + and src.cache[1][entry.tag].has_key(name)] + if len(matching) == 0: + raise PluginExecutionError + elif len(matching) == 1: + index = 0 + else: + prio = [int(src.priority) for src in matching] + if prio.count(max(prio)) > 1: + self.logger.error("Found conflicting sources with " + "same priority for %s, %s %s" % + (metadata.hostname, + entry.tag.lower(), entry.get('name'))) + self.logger.error([item.name for item in matching]) + self.logger.error("Priority was %s" % max(prio)) + raise PluginExecutionError + index = prio.index(max(prio)) + + data = matching[index].cache[1][entry.tag][name] + if '__text__' in data: + entry.text = data['__text__'] + if '__children__' in data: + [entry.append(copy.deepcopy(item)) for item in data['__children__']] + [entry.attrib.__setitem__(key, data[key]) for key in data.keys() \ + if not key.startswith('__')] + +# new unified EntrySet backend + +class SpecificityError(Exception): + """Thrown in case of filename parse failure.""" + pass + +class Specificity: + + def __init__(self, all=False, group=False, hostname=False, prio=0, delta=False): + self.hostname = hostname + self.all = all + self.group = group + self.prio = prio + self.delta = delta + + def matches(self, metadata): + return self.all or \ + self.hostname == metadata.hostname or \ + self.group in metadata.groups + + def __cmp__(self, other): + """Sort most to least 
specific.""" + if self.all: + return 1 + if self.group: + if other.hostname: + return 1 + if other.group and other.prio > self.prio: + return 1 + if other.group and other.prio == self.prio: + return 0 + return -1 + + def more_specific(self, other): + """Test if self is more specific than other.""" + if self.all: + True + elif self.group: + if other.hostname: + return True + elif other.group and other.prio > self.prio: + return True + return False + +class SpecificData(object): + def __init__(self, name, specific, encoding): + self.name = name + self.specific = specific + + def handle_event(self, event): + if event.code2str() == 'deleted': + return + try: + self.data = open(self.name).read() + except: + logger.error("Failed to read file %s" % self.name) + +class EntrySet: + """Entry sets deal with the host- and group-specific entries.""" + ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px]))$") + def __init__(self, basename, path, entry_type, encoding): + self.path = path + self.entry_type = entry_type + self.entries = {} + self.metadata = default_file_metadata.copy() + self.infoxml = None + self.encoding = encoding + pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % basename + pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$' + self.specific = re.compile(pattern) + + def get_matching(self, metadata): + return [item for item in self.entries.values() \ + if item.specific.matches(metadata)] + + def handle_event(self, event): + """Handle FAM events for the TemplateSet.""" + action = event.code2str() + + if event.filename in ['info', 'info.xml', ':info']: + if action in ['exists', 'created', 'changed']: + self.update_metadata(event) + elif action == 'deleted': + self.reset_metadata(event) + return + + if action in ['exists', 'created']: + self.entry_init(event) + else: + if event.filename not in self.entries: + return + if action == 'changed': + self.entries[event.filename].handle_event(event) + elif action == 'deleted': + del self.entries[event.filename] + + def 
entry_init(self, event): + """Handle template and info file creation.""" + if event.filename in self.entries: + logger.warn("Got duplicate add for %s" % event.filename) + else: + fpath = "%s/%s" % (self.path, event.filename) + try: + spec = self.specificity_from_filename(event.filename) + except SpecificityError: + if not self.ignore.match(event.filename): + logger.error("Could not process filename %s; ignoring" % fpath) + return + self.entries[event.filename] = self.entry_type(fpath, + spec, self.encoding) + self.entries[event.filename].handle_event(event) + + def specificity_from_filename(self, fname): + """Construct a specificity instance from a filename and regex.""" + data = self.specific.match(fname) + if not data: + raise SpecificityError(fname) + kwargs = {} + if data.group('hostname'): + kwargs['hostname'] = data.group('hostname') + elif data.group('group'): + kwargs['group'] = data.group('group') + kwargs['prio'] = int(data.group('prio')) + else: + kwargs['all'] = True + if 'delta' in data.groupdict(): + kwargs['delta'] = data.group('delta') + return Specificity(**kwargs) + + def update_metadata(self, event): + """Process info and info.xml files for the templates.""" + fpath = "%s/%s" % (self.path, event.filename) + if event.filename == 'info.xml': + if not self.infoxml: + self.infoxml = XMLSrc(fpath, True) + self.infoxml.HandleEvent(event) + elif event.filename in [':info', 'info']: + for line in open(fpath).readlines(): + match = info_regex.match(line) + if not match: + logger.warning("Failed to match line: %s"%line) + continue + else: + mgd = match.groupdict() + for key, value in mgd.iteritems(): + if value: + self.metadata[key] = value + if len(self.metadata['perms']) == 3: + self.metadata['perms'] = "0%s" % \ + (self.metadata['perms']) + + def reset_metadata(self, event): + """Reset metadata to defaults if info or info.xml removed.""" + if event.filename == 'info.xml': + self.infoxml = None + elif event.filename == 'info': + self.metadata = 
default_file_metadata.copy() + + def group_sortfunc(self, x, y): + """sort groups by their priority""" + return cmp(x.specific.prio, y.specific.prio) + + def bind_info_to_entry(self, entry, metadata): + if not self.infoxml: + for key in self.metadata: + entry.set(key, self.metadata[key]) + else: + mdata = {} + self.infoxml.pnode.Match(metadata, mdata) + if 'Info' not in mdata: + logger.error("Failed to set metadata for file %s" % \ + (entry.get('name'))) + raise PluginExecutionError + [entry.attrib.__setitem__(key, value) \ + for (key, value) in mdata['Info'][None].iteritems()] + + def bind_entry(self, entry, metadata): + """Return the appropriate interpreted template from the set of available templates.""" + self.bind_info_to_entry(entry, metadata) + matching = self.get_matching(metadata) + + hspec = [ent for ent in matching if ent.specific.hostname] + if hspec: + return hspec[0].bind_entry(entry, metadata) + + gspec = [ent for ent in matching if ent.specific.group] + if gspec: + gspec.sort(self.group_sortfunc) + return gspec[-1].bind_entry(entry, metadata) + + aspec = [ent for ent in matching if ent.specific.all] + if aspec: + return aspec[0].bind_entry(entry, metadata) + + raise PluginExecutionError + +class GroupSpool(Plugin, Generator): + """Unified interface for handling group-specific data (e.g. 
.G## files).""" + name = 'GroupSpool' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + filename_pattern = "" + es_child_cls = object + es_cls = EntrySet + + def __init__(self, core, datastore): + Plugin.__init__(self, core, datastore) + Generator.__init__(self) + if self.data[-1] == '/': + self.data = self.data[:-1] + self.Entries['Path'] = {} + self.entries = {} + self.handles = {} + self.AddDirectoryMonitor('') + self.encoding = core.encoding + + def HandleEvent(self, event): + """Unified FAM event handler for DirShadow.""" + action = event.code2str() + if event.filename[0] == '/': + return + epath = "".join([self.data, self.handles[event.requestID], + event.filename]) + if posixpath.isdir(epath): + ident = self.handles[event.requestID] + event.filename + else: + ident = self.handles[event.requestID][:-1] + + if action in ['exists', 'created']: + if posixpath.isdir(epath): + self.AddDirectoryMonitor(epath[len(self.data):]) + if ident not in self.entries and posixpath.isfile(epath): + dirpath = "".join([self.data, ident]) + self.entries[ident] = self.es_cls(self.filename_pattern, + dirpath, + self.es_child_cls, + self.encoding) + self.Entries['Path'][ident] = self.entries[ident].bind_entry + if not posixpath.isdir(epath): + # do not pass through directory events + self.entries[ident].handle_event(event) + if action == 'changed': + self.entries[ident].handle_event(event) + elif action == 'deleted': + fbase = self.handles[event.requestID] + event.filename + if fbase in self.entries: + # a directory was deleted + del self.entries[fbase] + del self.Entries['Path'][fbase] + else: + self.entries[ident].handle_event(event) + + def AddDirectoryMonitor(self, relative): + """Add new directory to FAM structures.""" + if not relative.endswith('/'): + relative += '/' + name = self.data + relative + if relative not in self.handles.values(): + if not posixpath.isdir(name): + print "Failed to open directory %s" % (name) + return + reqid = 
self.core.fam.AddMonitor(name, self) + self.handles[reqid] = relative diff --git a/build/lib/Bcfg2/Server/Plugins/Account.py b/build/lib/Bcfg2/Server/Plugins/Account.py new file mode 100644 index 000000000..e3ea58761 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Account.py @@ -0,0 +1,93 @@ +"""This handles authentication setup.""" +__revision__ = '$Revision$' + +import Bcfg2.Server.Plugin + +class Account(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Generator): + """This module generates account config files, + based on an internal data repo: + static.(passwd|group|limits.conf) -> static entries + dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch) + useraccess -> users to be granted login access on some hosts + superusers -> users to be granted root privs on all hosts + rootlike -> users to be granted root privs on some hosts + + """ + name = 'Account' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Generator.__init__(self) + self.Entries = {'ConfigFile':{'/etc/passwd':self.from_yp_cb, + '/etc/group':self.from_yp_cb, + '/etc/security/limits.conf':self.gen_limits_cb, + '/root/.ssh/authorized_keys':self.gen_root_keys_cb, + '/etc/sudoers':self.gen_sudoers}} + try: + self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data, self.core.fam) + except: + self.logger.error("Failed to load repos: %s, %s" % \ + (self.data, "%s/ssh" % (self.data))) + raise Bcfg2.Server.Plugin.PluginInitError + + def from_yp_cb(self, entry, metadata): + """Build password file from cached yp data.""" + fname = entry.attrib['name'].split('/')[-1] + entry.text = self.repository.entries["static.%s" % (fname)].data + entry.text += self.repository.entries["dyn.%s" % (fname)].data + perms = {'owner':'root', 'group':'root', 'perms':'0644'} + [entry.attrib.__setitem__(key, value) for (key, value) in \ + perms.iteritems()] + + 
def gen_limits_cb(self, entry, metadata): + """Build limits entries based on current ACLs.""" + entry.text = self.repository.entries["static.limits.conf"].data + superusers = self.repository.entries["superusers"].data.split() + useraccess = [line.split(':') for line in \ + self.repository.entries["useraccess"].data.split()] + users = [user for (user, host) in \ + useraccess if host == metadata.hostname.split('.')[0]] + perms = {'owner':'root', 'group':'root', 'perms':'0600'} + [entry.attrib.__setitem__(key, value) for (key, value) in \ + perms.iteritems()] + entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users]) + if "*" not in users: + entry.text += "* hard maxlogins 0\n" + + def gen_root_keys_cb(self, entry, metadata): + """Build root authorized keys file based on current ACLs.""" + superusers = self.repository.entries['superusers'].data.split() + try: + rootlike = [line.split(':', 1) for line in \ + self.repository.entries['rootlike'].data.split()] + superusers += [user for (user, host) in rootlike \ + if host == metadata.hostname.split('.')[0]] + except: + pass + rdata = self.repository.entries + entry.text = "".join([rdata["%s.key" % user].data for user \ + in superusers if \ + ("%s.key" % user) in rdata]) + perms = {'owner':'root', 'group':'root', 'perms':'0600'} + [entry.attrib.__setitem__(key, value) for (key, value) \ + in perms.iteritems()] + + def gen_sudoers(self, entry, metadata): + """Build root authorized keys file based on current ACLs.""" + superusers = self.repository.entries['superusers'].data.split() + try: + rootlike = [line.split(':', 1) for line in \ + self.repository.entries['rootlike'].data.split()] + superusers += [user for (user, host) in rootlike \ + if host == metadata.hostname.split('.')[0]] + except: + pass + entry.text = self.repository.entries['static.sudoers'].data + entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \ + for uname in superusers]) + perms = {'owner':'root', 'group':'root', 
'perms':'0440'} + [entry.attrib.__setitem__(key, value) for (key, value) \ + in perms.iteritems()] diff --git a/build/lib/Bcfg2/Server/Plugins/BB.py b/build/lib/Bcfg2/Server/Plugins/BB.py new file mode 100644 index 000000000..137142b66 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/BB.py @@ -0,0 +1,84 @@ +import lxml.etree +import Bcfg2.Server.Plugin +import glob +import os +import socket + +#manage boot symlinks + #add statistics check to do build->boot mods + +#map profiles: first array is not empty we replace the -p with a determined profile. +logger = Bcfg2.Server.Plugin.logger + +class BBfile(Bcfg2.Server.Plugin.XMLFileBacked): + """Class for bb files.""" + def Index(self): + """Build data into an xml object.""" + + try: + self.data = lxml.etree.XML(self.data) + except lxml.etree.XMLSyntaxError: + Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name) + return + self.tftppath = self.data.get('tftp', '/tftpboot') + self.macs = {} + self.users = {} + self.actions = {} + self.bootlinks = [] + + for node in self.data.findall('Node'): + iface = node.find('Interface') + if iface != None: + mac = "01-%s" % (iface.get('mac'.replace(':','-').lower())) + self.actions[node.get('name')] = node.get('action') + self.bootlinks.append((mac, node.get('action'))) + try: + ip = socket.gethostbyname(node.get('name')) + except: + logger.error("failed host resolution for %s" % node.get('name')) + + self.macs[node.get('name')] = (iface.get('mac'), ip) + else: + logger.error("%s" % lxml.etree.tostring(node)) + self.users[node.get('name')] = node.get('user',"").split(':') + + def enforce_bootlinks(self): + for mac, target in self.bootlinks: + path = self.tftppath + '/' + mac + if not os.path.islink(path): + logger.error("Boot file %s not a link" % path) + if target != os.readlink(path): + try: + os.unlink(path) + os.symlink(target, path) + except: + logger.error("Failed to modify link %s" % path) + +class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): + 
__child__ = BBfile + + +class BB(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector): + """The BB plugin maps users to machines and metadata to machines.""" + name = 'BB' + version = '$Revision$' + deprecated = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + self.store = BBDirectoryBacked(self.data, core.fam) + + def get_additional_data(self, metadata): + + users = {} + for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []): + pubkeys = [] + for fname in glob.glob('/home/%s/.ssh/*.pub'%user): + pubkeys.append(open(fname).read()) + + users[user] = pubkeys + + return dict([('users', users), + ('macs', self.store.entries['bb.xml'].macs)]) diff --git a/build/lib/Bcfg2/Server/Plugins/Base.py b/build/lib/Bcfg2/Server/Plugins/Base.py new file mode 100644 index 000000000..8e5ca1cd9 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Base.py @@ -0,0 +1,38 @@ +"""This module sets up a base list of configuration entries.""" +__revision__ = '$Revision$' + +import Bcfg2.Server.Plugin +import copy +import lxml.etree + +class Base(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Structure, + Bcfg2.Server.Plugin.XMLDirectoryBacked): + """This Structure is good for the pile of independent configs + needed for most actual systems. 
+ """ + name = 'Base' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + __child__ = Bcfg2.Server.Plugin.StructFile + + """Base creates independent clauses based on client metadata.""" + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Structure.__init__(self) + try: + Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, + self.data, + self.core.fam) + except OSError: + self.logger.error("Failed to load Base repository") + raise Bcfg2.Server.Plugin.PluginInitError + + def BuildStructures(self, metadata): + """Build structures for client described by metadata.""" + ret = lxml.etree.Element("Independent", version='2.0') + fragments = reduce(lambda x, y: x+y, + [base.Match(metadata) for base + in self.entries.values()], []) + [ret.append(copy.deepcopy(frag)) for frag in fragments] + return [ret] diff --git a/build/lib/Bcfg2/Server/Plugins/Bundler.py b/build/lib/Bcfg2/Server/Plugins/Bundler.py new file mode 100644 index 000000000..47cd7e2c4 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Bundler.py @@ -0,0 +1,76 @@ +"""This provides bundle clauses with translation functionality.""" +__revision__ = '$Revision$' + +import copy +import lxml.etree +import re + +import Bcfg2.Server.Plugin + +try: + import genshi.template + import genshi.template.base + import Bcfg2.Server.Plugins.SGenshi + have_genshi = True +except: + have_genshi = False + +class BundleFile(Bcfg2.Server.Plugin.StructFile): + def get_xml_value(self, metadata): + bundlename = self.name.split('/')[-1][:-4] + bundle = lxml.etree.Element('Bundle', name=bundlename) + [bundle.append(copy.deepcopy(item)) for item in self.Match(metadata)] + return bundle + +class Bundler(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Structure, + Bcfg2.Server.Plugin.XMLDirectoryBacked): + """The bundler creates dependent clauses based on the bundle/translation scheme from Bcfg1.""" + name = 'Bundler' + __version__ = '$Id$' + __author__ = 
'bcfg-dev@mcs.anl.gov' + patterns = re.compile('^(?P<name>.*)\.(xml|genshi)$') + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Structure.__init__(self) + self.encoding = core.encoding + self.__child__ = self.template_dispatch + try: + Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data, self.core.fam) + except OSError: + self.logger.error("Failed to load Bundle repository") + raise Bcfg2.Server.Plugin.PluginInitError + + def template_dispatch(self, name): + if name.endswith('.xml'): + return BundleFile(name) + elif name.endswith('.genshi'): + if have_genshi: + spec = Bcfg2.Server.Plugin.Specificity() + return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name, + spec, + self.encoding) + + def BuildStructures(self, metadata): + """Build all structures for client (metadata).""" + bundleset = [] + for bundlename in metadata.bundles: + entries = [item for (key, item) in self.entries.iteritems() if \ + self.patterns.match(key).group('name') == bundlename] + if len(entries) == 0: + continue + elif len(entries) == 1: + try: + bundleset.append(entries[0].get_xml_value(metadata)) + except genshi.template.base.TemplateError, t: + self.logger.error("Bundler: Failed to template genshi bundle %s" \ + % (bundlename)) + self.logger.error(t) + except: + self.logger.error("Bundler: Unexpected bundler error for %s" \ + % (bundlename), exc_info=1) + else: + self.logger.error("Got multiple matches for bundle %s" \ + % (bundlename)) + return bundleset diff --git a/build/lib/Bcfg2/Server/Plugins/Bzr.py b/build/lib/Bcfg2/Server/Plugins/Bzr.py new file mode 100644 index 000000000..a9a5eb814 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Bzr.py @@ -0,0 +1,36 @@ +import Bcfg2.Server.Plugin +from bzrlib.workingtree import WorkingTree +from bzrlib import errors + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Bzr') + +class Bzr(Bcfg2.Server.Plugin.Plugin, + 
def process_delta(data, delta):
    """Apply a .cat or .diff delta entry to data and return the result.

    data  -- base file content (string)
    delta -- SpecificData-like object; delta.specific.delta selects the
             format: 'cat' (+/- line add/remove) or 'diff' (unified patch
             applied via the external `patch` utility)

    Raises PluginExecutionError when `patch` fails.
    """
    if not delta.specific.delta:
        return data
    if delta.specific.delta == 'cat':
        datalines = data.split('\n')
        for line in delta.data.split('\n'):
            if not line:
                continue
            if line[0] == '+':
                datalines.append(line[1:])
            elif line[0] == '-':
                if line[1:] in datalines:
                    datalines.remove(line[1:])
        return "\n".join(datalines)
    elif delta.specific.delta == 'diff':
        basehandle, basename = tempfile.mkstemp()
        # write through the fd mkstemp returned instead of opening the
        # path a second time and os.close()-ing the abandoned descriptor
        basefile = os.fdopen(basehandle, 'w')
        basefile.write(data)
        basefile.close()
        dhandle, dname = tempfile.mkstemp()
        dfile = os.fdopen(dhandle, 'w')
        dfile.write(delta.data)
        dfile.close()
        ret = os.system("patch -uf %s < %s > /dev/null 2>&1"
                        % (basename, dname))
        output = open(basename, 'r').read()
        for fname in [basename, dname]:
            os.unlink(fname)
        if ret >> 8 != 0:
            # call-form raise is equivalent to the old "raise E, arg"
            # statement and also valid on newer interpreters
            raise Bcfg2.Server.Plugin.PluginExecutionError(('delta', delta))
        return output
self.get_pertinent_entries(metadata) + basefile = used.pop(0) + data = basefile.data + if entry.tag == 'Path': + entry.set('type', 'file') + for delta in used: + data = data.strip() + data = process_delta(data, delta) + if used: + data += '\n' + if entry.get('encoding') == 'base64': + entry.text = binascii.b2a_base64(data) + else: + entry.text = unicode(data, self.encoding) + if entry.text in ['', None]: + entry.set('empty', 'true') + + def list_accept_choices(self, metadata): + '''return a list of candidate pull locations''' + used = self.get_pertinent_entries(metadata) + ret = [] + if used: + ret.append(used[0].specific) + if not ret[0].hostname: + ret.append(Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)) + return ret + + def build_filename(self, specific): + bfname = self.path + '/' + self.path.split('/')[-1] + if specific.all: + return bfname + elif specific.group: + return "%s.G%d_%s" % (bfname, specific.prio, specific.group) + elif specific.hostname: + return "%s.H_%s" % (bfname, specific.hostname) + + def write_update(self, specific, new_entry, log): + if 'text' in new_entry: + name = self.build_filename(specific) + open(name, 'w').write(new_entry['text']) + if log: + logger.info("Wrote file %s" % name) + badattr = [attr for attr in ['owner', 'group', 'perms'] if attr in new_entry] + if badattr: + metadata_updates = {} + metadata_updates.update(self.metadata) + for attr in badattr: + metadata_updates[attr] = new_entry.get(attr) + if self.infoxml: + infoxml = lxml.etree.Element('FileInfo') + infotag = lxml.etree.SubElement(infoxml, 'Info') + [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \ + for attr in metadata_updates] + ofile = open(self.path + "/info.xml","w") + ofile.write(lxml.etree.tostring(infoxml, pretty_print=True)) + ofile.close() + if log: + logger.info("Wrote file %s" % (self.path + "/info.xml")) + else: + infofile = open(self.path + '/:info', 'w') + for x in metadata_updates.iteritems(): + infofile.write("%s: %s\n" % x) 
+ infofile.close() + if log: + logger.info("Wrote file %s" % infofile.name) + +class Cfg(Bcfg2.Server.Plugin.GroupSpool, + Bcfg2.Server.Plugin.PullTarget): + """This generator in the configuration file repository for Bcfg2.""" + name = 'Cfg' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + es_cls = CfgEntrySet + es_child_cls = Bcfg2.Server.Plugin.SpecificData + + def AcceptChoices(self, entry, metadata): + return self.entries[entry.get('name')].list_accept_choices(metadata) + + def AcceptPullData(self, specific, new_entry, log): + return self.entries[new_entry.get('name')].write_update(specific, new_entry, log) diff --git a/build/lib/Bcfg2/Server/Plugins/Cvs.py b/build/lib/Bcfg2/Server/Plugins/Cvs.py new file mode 100644 index 000000000..ea898c023 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Cvs.py @@ -0,0 +1,47 @@ +import os +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Cvs') + +class Cvs(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """CVS is a version plugin for dealing with Bcfg2 repository.""" + name = 'Cvs' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + self.core = core + self.datastore = datastore + + # path to cvs directory for Bcfg2 repo + cvs_dir = "%s/CVSROOT" % datastore + + # Read revision from Bcfg2 repo + if os.path.isdir(cvs_dir): + self.get_revision() + else: + logger.error("%s is not a directory" % cvs_dir) + raise Bcfg2.Server.Plugin.PluginInitError + + logger.debug("Initialized cvs plugin with cvs directory = %s" % cvs_dir) + + def get_revision(self): + """Read cvs revision information for the Bcfg2 repository.""" + try: + data = Popen("env LC_ALL=C cvs log", + shell=True, + cwd=self.datastore, + stdout=PIPE).stdout.readlines() + revision = data[3].strip('\n') + 
except IndexError: + logger.error("Failed to read cvs log; disabling cvs support") + logger.error('''Ran command "cvs log %s"''' % (self.datastore)) + logger.error("Got output: %s" % data) + raise Bcfg2.Server.Plugin.PluginInitError + diff --git a/build/lib/Bcfg2/Server/Plugins/DBStats.py b/build/lib/Bcfg2/Server/Plugins/DBStats.py new file mode 100644 index 000000000..2712cd45f --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/DBStats.py @@ -0,0 +1,110 @@ +import binascii +import difflib +import logging +import lxml.etree +import platform +import time + +try: + from django.core.exceptions import MultipleObjectsReturned +except ImportError: + pass + +import Bcfg2.Server.Plugin +import Bcfg2.Server.Reports.importscript +from Bcfg2.Server.Reports.reports.models import Client +import Bcfg2.Server.Reports.settings +from Bcfg2.Server.Reports.updatefix import update_database +# for debugging output only +logger = logging.getLogger('Bcfg2.Plugins.DBStats') + +class DBStats(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.ThreadedStatistics, + Bcfg2.Server.Plugin.PullSource): + name = 'DBStats' + __version__ = '$Id$' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore) + Bcfg2.Server.Plugin.PullSource.__init__(self) + self.cpath = "%s/Metadata/clients.xml" % datastore + self.core = core + logger.debug("Searching for new models to add to the statistics database") + try: + update_database() + except Exception, inst: + logger.debug(str(inst)) + logger.debug(str(type(inst))) + + def handle_statistic(self, metadata, data): + newstats = data.find("Statistics") + newstats.set('time', time.asctime(time.localtime())) + # ick + data = lxml.etree.tostring(newstats) + ndx = lxml.etree.XML(data) + e = lxml.etree.Element('Node', name=metadata.hostname) + e.append(ndx) + container = lxml.etree.Element("ConfigStatistics") + container.append(e) + + # FIXME need to 
    def GetExtra(self, client):
        """Return (kind, name) pairs for entries present on the client
        but not part of its configuration (the 'extra' set of the
        client's current interaction)."""
        c_inst = Client.objects.filter(name=client)[0]
        return [(a.entry.kind, a.entry.name) for a in
                c_inst.current_interaction.extra()]

    def GetCurrentEntry(self, client, e_type, e_name):
        """Return [owner, group, perms, diff] for a bad entry of the client.

        The last element is the reconstructed current file content (text),
        decoded bytes (binary), or None when no diff was recorded.
        Raises PluginExecutionError when the client or entry is unknown,
        or when a binary diff was too large to be stored.
        """
        try:
            c_inst = Client.objects.filter(name=client)[0]
        except IndexError:
            self.logger.error("Unknown client: %s" % client)
            raise Bcfg2.Server.Plugin.PluginExecutionError
        result = c_inst.current_interaction.bad().filter(entry__kind=e_type,
                                                         entry__name=e_name)
        if not result:
            raise Bcfg2.Server.Plugin.PluginExecutionError
        entry = result[0]
        ret = []
        data = ('owner', 'group', 'perms')
        for t in data:
            # prefer the recorded current_<attr>; fall back to the desired
            # value when the current one was not captured
            if getattr(entry.reason, "current_%s" % t) == '':
                ret.append(getattr(entry.reason, t))
            else:
                ret.append(getattr(entry.reason, "current_%s" % t))

        if entry.reason.current_diff != '':
            if entry.reason.is_binary:
                # binary payloads are stored base64-encoded
                ret.append(binascii.a2b_base64(entry.reason.current_diff))
            else:
                # restore side 1 (the client's current content) from the
                # stored ndiff-style diff
                ret.append('\n'.join(difflib.restore(\
                    entry.reason.current_diff.split('\n'), 1)))
        elif entry.reason.is_binary:
            # If len is zero the object was too large to store
            raise Bcfg2.Server.Plugin.PluginExecutionError
        else:
            ret.append(None)
        return ret
class Darcs(Bcfg2.Server.Plugin.Plugin,
            Bcfg2.Server.Plugin.Version):
    """Darcs is a version plugin for dealing with Bcfg2 repos."""
    name = 'Darcs'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'
    experimental = True

    def __init__(self, core, datastore):
        """Verify the repository is darcs-managed and read its revision.

        Raises PluginInitError when datastore has no _darcs directory or
        the changeset cannot be read.
        """
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Version.__init__(self)
        self.core = core
        self.datastore = datastore

        # path to darcs directory for bcfg2 repo
        darcs_dir = "%s/_darcs" % datastore

        # Read changeset from bcfg2 repo
        if os.path.isdir(darcs_dir):
            self.get_revision()
        else:
            logger.error("%s is not present." % darcs_dir)
            raise Bcfg2.Server.Plugin.PluginInitError

        logger.debug("Initialized Darcs plugin with darcs directory = %s" % darcs_dir)

    def get_revision(self):
        """Read Darcs changeset information for the Bcfg2 repository.

        Returns the first line of `darcs changes` output (the latest
        patch header).  Raises PluginInitError on any failure.
        """
        # Bug fix: keep `data` bound even when Popen itself raises, so the
        # diagnostic logging below cannot fail with a NameError and mask
        # the original error.
        data = None
        try:
            data = Popen("env LC_ALL=C darcs changes",
                         shell=True,
                         cwd=self.datastore,
                         stdout=PIPE).stdout.readlines()
            revision = data[0].strip('\n')
        except:
            logger.error("Failed to read darcs repository; disabling Darcs support")
            logger.error('''Ran command "darcs changes" from directory "%s"''' % (self.datastore))
            logger.error("Got output: %s" % data)
            raise Bcfg2.Server.Plugin.PluginInitError
        return revision
class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
    """One whitelist/blacklist XML file; reparsed on every change event."""
    def handle_event(self, event):
        Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
        self.contents = lxml.etree.XML(self.data)

    def get_decisions(self):
        """Return (type, name) pairs for every Decision element in the file."""
        return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]

class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
    def __init__(self, path, fam, encoding):
        """Container for decision specification files.

        Arguments:
        - `path`: repository path
        - `fam`: reference to the file monitor
        - `encoding`: XML character encoding

        """
        # filenames must match (white|black)list
        pattern = '(white|black)list'
        Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
                                              DecisionFile, encoding)
        try:
            fam.AddMonitor(path, self)
        except OSError, e:
            logger.error('Adding filemonitor for %s failed. '
                         'Make sure directory exists' % path)
            raise Bcfg2.Server.Plugin.PluginInitError(e)

    def HandleEvent(self, event):
        # events naming the directory itself are ignored; everything else
        # is forwarded to the EntrySet machinery
        if event.filename != self.path:
            return self.handle_event(event)

    def GetDecisions(self, metadata, mode):
        """Collect decisions for this client from files whose basename
        starts with *mode* ('white' or 'black')."""
        ret = []
        candidates = [c for c in self.get_matching(metadata)
                      if c.name.split('/')[-1].startswith(mode)]
        for c in candidates:
            ret += c.get_decisions()
        return ret

class Decisions(DecisionSet,
                Bcfg2.Server.Plugin.Plugin,
                Bcfg2.Server.Plugin.Decision):
    # Decision plugin: serves client white/blacklist decisions from the
    # Decisions/ directory of the repository
    name = 'Decisions'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'

    def __init__(self, core, datastore):
        """Decisions plugins

        Arguments:
        - `core`: Bcfg2.Core instance
        - `datastore`: File repository location

        """
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Decision.__init__(self)
        DecisionSet.__init__(self, self.data, core.fam, core.encoding)
+"""This plugin provides automatic dependency handling.""" +__revision__ = '$Revision$' + +import lxml.etree + +import Bcfg2.Server.Plugin + +class DNode(Bcfg2.Server.Plugin.INode): + """DNode provides supports for single predicate types for dependencies.""" + raw = {'Group':"lambda x:'%s' in x.groups and predicate(x)"} + containers = ['Group'] + + def __init__(self, data, idict, parent=None): + self.data = data + self.contents = {} + if parent == None: + self.predicate = lambda x:True + else: + predicate = parent.predicate + if data.tag in self.raw.keys(): + self.predicate = eval(self.raw[data.tag] % (data.get('name')), {'predicate':predicate}) + else: + raise Exception + mytype = self.__class__ + self.children = [] + for item in data.getchildren(): + if item.tag in self.containers: + self.children.append(mytype(item, idict, self)) + else: + data = [(child.tag, child.get('name')) for child in item.getchildren()] + try: + self.contents[item.tag][item.get('name')] = data + except KeyError: + self.contents[item.tag] = {item.get('name'):data} + +class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc): + __node__ = DNode + +class Deps(Bcfg2.Server.Plugin.PrioDir, + Bcfg2.Server.Plugin.StructureValidator): + name = 'Deps' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + __child__ = DepXMLSrc + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore) + Bcfg2.Server.Plugin.StructureValidator.__init__(self) + self.cache = {} + + def HandleEvent(self, event): + self.cache = {} + Bcfg2.Server.Plugin.PrioDir.HandleEvent(self, event) + + def validate_structures(self, metadata, structures): + entries = [] + prereqs = [] + for structure in structures: + for entry in structure.getchildren(): + if (entry.tag, entry.get('name')) not in entries \ + and not isinstance(entry, lxml.etree._Comment): + entries.append((entry.tag, entry.get('name'))) + entries.sort() + entries = tuple(entries) + gdata = list(metadata.groups) + gdata.sort() + 
def linesub(pattern, repl, filestring):
    """Substitutes instances of pattern with repl in filestring.

    The substitution is applied line by line; a None filestring is
    treated as empty.  Returns the edited text joined with newlines.
    """
    if filestring is None:
        filestring = ''
    # Bug fix: the original passed the whole `filestring` to re.sub inside
    # the per-line loop, so the entire (substituted) content was appended
    # once per line, duplicating the file N times.  Substitute into each
    # individual line instead.
    return '\n'.join(re.sub(pattern, repl, line)
                     for line in filestring.split('\n'))
class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
    def __init__(self, basename, path, entry_type, encoding):
        # ignore editor temp/backup files and previously generated
        # per-host .H_* output files in this directory
        self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" %path.split('/')[-1])
        Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, entry_type, encoding)
        # per-client probe output, filled in by Editor.ReceiveData
        self.inputs = dict()

    def bind_entry(self, entry, metadata):
        """Bind the edited file content (root:root, 0644) onto entry."""
        client = metadata.hostname
        filename = entry.get('name')
        permdata = {'owner':'root', 'group':'root'}
        permdata['perms'] = '0644'
        [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
        # run the edit directives over the content probed from the client
        entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
        if not entry.text:
            entry.set('empty', 'true')
        try:
            # cache the generated content as a host-specific file;
            # deliberately best-effort (failures are swallowed)
            f = open('%s/%s.H_%s' %(self.path, filename.split('/')[-1], client), 'w')
            f.write(entry.text)
            f.close()
        except:
            pass

    def get_client_data(self, client):
        # raises KeyError if no probe data has been received for client
        return self.inputs[client]


class Editor(Bcfg2.Server.Plugin.GroupSpool,
             Bcfg2.Server.Plugin.Probing):
    """Probes clients for current file content, then serves it back with
    the repository's edit directives applied."""
    name = 'Editor'
    __version__ = '$Id$'
    __author__ = 'bcfg2-dev@mcs.anl.gov'
    filename_pattern = 'edits'
    es_child_cls = EditDirectives
    es_cls = EditEntrySet

    def GetProbes(self, _):
        '''Return a set of probes for execution on client'''
        probelist = list()
        for name in self.entries.keys():
            probe = lxml.etree.Element('probe')
            probe.set('name', name)
            probe.set('source', "Editor")
            # the probe captures the file's current content on the client
            probe.text = "cat %s" % name
            probelist.append(probe)
        return probelist

    def ReceiveData(self, client, datalist):
        # stash per-client probe output for bind_entry to edit later
        for data in datalist:
            self.entries[data.get('name')].inputs[client.hostname] = data.text
class Fossil(Bcfg2.Server.Plugin.Plugin,
             Bcfg2.Server.Plugin.Version):
    """Fossil is a version plugin for dealing with Bcfg2 repos."""
    name = 'Fossil'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'

    def __init__(self, core, datastore):
        """Verify the repository is a fossil checkout and read its revision.

        Raises PluginInitError when datastore is not a directory or holds
        no _FOSSIL_ checkout file.
        """
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        # Consistency fix: the other Version plugins in this tree (Darcs,
        # Git, Hg) initialize the Version mixin; Fossil previously skipped
        # this call.
        Bcfg2.Server.Plugin.Version.__init__(self)
        self.core = core
        self.datastore = datastore

        # path to fossil file for bcfg2 repo
        fossil_file = "%s/_FOSSIL_" % datastore

        # Read revision from bcfg2 repo
        if os.path.isfile(fossil_file):
            revision = self.get_revision()
        elif not os.path.isdir(datastore):
            logger.error("%s is not a directory" % datastore)
            raise Bcfg2.Server.Plugin.PluginInitError
        else:
            logger.error("%s is not a file" % fossil_file)
            raise Bcfg2.Server.Plugin.PluginInitError

        logger.debug("Initialized Fossil.py plugin with %(ffile)s at revision %(frev)s" \
            % {'ffile': fossil_file, 'frev': revision})

    def get_revision(self):
        """Read fossil revision information for the Bcfg2 repository.

        Parses the 'checkout:' line of `fossil info` and returns its first
        token (the checkout hash).  Raises PluginInitError on failure.
        """
        try:
            data = Popen("env LC_ALL=C fossil info",
                         shell=True,
                         cwd=self.datastore,
                         stdout=PIPE).stdout.readlines()
            # the "checkout:" line holds "<hash> <timestamp>"
            revline = [line.split(': ')[1].strip() for line in data if \
                       line.split(': ')[0].strip() == 'checkout'][-1]
            revision = revline.split(' ')[0]
        except IndexError:
            logger.error("Failed to read fossil info; disabling fossil support")
            logger.error('''Ran command "fossil info" from directory "%s"''' % (self.datastore))
            logger.error("Got output: %s" % data)
            raise Bcfg2.Server.Plugin.PluginInitError
        return revision
class Git(Bcfg2.Server.Plugin.Plugin,
          Bcfg2.Server.Plugin.Version):
    """Git is a version plugin for dealing with Bcfg2 repos."""
    name = 'Git'
    __version__ = '$Id$'
    __author__ = 'bcfg-dev@mcs.anl.gov'

    def __init__(self, core, datastore):
        # Raises PluginInitError when datastore has no .git directory or
        # the revision cannot be read.
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Version.__init__(self)
        self.core = core
        self.datastore = datastore

        # path to git directory for bcfg2 repo
        git_dir = "%s/.git" % datastore

        # Read revision from bcfg2 repo
        if os.path.isdir(git_dir):
            self.get_revision()
        else:
            logger.error("%s is not a directory" % git_dir)
            raise Bcfg2.Server.Plugin.PluginInitError

        logger.debug("Initialized git plugin with git directory %s" % git_dir)

    def get_revision(self):
        """Read git revision information for the Bcfg2 repository.

        Returns dulwich's Repo.head() value -- presumably the SHA that
        HEAD points at; TODO confirm against the installed dulwich version.
        """
        try:
            repo = Repo(self.datastore)
            revision = repo.head()
        except:
            logger.error("Failed to read git repository; disabling git support")
            raise Bcfg2.Server.Plugin.PluginInitError
        return revision
class PatternMap(object):
    """Compiled form of one GroupPattern entry.

    Exactly one of *pattern* (a regular expression) or *rangestr* (a name
    template containing [[digit-range]] markers) must be supplied.
    process(name) returns the expanded group list for matching names, or
    None when the name does not match.
    """
    range_finder = '\\[\\[[\d\-,]+\\]\\]'

    def __init__(self, pattern, rangestr, groups):
        self.pattern = pattern
        self.rangestr = rangestr
        self.groups = groups
        if pattern is not None:
            # plain regex form: matching delegates to process_re
            self.re = re.compile(pattern)
            self.process = self.process_re
        elif rangestr is not None:
            # range form: capture each digit run in the name, and build a
            # PackedDigitRange for the range spec found at the same spot
            self.process = self.process_range
            self.re = re.compile('^' + re.subn(self.range_finder, '(\d+)', rangestr)[0])
            dmatcher = re.compile(re.subn(self.range_finder, '\\[\\[([\d\-,]+)\\]\\]', rangestr)[0])
            self.dranges = [PackedDigitRange(x) for x in dmatcher.match(rangestr).groups()]
        else:
            raise Exception

    def process_range(self, name):
        """Return the group list when every captured digit run is in range."""
        found = self.re.match(name)
        if found is None:
            return None
        for idx, digits in enumerate(found.groups()):
            if not self.dranges[idx].includes(digits):
                return None
        return self.groups

    def process_re(self, name):
        """Return groups with $N placeholders expanded, or None on no match."""
        found = self.re.match(name)
        if found is None:
            return None
        captured = found.groups()
        expanded = []
        for template in self.groups:
            # substitute $1..$N in ascending order, as the original did
            for pos, text in enumerate(captured):
                template = template.replace('$%d' % (pos + 1), text)
            expanded.append(template)
        return expanded
class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
                    Bcfg2.Server.Plugin.Connector):
    """Connector plugin: assigns clients extra groups based on hostname
    patterns read from GroupPatterns/config.xml."""
    name = "GroupPatterns"
    experimental = True

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Connector.__init__(self)
        # config.xml is watched via the server's file monitor and
        # reparsed on change
        self.config = PatternFile(self.data + '/config.xml',
                                  core.fam)

    def get_additional_groups(self, metadata):
        # Connector hook: groups whose patterns match this client's hostname
        return self.config.process_patterns(metadata.hostname)
+ + +""" +import re +import Bcfg2.Server.Plugin + +class Guppy(Bcfg2.Server.Plugin.Plugin): + """Guppy is a debugging plugin to help trace memory leaks""" + name = 'Guppy' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + + experimental = True + __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable','Disable'] + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + + self.Enable() + + def Enable(self): + """Enable remote debugging""" + try: + from guppy.heapy import Remote + Remote.on() + except: + self.logger.error("Failed to create Heapy context") + raise Bcfg2.Server.Plugin.PluginInitError + + def Disable(self): + """Disable remote debugging""" + try: + from guppy.heapy import Remote + Remote.off() + except: + self.logger.error("Failed to disable Heapy") + raise Bcfg2.Server.Plugin.PluginInitError + diff --git a/build/lib/Bcfg2/Server/Plugins/Hg.py b/build/lib/Bcfg2/Server/Plugins/Hg.py new file mode 100644 index 000000000..3f2864a1c --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Hg.py @@ -0,0 +1,47 @@ +import os +from mercurial import ui, hg +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Mercurial') + +class Hg(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Mercurial is a version plugin for dealing with Bcfg2 repository.""" + name = 'Mercurial' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + # path to hg directory for Bcfg2 repo + hg_dir = "%s/.hg" % datastore + + # Read changeset from bcfg2 repo + if os.path.isdir(hg_dir): + self.get_revision() + else: + logger.error("%s is not present." 
    def get_revision(self):
        """Read hg revision information for the Bcfg2 repository.

        Opens the repository through mercurial's internal API and returns
        the local integer revision number of the changelog tip.  Raises
        PluginInitError on any failure.
        """
        try:
            repo_path = "%s/" % self.datastore
            repo = hg.repository(ui.ui(), repo_path)
            tip = repo.changelog.tip()
            # map the tip node id to its local revision number
            revision = repo.changelog.rev(tip)
        except:
            logger.error("Failed to read hg repository; disabling mercurial support")
            raise Bcfg2.Server.Plugin.PluginInitError
        return revision
{'zone':loader.get_template('zone.tmpl'), + 'reversesoa':loader.get_template('reversesoa.tmpl'), + 'named':loader.get_template('named.tmpl'), + 'namedviews':loader.get_template('namedviews.tmpl'), + 'reverseapp':loader.get_template('reverseappend.tmpl'), + 'dhcp':loader.get_template('dhcpd.tmpl'), + 'hosts':loader.get_template('hosts.tmpl'), + 'hostsapp':loader.get_template('hostsappend.tmpl'), + } + self.Entries['ConfigFile'] = {} + self.__rmi__ = ['rebuildState'] + try: + self.rebuildState(None) + except: + raise PluginInitError + + def FetchFile(self, entry, metadata): + """Return prebuilt file data.""" + fname = entry.get('name').split('/')[-1] + if not fname in self.filedata: + raise PluginExecutionError + perms = {'owner':'root', 'group':'root', 'perms':'644'} + [entry.attrib.__setitem__(key, value) for (key, value) in perms.iteritems()] + entry.text = self.filedata[fname] + + def BuildStructures(self, metadata): + """Build hostbase bundle.""" + if metadata.hostname not in self.dnsservers or metadata.hostname not in self.dhcpservers: + return [] + output = Element("Bundle", name='hostbase') + if metadata.hostname in self.dnsservers: + for configfile in self.Entries['ConfigFile']: + if re.search('/etc/bind/', configfile): + SubElement(output, "ConfigFile", name=configfile) + if metadata.hostname in self.dhcpservers: + SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf") + return [output] + + def rebuildState(self, _): + """Pre-cache all state information for hostbase config files + callable as an XMLRPC function. 
+ + """ + self.buildZones() + self.buildDHCP() + self.buildHosts() + self.buildHostsLPD() + self.buildPrinters() + self.buildNetgroups() + return True + + def buildZones(self): + """Pre-build and stash zone files.""" + cursor = connection.cursor() + + cursor.execute("SELECT id, serial FROM hostbase_zone") + zones = cursor.fetchall() + + for zone in zones: + # update the serial number for all zone files + todaydate = (strftime('%Y%m%d')) + try: + if todaydate == str(zone[1])[:8]: + serial = zone[1] + 1 + else: + serial = int(todaydate) * 100 + except (KeyError): + serial = int(todaydate) * 100 + cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0])) + + cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'") + zones = cursor.fetchall() + + iplist = [] + hosts = {} + + for zone in zones: + zonefile = cStringIO.StringIO() + externalzonefile = cStringIO.StringIO() + cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z + INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id + WHERE z.zone_id = \'%s\'""" % zone[0]) + nameservers = cursor.fetchall() + cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z + INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id + WHERE z.zone_id = \'%s\'""" % zone[0]) + addresses = cursor.fetchall() + cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z + INNER JOIN hostbase_mx m ON z.mx_id = m.id + WHERE z.zone_id = \'%s\'""" % zone[0]) + mxs = cursor.fetchall() + context = Context({ + 'zone': zone, + 'nameservers': nameservers, + 'addresses': addresses, + 'mxs': mxs + }) + zonefile.write(self.templates['zone'].render(context)) + externalzonefile.write(self.templates['zone'].render(context)) + + querystring = """SELECT h.hostname, p.ip_addr, + n.name, c.cname, m.priority, m.mx, n.dns_view + FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) 
+ INNER JOIN hostbase_name n ON p.id = n.ip_id) + INNER JOIN hostbase_name_mxs x ON n.id = x.name_id) + INNER JOIN hostbase_mx m ON m.id = x.mx_id) + LEFT JOIN hostbase_cname c ON n.id = c.name_id + WHERE n.name LIKE '%%%%%s' + AND h.status = 'active' + ORDER BY h.hostname, n.name, p.ip_addr + """ % zone[1] + cursor.execute(querystring) + zonehosts = cursor.fetchall() + prevhost = (None, None, None, None) + cnames = cStringIO.StringIO() + cnamesexternal = cStringIO.StringIO() + for host in zonehosts: + if not host[2].split(".", 1)[1] == zone[1]: + zonefile.write(cnames.getvalue()) + externalzonefile.write(cnamesexternal.getvalue()) + cnames = cStringIO.StringIO() + cnamesexternal = cStringIO.StringIO() + continue + if not prevhost[1] == host[1] or not prevhost[2] == host[2]: + zonefile.write(cnames.getvalue()) + externalzonefile.write(cnamesexternal.getvalue()) + cnames = cStringIO.StringIO() + cnamesexternal = cStringIO.StringIO() + zonefile.write("%-32s%-10s%-32s\n" % + (host[2].split(".", 1)[0], 'A', host[1])) + zonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + if host[6] == 'global': + externalzonefile.write("%-32s%-10s%-32s\n" % + (host[2].split(".", 1)[0], 'A', host[1])) + externalzonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + elif not prevhost[5] == host[5]: + zonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + if host[6] == 'global': + externalzonefile.write("%-32s%-10s%-3s%s.\n" % + ('', 'MX', host[4], host[5])) + + if host[3]: + try: + if host[3].split(".", 1)[1] == zone[1]: + cnames.write("%-32s%-10s%-32s\n" % + (host[3].split(".", 1)[0], + 'CNAME',host[2].split(".", 1)[0])) + if host[6] == 'global': + cnamesexternal.write("%-32s%-10s%-32s\n" % + (host[3].split(".", 1)[0], + 'CNAME',host[2].split(".", 1)[0])) + else: + cnames.write("%-32s%-10s%-32s\n" % + (host[3]+".", + 'CNAME', + host[2].split(".", 1)[0])) + if host[6] == 'global': + cnamesexternal.write("%-32s%-10s%-32s\n" % + 
(host[3]+".", + 'CNAME', + host[2].split(".", 1)[0])) + + except: + pass + prevhost = host + zonefile.write(cnames.getvalue()) + externalzonefile.write(cnamesexternal.getvalue()) + zonefile.write("\n\n%s" % zone[9]) + externalzonefile.write("\n\n%s" % zone[9]) + self.filedata[zone[1]] = zonefile.getvalue() + self.filedata[zone[1] + ".external"] = externalzonefile.getvalue() + zonefile.close() + externalzonefile.close() + self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile + self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile + + cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'") + reversezones = cursor.fetchall() + + reversenames = [] + for reversezone in reversezones: + cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z + INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id + WHERE z.zone_id = \'%s\'""" % reversezone[0]) + reverse_nameservers = cursor.fetchall() + + context = Context({ + 'inaddr': reversezone[1].rstrip('.rev'), + 'zone': reversezone, + 'nameservers': reverse_nameservers, + }) + + self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context) + self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context) + self.filedata[reversezone[1]] += reversezone[9] + self.filedata[reversezone[1] + '.external'] += reversezone[9] + + subnet = reversezone[1].split(".") + subnet.reverse() + reversenames.append((reversezone[1].rstrip('.rev'),".".join(subnet[1:]))) + + for filename in reversenames: + cursor.execute(""" + SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h + INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON n.ip_id = p.id + WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr + """ % filename[1]) + reversehosts = cursor.fetchall() + zonefile = 
cStringIO.StringIO() + externalzonefile = cStringIO.StringIO() + if len(filename[0].split(".")) == 2: + originlist = [] + [originlist.append((".".join([ip[1].split(".")[2], filename[0]]), + ".".join([filename[1], ip[1].split(".")[2]]))) + for ip in reversehosts + if (".".join([ip[1].split(".")[2], filename[0]]), + ".".join([filename[1], ip[1].split(".")[2]])) not in originlist] + for origin in originlist: + hosts = [(host[1].split("."), host[0]) + for host in reversehosts + if host[1].rstrip('0123456789').rstrip('.') == origin[1]] + hosts_external = [(host[1].split("."), host[0]) + for host in reversehosts + if (host[1].rstrip('0123456789').rstrip('.') == origin[1] + and host[2] == 'global')] + context = Context({ + 'hosts': hosts, + 'inaddr': origin[0], + 'fileorigin': filename[0], + }) + zonefile.write(self.templates['reverseapp'].render(context)) + context = Context({ + 'hosts': hosts_external, + 'inaddr': origin[0], + 'fileorigin': filename[0], + }) + externalzonefile.write(self.templates['reverseapp'].render(context)) + else: + originlist = [filename[0]] + hosts = [(host[1].split("."), host[0]) + for host in reversehosts + if (host[1].split("."), host[0]) not in hosts] + hosts_external = [(host[1].split("."), host[0]) + for host in reversehosts + if ((host[1].split("."), host[0]) not in hosts_external + and host[2] == 'global')] + context = Context({ + 'hosts': hosts, + 'inaddr': filename[0], + 'fileorigin': None, + }) + zonefile.write(self.templates['reverseapp'].render(context)) + context = Context({ + 'hosts': hosts_external, + 'inaddr': filename[0], + 'fileorigin': None, + }) + externalzonefile.write(self.templates['reverseapp'].render(context)) + self.filedata['%s.rev' % filename[0]] += zonefile.getvalue() + self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue() + zonefile.close() + externalzonefile.close() + self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile + 
self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile + + ## here's where the named.conf file gets written + context = Context({ + 'zones': zones, + 'reverses': reversenames, + }) + self.filedata['named.conf'] = self.templates['named'].render(context) + self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile + self.filedata['named.conf.views'] = self.templates['namedviews'].render(context) + self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile + + + def buildDHCP(self): + """Pre-build dhcpd.conf and stash in the filedata table.""" + + # fetches all the hosts with DHCP == True + cursor = connection.cursor() + cursor.execute(""" + SELECT hostname, mac_addr, ip_addr + FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip ip ON i.id = ip.interface_id + WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> '' + AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown' + ORDER BY h.hostname, i.mac_addr + """) + + dhcphosts = cursor.fetchall() + count = 0 + hosts = [] + hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]] + if len(dhcphosts) > 1: + for x in range(1, len(dhcphosts)): + # if an interface has 2 or more ip addresses + # adds the ip to the current interface + if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]: + hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]]) + # if a host has 2 or more interfaces + # writes the current one and grabs the next + elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]: + hosts.append(hostdata) + count += 1 + hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]] + # new host found, writes current data to the template + else: + hosts.append(hostdata) + count = 0 + hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]] + #makes sure the last of the data gets written out + if 
hostdata not in hosts: + hosts.append(hostdata) + + context = Context({ + 'hosts': hosts, + 'numips': len(hosts), + }) + + self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context) + self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile + + + def buildHosts(self): + """Pre-build and stash /etc/hosts file.""" + + append_data = [] + + cursor = connection.cursor() + cursor.execute(""" + SELECT hostname FROM hostbase_host ORDER BY hostname + """) + hostbase = cursor.fetchall() + domains = [host[0].split(".", 1)[1] for host in hostbase] + domains_set = Set(domains) + domain_data = [(domain, domains.count(domain)) for domain in domains_set] + domain_data.sort() + + cursor.execute(""" + SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr + """) + ips = cursor.fetchall() + three_octets = [ip[0].rstrip('0123456789').rstrip('.') \ + for ip in ips] + three_octets_set = Set(three_octets) + three_octets_data = [(octet, three_octets.count(octet)) \ + for octet in three_octets_set] + three_octets_data.sort() + + for three_octet in three_octets_data: + querystring = """SELECT h.hostname, h.primary_user, + p.ip_addr, n.name, c.cname + FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON p.id = n.ip_id) + LEFT JOIN hostbase_cname c ON n.id = c.name_id + WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0] + cursor.execute(querystring) + tosort = list(cursor.fetchall()) + tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1]))) + append_data.append((three_octet, tuple(tosort))) + + two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets] + two_octets_set = Set(two_octets) + two_octets_data = [(octet, two_octets.count(octet)) + for octet in two_octets_set] + two_octets_data.sort() + + context = Context({ + 'domain_data': domain_data, + 'three_octets_data': three_octets_data, + 
'two_octets_data': two_octets_data, + 'three_octets': three_octets, + 'num_ips': len(three_octets), + }) + + self.filedata['hosts'] = self.templates['hosts'].render(context) + + for subnet in append_data: + ips = [] + simple = True + namelist = [name.split('.', 1)[0] for name in [subnet[1][0][3]]] + cnamelist = [] + if subnet[1][0][4]: + cnamelist.append(subnet[1][0][4].split('.', 1)[0]) + simple = False + appenddata = subnet[1][0] + for ip in subnet[1][1:]: + if appenddata[2] == ip[2]: + namelist.append(ip[3].split('.', 1)[0]) + if ip[4]: + cnamelist.append(ip[4].split('.', 1)[0]) + simple = False + appenddata = ip + else: + if appenddata[0] == ip[0]: + simple = False + ips.append((appenddata[2], appenddata[0], Set(namelist), + cnamelist, simple, appenddata[1])) + appenddata = ip + simple = True + namelist = [ip[3].split('.', 1)[0]] + cnamelist = [] + if ip[4]: + cnamelist.append(ip[4].split('.', 1)[0]) + simple = False + ips.append((appenddata[2], appenddata[0], Set(namelist), + cnamelist, simple, appenddata[1])) + context = Context({ + 'subnet': subnet[0], + 'ips': ips, + }) + self.filedata['hosts'] += self.templates['hostsapp'].render(context) + self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile + + def buildPrinters(self): + """The /mcs/etc/printers.data file""" + header = """# This file is automatically generated. DO NOT EDIT IT! 
+# +Name Room User Type Notes +============== ========== ============================== ======================== ==================== +""" + + cursor = connection.cursor() + # fetches all the printers from the database + cursor.execute(""" + SELECT printq, location, primary_user, comments + FROM hostbase_host + WHERE whatami='printer' AND printq <> '' AND status = 'active' + ORDER BY printq + """) + printers = cursor.fetchall() + + printersfile = header + for printer in printers: + # splits up the printq line and gets the + # correct description out of the comments section + temp = printer[3].split('\n') + for printq in re.split(',[ ]*', printer[0]): + if len(temp) > 1: + printersfile += ("%-16s%-12s%-32s%-26s%s\n" % + (printq, printer[1], printer[2], temp[1], temp[0])) + else: + printersfile += ("%-16s%-12s%-32s%-26s%s\n" % + (printq, printer[1], printer[2], '', printer[3])) + self.filedata['printers.data'] = printersfile + self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile + + def buildHostsLPD(self): + """Creates the /mcs/etc/hosts.lpd file""" + + # this header needs to be changed to be more generic + header = """+@machines ++@all-machines +achilles.ctd.anl.gov +raven.ops.anl.gov +seagull.hr.anl.gov +parrot.ops.anl.gov +condor.ops.anl.gov +delphi.esh.anl.gov +anlcv1.ctd.anl.gov +anlvms.ctd.anl.gov +olivia.ctd.anl.gov\n\n""" + + cursor = connection.cursor() + cursor.execute(""" + SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active' + ORDER BY hostname""") + redmachines = list(cursor.fetchall()) + cursor.execute(""" + SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id + WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active' + """) + redmachines.extend(list(cursor.fetchall())) + cursor.execute(""" + SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active' + 
ORDER BY hostname""") + winmachines = list(cursor.fetchall()) + cursor.execute(""" + SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id + WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active' + """) + winmachines.__add__(list(cursor.fetchall())) + hostslpdfile = header + for machine in redmachines: + hostslpdfile += machine[0] + "\n" + hostslpdfile += "\n" + for machine in winmachines: + hostslpdfile += machine[0] + "\n" + self.filedata['hosts.lpd'] = hostslpdfile + self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile + + + def buildNetgroups(self): + """Makes the *-machine files""" + header = """################################################################### +# This file lists hosts in the '%s' machine netgroup, it is +# automatically generated. DO NOT EDIT THIS FILE! +# +# Number of hosts in '%s' machine netgroup: %i +#\n\n""" + + cursor = connection.cursor() + # fetches all the hosts that with valid netgroup entries + cursor.execute(""" + SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h + INNER JOIN hostbase_interface i ON h.id = i.host_id) + INNER JOIN hostbase_ip p ON i.id = p.interface_id) + INNER JOIN hostbase_name n ON p.id = n.ip_id + WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active' + ORDER BY h.netgroup, h.hostname + """) + nameslist = cursor.fetchall() + # gets the first host and initializes the hash + hostdata = nameslist[0] + netgroups = {hostdata[2]:[hostdata[0]]} + for row in nameslist: + # if new netgroup, create it + if row[2] not in netgroups: + netgroups.update({row[2]:[]}) + # if it belongs in the netgroup and has multiple interfaces, put them in + if hostdata[0] == row[0] and row[3]: + netgroups[row[2]].append(row[1]) + hostdata = row + # if its a new host, write the old one to the hash + elif hostdata[0] != row[0]: + 
class ClientMetadata(object):
    """Container for one client's resolved metadata.

    Holds the client's profile, group/bundle memberships, aliases,
    addresses, category map, uuid and password, plus the shared
    MetadataQuery object for further lookups.
    """

    def __init__(self, client, profile, groups, bundles,
                 aliases, addresses, categories, uuid, password, query):
        self.hostname = client
        self.profile = profile
        self.groups = groups
        self.bundles = bundles
        self.aliases = aliases
        self.addresses = addresses
        self.categories = categories
        self.uuid = uuid
        self.password = password
        # names of connector plugins that attached data to this object
        self.connectors = []
        self.query = query

    def inGroup(self, group):
        """Return True if this client is a member of *group*."""
        return group in self.groups

    def group_in_category(self, category):
        """Return this client's group belonging to *category*, or ''."""
        matches = (candidate
                   for candidate in self.query.all_groups_in_category(category)
                   if candidate in self.groups)
        return next(matches, '')
class MetadataQuery(object):
    """Bundle of metadata lookup callables handed to plugins.

    The *names_by_* callables return lists of client hostnames; the
    convenience methods expand those names into full metadata objects
    via ``by_name``.
    """

    def __init__(self, by_name, get_clients, by_groups, by_profiles,
                 all_groups, all_groups_in_category):
        # by_name resolves a single hostname to a ClientMetadata object.
        self.by_name = by_name
        self.names_by_groups = by_groups
        self.names_by_profiles = by_profiles
        self.all_clients = get_clients
        self.all_groups = all_groups
        self.all_groups_in_category = all_groups_in_category

    def _expand(self, names):
        # Map client hostnames to their metadata objects.
        return [self.by_name(name) for name in names]

    def by_groups(self, groups):
        """Return metadata for every client matching *groups*."""
        return self._expand(self.names_by_groups(groups))

    def by_profiles(self, profiles):
        """Return metadata for every client in one of *profiles*."""
        return self._expand(self.names_by_profiles(profiles))

    def all(self):
        """Return metadata for every known client."""
        return self._expand(self.all_clients())
'clients.xml':[]} + self.password = core.password + self.query = MetadataQuery(core.build_metadata, + lambda:self.clients.keys(), + self.get_client_names_by_groups, + self.get_client_names_by_profiles, + self.get_all_group_names, + self.get_all_groups_in_category) + + @classmethod + def init_repo(cls, repo, groups, os_selection, clients): + path = '%s/%s' % (repo, cls.name) + os.makedirs(path) + open("%s/Metadata/groups.xml" % + repo, "w").write(groups % os_selection) + open("%s/Metadata/clients.xml" % + repo, "w").write(clients % socket.getfqdn()) + + def get_groups(self): + '''return groups xml tree''' + groups_tree = lxml.etree.parse(self.data + "/groups.xml") + root = groups_tree.getroot() + return root + + def search_group(self, group_name, tree): + """Find a group.""" + for node in tree.findall("//Group"): + if node.get("name") == group_name: + return node + for child in node: + if child.tag == "Alias" and child.attrib["name"] == group_name: + return node + return None + + def add_group(self, group_name, attribs): + """Add group to groups.xml.""" + tree = lxml.etree.parse(self.data + "/groups.xml") + root = tree.getroot() + element = lxml.etree.Element("Group", name=group_name) + for key, val in attribs.iteritems(): + element.set(key, val) + node = self.search_group(group_name, tree) + if node != None: + self.logger.error("Group \"%s\" already exists" % (group_name)) + raise MetadataConsistencyError + root.append(element) + group_tree = open(self.data + "/groups.xml","w") + fd = group_tree.fileno() + while True: + try: + fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + continue + else: + break + tree.write(group_tree) + fcntl.lockf(fd, fcntl.LOCK_UN) + group_tree.close() + + def update_group(self, group_name, attribs): + """Update a groups attributes.""" + tree = lxml.etree.parse(self.data + "/groups.xml") + root = tree.getroot() + node = self.search_group(group_name, tree) + if node == None: + self.logger.error("Group \"%s\" not found" % 
(group_name)) + raise MetadataConsistencyError + node.attrib.update(attribs) + group_tree = open(self.data + "/groups.xml","w") + fd = group_tree.fileno() + while True: + try: + fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + continue + else: + break + tree.write(group_tree) + fcntl.lockf(fd, fcntl.LOCK_UN) + group_tree.close() + + def remove_group(self, group_name): + """Remove a group.""" + tree = lxml.etree.parse(self.data + "/groups.xml") + root = tree.getroot() + node = self.search_group(group_name, tree) + if node == None: + self.logger.error("Client \"%s\" not found" % (group_name)) + raise MetadataConsistencyError + root.remove(node) + group_tree = open(self.data + "/groups.xml","w") + fd = group_tree.fileno() + while True: + try: + fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + continue + else: + break + tree.write(group_tree) + fcntl.lockf(fd, fcntl.LOCK_UN) + group_tree.close() + + def add_bundle(self, bundle_name): + """Add bundle to groups.xml.""" + tree = lxml.etree.parse(self.data + "/groups.xml") + root = tree.getroot() + element = lxml.etree.Element("Bundle", name=bundle_name) + node = self.search_group(bundle_name, tree) + if node != None: + self.logger.error("Bundle \"%s\" already exists" % (bundle_name)) + raise MetadataConsistencyError + root.append(element) + group_tree = open(self.data + "/groups.xml","w") + fd = group_tree.fileno() + while True: + try: + fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + continue + else: + break + tree.write(group_tree) + fcntl.lockf(fd, fcntl.LOCK_UN) + group_tree.close() + + def remove_bundle(self, bundle_name): + """Remove a bundle.""" + tree = lxml.etree.parse(self.data + "/groups.xml") + root = tree.getroot() + node = self.search_group(bundle_name, tree) + if node == None: + self.logger.error("Bundle \"%s\" not found" % (bundle_name)) + raise MetadataConsistencyError + root.remove(node) + group_tree = open(self.data + "/groups.xml","w") + fd = 
    def HandleEvent(self, event):
        """Handle update events for data files.

        Called by the FAM layer when groups.xml, clients.xml, or one of
        their XIncluded files changes.  Rebuilds the in-memory client or
        group state from scratch for the affected file, then (once both
        files have been read at least once) re-validates every client's
        profile assignment.
        """
        filename = event.filename.split('/')[-1]
        if filename in ['groups.xml', 'clients.xml']:
            dest = filename
        elif filename in reduce(lambda x, y:x+y, self.extra.values()):
            # event on an XIncluded file: attribute it to the top-level
            # file that includes it
            if event.code2str() == 'exists':
                return
            dest = [key for key, value in self.extra.iteritems() if filename in value][0]
        else:
            # unrelated file; ignore
            return
        if event.code2str() == 'endExist':
            return
        try:
            xdata = lxml.etree.parse("%s/%s" % (self.data, dest))
        except lxml.etree.XMLSyntaxError:
            # keep the previous in-memory state on a parse failure
            self.logger.error('Failed to parse %s' % (dest))
            return
        included = [ent.get('href') for ent in \
                    xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
        # keep a pre-XInclude copy: writes (set_profile) must go back to
        # the original top-level document, not the expanded one
        xdata_original = copy.deepcopy(xdata)
        if included:
            for name in included:
                # start watching newly referenced include files too
                if name not in self.extra[dest]:
                    self.core.fam.AddMonitor("%s/%s" % (self.data, name), self)
                    self.extra[dest].append(name)
            try:
                xdata.xinclude()
            except lxml.etree.XIncludeError:
                self.logger.error("Failed to process XInclude for file %s" % dest)

        if dest == 'clients.xml':
            # rebuild all client-derived state from scratch
            self.clients = {}
            self.aliases = {}
            self.raliases = {}
            self.bad_clients = {}
            self.secure = []
            self.floating = []
            self.addresses = {}
            self.raddresses = {}
            self.clientdata_original = xdata_original
            self.clientdata = xdata
            for client in xdata.findall('.//Client'):
                clname = client.get('name').lower()
                if 'address' in client.attrib:
                    # forward (address -> clients) and reverse
                    # (client -> addresses) maps
                    caddr = client.get('address')
                    if caddr in self.addresses:
                        self.addresses[caddr].append(clname)
                    else:
                        self.addresses[caddr] = [clname]
                    if clname not in self.raddresses:
                        self.raddresses[clname] = set()
                    self.raddresses[clname].add(caddr)
                if 'auth' in client.attrib:
                    # NOTE(review): keyed on the un-lowercased name,
                    # unlike the other per-client maps -- confirm intended
                    self.auth[client.get('name')] = client.get('auth',
                                                               'cert+password')
                if 'uuid' in client.attrib:
                    self.uuid[client.get('uuid')] = clname
                if client.get('secure', 'false') == 'true':
                    self.secure.append(clname)
                if client.get('location', 'fixed') == 'floating':
                    self.floating.append(clname)
                if 'password' in client.attrib:
                    self.passwords[clname] = client.get('password')
                # aliases that carry their own address also feed the
                # address maps
                for alias in [alias for alias in client.findall('Alias')\
                              if 'address' in alias.attrib]:
                    if alias.get('address') in self.addresses:
                        self.addresses[alias.get('address')].append(clname)
                    else:
                        self.addresses[alias.get('address')] = [clname]
                    if clname not in self.raddresses:
                        self.raddresses[clname] = set()
                    self.raddresses[clname].add(alias.get('address'))
                self.clients.update({clname: client.get('profile')})
                [self.aliases.update({alias.get('name'): clname}) \
                 for alias in client.findall('Alias')]
                self.raliases[clname] = set()
                [self.raliases[clname].add(alias.get('name')) for alias \
                 in client.findall('Alias')]
        elif dest == 'groups.xml':
            # rebuild all group-derived state from scratch
            self.public = []
            self.private = []
            self.profiles = []
            self.groups = {}
            grouptmp = {}
            self.categories = {}
            for group in xdata.xpath('//Groups/Group') \
                    + xdata.xpath('Group'):
                # grouptmp[name] = ([bundle names], [child group names + self])
                grouptmp[group.get('name')] = tuple([[item.get('name') for item in group.findall(spec)]
                                                     for spec in ['./Bundle', './Group']])
                grouptmp[group.get('name')][1].append(group.get('name'))
                if group.get('default', 'false') == 'true':
                    self.default = group.get('name')
                if group.get('profile', 'false') == 'true':
                    self.profiles.append(group.get('name'))
                if group.get('public', 'false') == 'true':
                    self.public.append(group.get('name'))
                elif group.get('public', 'true') == 'false':
                    self.private.append(group.get('name'))
                if 'category' in group.attrib:
                    self.categories[group.get('name')] = group.get('category')
            for group in grouptmp:
                # self.groups[group] => (bundles, groups, categories)
                # computed as the transitive closure over nested groups,
                # admitting at most one group per category
                self.groups[group] = (set(), set(), {})
                tocheck = [group]
                group_cat = self.groups[group][2]
                while tocheck:
                    now = tocheck.pop()
                    self.groups[group][1].add(now)
                    if now in grouptmp:
                        (bundles, groups) = grouptmp[now]
                        for ggg in [ggg for ggg in groups if ggg not in self.groups[group][1]]:
                            if ggg not in self.categories or \
                               self.categories[ggg] not in self.groups[group][2]:
                                self.groups[group][1].add(ggg)
                                tocheck.append(ggg)
                                if ggg in self.categories:
                                    group_cat[self.categories[ggg]] = ggg
                            elif ggg in self.categories:
                                # a group of this category was already taken
                                self.logger.info("Group %s: %s cat-suppressed %s" % \
                                                 (group,
                                                  group_cat[self.categories[ggg]],
                                                  ggg))
                        [self.groups[group][0].add(bund) for bund in bundles]
        self.states[dest] = True
        if False not in self.states.values():
            # both files read: check that all client groups are real and complete
            real = self.groups.keys()  # NOTE(review): unused local
            for client in self.clients.keys():
                if self.clients[client] not in self.profiles:
                    # quarantine clients mapped to nonexistent profiles;
                    # they may be restored on a later groups.xml update
                    self.logger.error("Client %s set as nonexistent or incomplete group %s" \
                                      % (client, self.clients[client]))
                    self.logger.error("Removing client mapping for %s" % (client))
                    self.bad_clients[client] = self.clients[client]
                    del self.clients[client]
            for bclient in self.bad_clients.keys():
                if self.bad_clients[bclient] in self.profiles:
                    self.logger.info("Restored profile mapping for client %s" % bclient)
                    self.clients[bclient] = self.bad_clients[bclient]
                    del self.bad_clients[bclient]
    def write_back_clients(self):
        """Write changes to client.xml back to disk.

        Serializes the pre-XInclude client document to clients.xml.new
        under an exclusive lock, then atomically renames it over
        clients.xml (following a symlink if present).

        Raises MetadataRuntimeError on any I/O failure.
        """
        try:
            datafile = open("%s/%s" % (self.data, 'clients.xml.new'), 'w')
        except IOError:
            self.logger.error("Failed to write clients.xml.new")
            raise MetadataRuntimeError
        # prep data
        dataroot = self.clientdata_original.getroot()
        # iter() is the modern spelling; getchildren() covers older lxml
        if hasattr(dataroot, 'iter'):
            items = dataroot.iter()
        else:
            items = dataroot.getchildren()
        for item in items:
            # no items have text data of any sort; clearing text/tail
            # lets pretty_print produce clean indentation
            item.tail = None
            item.text = None
        newcontents = lxml.etree.tostring(dataroot, pretty_print=True)

        fd = datafile.fileno()
        # busy-wait until the non-blocking exclusive lock succeeds
        while self.locked(fd) == True:
            pass
        try:
            datafile.write(newcontents)
        except:
            # release the lock and remove the partial temp file before
            # propagating
            fcntl.lockf(fd, fcntl.LOCK_UN)
            self.logger.error("Metadata: Failed to write new clients data to clients.xml.new", exc_info=1)
            os.unlink("%s/%s" % (self.data, "clients.xml.new"))
            raise MetadataRuntimeError
        datafile.close()

        # check if clients.xml is a symlink; if so, replace its target
        clientsxml = "%s/%s" % (self.data, 'clients.xml')
        if os.path.islink(clientsxml):
            clientsxml = os.readlink(clientsxml)

        try:
            # atomic replace of the live file
            os.rename("%s/%s" % (self.data, 'clients.xml.new'), clientsxml)
        except:
            self.logger.error("Metadata: Failed to rename clients.xml.new")
            raise MetadataRuntimeError

    def locked(self, fd):
        """Try a non-blocking exclusive lock on *fd*.

        Returns True if the file is locked by someone else (lock not
        acquired); False if the lock was acquired.  Note the lock is NOT
        released here -- the caller owns it on a False return.
        """
        try:
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            return True
        return False
    def get_initial_metadata(self, client):
        """Return the metadata for a given client.

        Resolves aliases, falls back to (and persists) the default
        profile for unknown clients, then merges in any connector-added
        groups from self.cgroups before building the ClientMetadata.

        Raises MetadataRuntimeError if groups.xml/clients.xml have not
        both been read yet; MetadataConsistencyError if the client is
        unknown and no default profile is configured.
        """
        if False in self.states.values():
            raise MetadataRuntimeError
        client = client.lower()
        if client in self.aliases:
            # canonicalize alias to the real client name
            client = self.aliases[client]
        if client in self.clients:
            profile = self.clients[client]
            (bundles, groups, categories) = self.groups[profile]
        else:
            if self.default == None:
                self.logger.error("Cannot set group for client %s; no default group set" % (client))
                raise MetadataConsistencyError
            # unknown client: assign and persist the default profile
            self.set_profile(client, self.default, (None, None))
            profile = self.default
            [bundles, groups, categories] = self.groups[self.default]
        aliases = self.raliases.get(client, set())
        addresses = self.raddresses.get(client, set())
        # copy the profile's data so connector merges don't mutate
        # the shared self.groups entries
        newgroups = set(groups)
        newbundles = set(bundles)
        newcategories = {}
        newcategories.update(categories)
        if client in self.passwords:
            password = self.passwords[client]
        else:
            password = None
        # reverse-lookup the client's uuid, if any
        uuids = [item for item, value in self.uuid.iteritems() if value == client]
        if uuids:
            uuid = uuids[0]
        else:
            uuid = None
        # merge connector-supplied per-client groups
        for group in self.cgroups.get(client, []):
            if group in self.groups:
                nbundles, ngroups, ncategories = self.groups[group]
            else:
                # unknown group: treat it as a bare group of itself
                nbundles, ngroups, ncategories = ([], [group], {})
            [newbundles.add(b) for b in nbundles if b not in newbundles]
            [newgroups.add(g) for g in ngroups if g not in newgroups]
            newcategories.update(ncategories)
        return ClientMetadata(client, profile, newgroups, newbundles, aliases,
                              addresses, newcategories, uuid, password, self.query)
[all_groups.update(g[1]) for g in self.groups.values()] + return all_groups + + def get_all_groups_in_category(self, category): + all_groups = set() + [all_groups.add(g) for g in self.categories \ + if self.categories[g] == category] + return all_groups + + def get_client_names_by_profiles(self, profiles): + return [client for client, profile in self.clients.iteritems() \ + if profile in profiles] + + def get_client_names_by_groups(self, groups): + gprofiles = [profile for profile in self.profiles if \ + self.groups[profile][1].issuperset(groups)] + return self.get_client_names_by_profiles(gprofiles) + + def merge_additional_groups(self, imd, groups): + for group in groups: + if group in self.categories and \ + self.categories[group] in imd.categories: + continue + nb, ng, _ = self.groups.get(group, (list(), [group], dict())) + for b in nb: + if b not in imd.bundles: + imd.bundles.add(b) + for g in ng: + if g not in imd.groups: + if g in self.categories and \ + self.categories[g] in imd.categories: + continue + if g in self.private: + self.logger.error("Refusing to add dynamic membership in private group %s for client %s" % (g, imd.hostname)) + continue + imd.groups.add(g) + + def merge_additional_data(self, imd, source, data): + if not hasattr(imd, source): + setattr(imd, source, data) + imd.connectors.append(source) + + def validate_client_address(self, client, addresspair): + """Check address against client.""" + address = addresspair[0] + if client in self.floating: + self.debug_log("Client %s is floating" % client) + return True + if address in self.addresses: + if client in self.addresses[address]: + self.debug_log("Client %s matches address %s" % (client, address)) + return True + else: + self.logger.error("Got request for non-float client %s from %s" \ + % (client, address)) + return False + resolved = self.resolve_client(addresspair) + if resolved.lower() == client.lower(): + return True + else: + self.logger.error("Got request for %s from incorrect 
    def AuthenticateConnection(self, cert, user, password, address):
        """Check auth credentials for an incoming connection.

        Identification is attempted in priority order: certificate
        commonName, then root-over-address resolution, then uuid mapping.
        Returns True when the connection is authenticated.
        """
        if cert:
            id_method = 'cert'
            certinfo = dict([x[0] for x in cert['subject']])
            # look at cert.cN
            client = certinfo['commonName']
            self.debug_log("Got cN %s; using as client name" % client)
            # Per-client auth override; default requires cert AND password.
            auth_type = self.auth.get(client, 'cert+password')
        elif user == 'root':
            id_method = 'address'
            try:
                client = self.resolve_client(address)
            except MetadataConsistencyError:
                self.logger.error("Client %s failed to resolve; metadata problem" % (address[0]))
                return False
        else:
            id_method = 'uuid'
            # user maps to client
            if user not in self.uuid:
                client = user
                self.uuid[user] = user
            else:
                client = self.uuid[user]

        # we have the client name
        self.debug_log("Authenticating client %s" % client)

        # next we validate the address; uuid-identified clients skip the
        # address check entirely
        if id_method == 'uuid':
            addr_is_valid = True
        else:
            addr_is_valid = self.validate_client_address(client, address)

        if not addr_is_valid:
            return False

        if id_method == 'cert' and auth_type != 'cert+password':
            # we are done if cert+password not required
            return True

        # Password checks below: clients without a per-client password fall
        # back to the global password unless they are in secure mode.
        if client not in self.passwords:
            if client in self.secure:
                self.logger.error("Client %s in secure mode but has no password" % (address[0]))
                return False
            if password != self.password:
                self.logger.error("Client %s used incorrect global password" % (address[0]))
                return False
        if client not in self.secure:
            # Non-secure clients may use either the global or their own
            # password (when one exists).
            if client in self.passwords:
                plist = [self.password, self.passwords[client]]
            else:
                plist = [self.password]
            if password not in plist:
                self.logger.error("Client %s failed to use either allowed password" % \
                                  (address[0]))
                return False
        else:
            # client in secure mode and has a client password
            if password != self.passwords[client]:
                self.logger.error("Client %s failed to use client password in secure mode" % \
                                  (address[0]))
                return False
        # populate the session cache
        if user != 'root':
            self.session_cache[address] = (time.time(), client)
        return True

    def process_statistics(self, meta, _):
        """Hook into statistics interface to toggle clients in bootstrap mode.

        Once a bootstrap-mode client reports statistics, its auth mode is
        flipped to 'cert' and clients.xml is written back to disk.
        """
        client = meta.hostname
        if client in self.auth and self.auth[client] == 'bootstrap':
            self.logger.info("Asserting client %s auth mode to cert" % client)
            cli = self.clientdata_original.xpath('.//Client[@name="%s"]' \
                                                % (client))
            cli[0].set('auth', 'cert')
            self.write_back_clients()
class NagiosGen(Bcfg2.Server.Plugin.Plugin,
                Bcfg2.Server.Plugin.Generator):
    """NagiosGen is a Bcfg2 plugin that dynamically generates
    Nagios configuration file based on Bcfg2 data.
    """
    name = 'NagiosGen'
    __version__ = '0.6'
    __author__ = 'bcfg-dev@mcs.anl.gov'

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Generator.__init__(self)
        # Bind the two generated paths to their builder methods.
        self.Entries = {'Path':
                        {'/etc/nagiosgen.status': self.createhostconfig,
                         '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}

        # Entry attributes for per-client and server-wide config files.
        self.client_attrib = {'encoding': 'ascii',
                              'owner': 'root',
                              'group': 'root',
                              'type': 'file',
                              'perms': '0400'}
        self.server_attrib = {'encoding': 'ascii',
                              'owner': 'nagios',
                              'group': 'nagios',
                              'type': 'file',
                              'perms': '0440'}

    def createhostconfig(self, entry, metadata):
        """Build host specific configuration file."""
        host_address = socket.gethostbyname(metadata.hostname)
        # Only mention hostgroups for which a group config file exists.
        host_groups = [grp for grp in metadata.groups
                       if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
        host_config = host_config_fmt % \
                      (metadata.hostname, metadata.hostname, host_address)

        if host_groups:
            host_config += ' hostgroups %s\n' % (",".join(host_groups))

        # Extra per-host directives from Properties/NagiosGen.xml: prefer
        # a host-specific element, fall back to "default".
        xtra = None
        if hasattr(metadata, 'Properties') and \
           'NagiosGen.xml' in metadata.Properties:
            for q in (metadata.hostname, 'default'):
                xtra = metadata.Properties['NagiosGen.xml'].data.find(q)
                if xtra is not None:
                    break

        if xtra is not None:
            for item in list(xtra):
                host_config += ' %-32s %s\n' % (item.tag, item.text)
        else:
            host_config += ' use default\n'

        host_config += '}\n'
        entry.text = host_config
        entry.attrib.update(self.client_attrib)
        try:
            fileh = open("%s/%s-host.cfg" %
                         (self.data, metadata.hostname), 'w')
            fileh.write(host_config)
            fileh.close()
        except (OSError, IOError) as ioerr:
            # BUGFIX: python 2 open() raises IOError, which is NOT a
            # subclass of OSError; the original "except OSError" could
            # never catch a failed write.
            LOGGER.error("Failed to write %s/%s-host.cfg" %
                         (self.data, metadata.hostname))
            LOGGER.error(ioerr)

    def createserverconfig(self, entry, _):
        """Build monolithic server configuration file."""
        host_configs = glob.glob('%s/*-host.cfg' % self.data)
        group_configs = glob.glob('%s/*-group.cfg' % self.data)
        host_data = ""
        group_data = ""
        for host in host_configs:
            hostfile = open(host, 'r')
            host_data += hostfile.read()
            hostfile.close()
        for group in group_configs:
            # Strip directory prefix and the "-group.cfg" suffix.
            group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group)
            # Only include groups actually referenced by some host config.
            if host_data.find(group_name) != -1:
                groupfile = open(group, 'r')
                group_data += groupfile.read()
                groupfile.close()
        entry.text = group_data + host_data
        entry.attrib.update(self.server_attrib)
        try:
            fileh = open("%s/nagiosgen.cfg" % (self.data), 'w')
            fileh.write(group_data + host_data)
            fileh.close()
        except (OSError, IOError) as ioerr:
            # See createhostconfig: must catch IOError as well on python 2.
            LOGGER.error("Failed to write %s/nagiosgen.cfg" % (self.data))
            LOGGER.error(ioerr)
class OhaiCache(object):
    """Per-host cache of Ohai probe output, mirrored to JSON files."""

    def __init__(self, dirname):
        # Directory holding one "<hostname>.json" file per known host.
        self.dirname = dirname
        self.cache = {}

    def __setitem__(self, item, value):
        # A None value means the client probe returned nothing; keep
        # whatever data we already have for this host.
        if value is None:
            return
        self.cache[item] = json.loads(value)
        open("%s/%s.json" % (self.dirname, item), 'w').write(value)

    def __getitem__(self, item):
        if item not in self.cache:
            path = "%s/%s.json" % (self.dirname, item)
            try:
                raw = open(path).read()
            except:
                raise KeyError(item)
            self.cache[item] = json.loads(raw)
        return self.cache[item]

    def __iter__(self):
        # Hosts known in memory plus hosts persisted on disk (".json"
        # suffix stripped); duplicates are possible, as before.
        names = list(self.cache.keys())
        names.extend(entry[:-5] for entry in os.listdir(self.dirname))
        return iter(names)
def source_from_xml(xsource):
    """Translate a <Source> XML element into a keyword dict for Source().

    Missing Version/Recommended elements fall back to 'placeholder' /
    False; exactly one of 'url' or 'rawurl' is populated (the other stays
    False), and the chosen URL is normalized to end with '/'.
    """
    ret = dict([('rawurl', False), ('url', False)])
    for key, tag in [('groups', 'Group'), ('components', 'Component'),
                     ('arches', 'Arch'), ('blacklist', 'Blacklist'),
                     ('whitelist', 'Whitelist')]:
        ret[key] = [item.text for item in xsource.findall(tag)]
    # version and component need to both contain data for sources to work
    try:
        ret['version'] = xsource.find('Version').text
    except AttributeError:
        # was a bare except: find() returns None when the element is
        # absent; only that AttributeError should be swallowed here
        ret['version'] = 'placeholder'
    if not ret['components']:
        ret['components'] = ['placeholder']
    try:
        if xsource.find('Recommended').text in ['True', 'true']:
            ret['recommended'] = True
        else:
            ret['recommended'] = False
    except AttributeError:
        ret['recommended'] = False
    if xsource.find('RawURL') is not None:
        ret['rawurl'] = xsource.find('RawURL').text
        if not ret['rawurl'].endswith('/'):
            ret['rawurl'] += '/'
    else:
        ret['url'] = xsource.find('URL').text
        if not ret['url'].endswith('/'):
            ret['url'] += '/'
    return ret
    def load_state(self):
        # No-op in the base class; subclasses restore pickled repo data.
        pass

    def setup_data(self, force_update=False):
        """Populate package data: cache, then local files, then download.

        Each stage is attempted only if the previous one failed; errors
        are logged and the next fallback is tried.
        """
        should_read = True
        should_download = False
        if os.path.exists(self.cachefile):
            try:
                self.load_state()
                should_read = False
            except:
                logger.error("Cachefile %s load failed; falling back to file read"\
                             % (self.cachefile))
        if should_read:
            try:
                self.read_files()
            except:
                logger.error("Packages: File read failed; falling back to file download")
                should_download = True

        if should_download or force_update:
            try:
                self.update()
                self.read_files()
            except:
                logger.error("Failed to update source", exc_info=1)

    def get_urls(self):
        # Base class exposes no URLs; subclasses override.
        return []
    urls = property(get_urls)

    def get_files(self):
        # Local cache filenames corresponding to each remote URL.
        return [self.escape_url(url) for url in self.urls]
    files = property(get_files)

    def get_vpkgs(self, meta):
        """Map virtual package names to their providers for this client.

        Merges the 'global' arch with any arch group the client is in.
        """
        agroups = ['global'] + [a for a in self.arches if a in meta.groups]
        vdict = dict()
        for agrp in agroups:
            for key, value in self.provides[agrp].iteritems():
                if key not in vdict:
                    vdict[key] = set(value)
                else:
                    vdict[key].update(value)
        return vdict

    def escape_url(self, url):
        # Flatten a URL into a single cache filename under basepath.
        return "%s/%s" % (self.basepath, url.replace('/', '@'))

    def file_init(self):
        pass

    def read_files(self):
        pass

    def update(self):
        """Download every repo index URL into the local cache."""
        for url in self.urls:
            logger.info("Packages: Updating %s" % url)
            fname = self.escape_url(url)
            try:
                data = _fetch_url(url)
            except ValueError:
                logger.error("Packages: Bad url string %s" % url)
                continue
            except urllib2.HTTPError, h:
                logger.error("Packages: Failed to fetch url %s. code=%s" \
                             % (url, h.code))
                continue
            file(fname, 'w').write(data)

    def applies(self, metadata):
        # A source applies when the client is in at least one basegroup
        # and in every group the source requires.
        return len([g for g in self.basegroups if g in metadata.groups]) != 0 and \
               len([g for g in metadata.groups if g in self.groups]) \
               == len(self.groups)

    def get_arches(self, metadata):
        # 'global' plus any source arch the client belongs to.
        return ['global'] + [a for a in self.arches if a in metadata.groups]

    def get_deps(self, metadata, pkgname):
        """Return dependencies of *pkgname*; raises NoData when unknown."""
        for arch in self.get_arches(metadata):
            if pkgname in self.deps[arch]:
                return self.deps[arch][pkgname]
        raise NoData

    def get_provides(self, metadata, required):
        """Return providers of *required*; raises NoData when unknown."""
        for arch in self.get_arches(metadata):
            if required in self.provides[arch]:
                return self.provides[arch][required]
        raise NoData

    def is_package(self, metadata, _):
        # Base class knows no concrete packages.
        return False

    def get_url_info(self):
        return {'groups': copy.copy(self.groups), \
                'urls': [copy.deepcopy(url) for url in self.url_map]}
    def load_state(self):
        # Restore the pickled tuple written by save_state.
        data = file(self.cachefile)
        (self.packages, self.deps, self.provides, \
         self.filemap, self.url_map) = cPickle.load(data)

    def get_urls(self):
        """Discover primary/filelists metadata URLs via repomd.xml.

        Also records, per discovered file, which arch it belongs to
        (file_to_arch) and the expanded URL settings (url_map).
        """
        surls = list()
        self.url_map = []
        for arch in self.arches:
            usettings = [{'version': self.version, 'component':comp,
                          'arch':arch} for comp in self.components]
            for setting in usettings:
                setting['groups'] = self.groups
                setting['url'] = self.baseurl % setting
                self.url_map.append(copy.deepcopy(setting))
            surls.append((arch, [setting['url'] for setting in usettings]))
        urls = []
        for (sarch, surl_list) in surls:
            for surl in surl_list:
                if not surl.endswith('/'):
                    surl += '/'
                rmdurl = surl + 'repodata/repomd.xml'
                try:
                    repomd = _fetch_url(rmdurl)
                    xdata = lxml.etree.XML(repomd)
                except ValueError:
                    logger.error("Packages: Bad url string %s" % rmdurl)
                    continue
                except urllib2.HTTPError, h:
                    logger.error("Packages: Failed to fetch url %s. code=%s" \
                                 % (rmdurl, h.code))
                    continue
                except:
                    logger.error("Failed to process url %s" % rmdurl)
                    continue
                for elt in xdata.findall(self.rpo + 'data'):
                    # Only the filelists and primary indices are needed.
                    if elt.get('type') not in ['filelists', 'primary']:
                        continue
                    floc = elt.find(self.rpo + 'location')
                    fullurl = surl + floc.get('href')
                    urls.append(fullurl)
                    self.file_to_arch[self.escape_url(fullurl)] = sarch
        return urls
    urls = property(get_urls)

    def read_files(self):
        """Parse downloaded primary/filelists indices and cache results."""
        for fname in [f for f in self.files if f.endswith('primary.xml.gz')]:
            farch = self.file_to_arch[fname]
            fdata = lxml.etree.parse(fname).getroot()
            self.parse_primary(fdata, farch)
        for fname in [f for f in self.files if f.endswith('filelists.xml.gz')]:
            farch = self.file_to_arch[fname]
            fdata = lxml.etree.parse(fname).getroot()
            self.parse_filelist(fdata, farch)
        # merge data
        sdata = self.packages.values()
        self.packages['global'] = copy.deepcopy(sdata.pop())
        while sdata:
            # NOTE(review): set.intersection() returns a new set and this
            # result is discarded — intersection_update() was likely
            # intended; confirm before changing behavior.
            self.packages['global'].intersection(sdata.pop())

        for key in self.packages:
            if key == 'global':
                continue
            self.packages[key] = self.packages['global'].difference(self.packages[key])
        self.save_state()

    def parse_filelist(self, data, arch):
        """Record which packages provide each path in needed_paths."""
        if arch not in self.filemap:
            self.filemap[arch] = dict()
        for pkg in data.findall(self.fl + 'package'):
            for fentry in [fe for fe in pkg.findall(self.fl + 'file') \
                           if fe.text in self.needed_paths]:
                if fentry.text in self.filemap[arch]:
                    self.filemap[arch][fentry.text].add(pkg.get('name'))
                else:
                    self.filemap[arch][fentry.text] = set([pkg.get('name')])
pdata.find(self.rp + 'requires') + self.deps[arch][pkgname] = set() + for entry in pre.getchildren(): + self.deps[arch][pkgname].add(entry.get('name')) + if entry.get('name').startswith('/'): + self.needed_paths.add(entry.get('name')) + pro = pdata.find(self.rp + 'provides') + if pro != None: + for entry in pro.getchildren(): + prov = entry.get('name') + if prov not in self.provides[arch]: + self.provides[arch][prov] = list() + self.provides[arch][prov].append(pkgname) + + def is_package(self, metadata, item): + arch = [a for a in self.arches if a in metadata.groups] + if not arch: + return False + return (item in self.packages['global'] or item in self.packages[arch[0]]) and \ + item not in self.blacklist and \ + ((len(self.whitelist) == 0) or item in self.whitelist) + + def get_vpkgs(self, metadata): + rv = Source.get_vpkgs(self, metadata) + for arch, fmdata in self.filemap.iteritems(): + if arch not in metadata.groups and arch != 'global': + continue + for filename, pkgs in fmdata.iteritems(): + rv[filename] = pkgs + return rv + + def filter_unknown(self, unknown): + filtered = set([u for u in unknown if u.startswith('rpmlib')]) + unknown.difference_update(filtered) + +class APTSource(Source): + basegroups = ['apt', 'debian', 'ubuntu', 'nexenta'] + ptype = 'deb' + + def __init__(self, basepath, url, version, arches, components, groups, + rawurl, blacklist, whitelist, recommended): + Source.__init__(self, basepath, url, version, arches, components, groups, + rawurl, blacklist, whitelist, recommended) + self.pkgnames = set() + + self.url_map = [{'rawurl': self.rawurl, 'url': self.url, 'version': self.version, \ + 'components': self.components, 'arches': self.arches, 'groups': self.groups}] + + def save_state(self): + cache = file(self.cachefile, 'wb') + cPickle.dump((self.pkgnames, self.deps, self.provides), + cache, 2) + cache.close() + + def load_state(self): + data = file(self.cachefile) + self.pkgnames, self.deps, self.provides = cPickle.load(data) + + def 
    def get_urls(self):
        # Standard layout: one Packages.gz per component/arch pair;
        # rawurl sources expose a single flat Packages.gz.
        if not self.rawurl:
            return ["%sdists/%s/%s/binary-%s/Packages.gz" % \
                    (self.url, self.version, part, arch) for part in self.components \
                    for arch in self.arches]
        else:
            return ["%sPackages.gz" % (self.rawurl)]
    urls = property(get_urls)

    def read_files(self):
        """Parse cached Packages.gz files into dep/provide maps.

        Alternative dependencies ("a | b") become synthetic
        "choice-<pkg>-<arch>-<n>" virtual packages provided by each
        alternative. Data identical across arches is promoted to the
        'global' arch.
        """
        bdeps = dict()
        bprov = dict()
        if self.recommended:
            depfnames = ['Depends', 'Pre-Depends', 'Recommends']
        else:
            depfnames = ['Depends', 'Pre-Depends']
        for fname in self.files:
            if not self.rawurl:
                # Arch is recoverable from the escaped cache filename.
                barch = [x for x in fname.split('@') if x.startswith('binary-')][0][7:]
            else:
                # RawURL entries assume that they only have one <Arch></Arch>
                # element and that it is the architecture of the source.
                barch = self.arches[0]
            if barch not in bdeps:
                bdeps[barch] = dict()
                bprov[barch] = dict()
            try:
                reader = gzip.GzipFile(fname)
            except:
                print("Failed to read file %s" % fname)
                raise
            for line in reader.readlines():
                words = line.strip().split(':', 1)
                if words[0] == 'Package':
                    pkgname = words[1].strip().rstrip()
                    self.pkgnames.add(pkgname)
                    bdeps[barch][pkgname] = []
                elif words[0] in depfnames:
                    vindex = 0
                    for dep in words[1].split(','):
                        if '|' in dep:
                            # Alternation: emit a synthetic virtual package
                            # provided by each stripped alternative.
                            cdeps = [re.sub('\s+', '', re.sub('\(.*\)', '', cdep)) for cdep in dep.split('|')]
                            dyn_dname = "choice-%s-%s-%s" % (pkgname, barch, vindex)
                            vindex += 1
                            bdeps[barch][pkgname].append(dyn_dname)
                            bprov[barch][dyn_dname] = set(cdeps)
                        else:
                            # Strip version constraints "(...)".
                            raw_dep = re.sub('\(.*\)', '', dep)
                            raw_dep = raw_dep.rstrip().strip()
                            bdeps[barch][pkgname].append(raw_dep)
                elif words[0] == 'Provides':
                    for pkg in words[1].split(','):
                        dname = pkg.rstrip().strip()
                        if dname not in bprov[barch]:
                            bprov[barch][dname] = set()
                        bprov[barch][dname].add(pkgname)

        # Promote arch-invariant deps/provides to 'global'.
        self.deps['global'] = dict()
        self.provides['global'] = dict()
        for barch in bdeps:
            self.deps[barch] = dict()
            self.provides[barch] = dict()
        for pkgname in self.pkgnames:
            pset = set()
            for barch in bdeps:
                if pkgname not in bdeps[barch]:
                    bdeps[barch][pkgname] = []
                pset.add(tuple(bdeps[barch][pkgname]))
            if len(pset) == 1:
                self.deps['global'][pkgname] = pset.pop()
            else:
                for barch in bdeps:
                    self.deps[barch][pkgname] = bdeps[barch][pkgname]
        provided = set()
        for bprovided in bprov.values():
            provided.update(set(bprovided))
        for prov in provided:
            prset = set()
            for barch in bprov:
                if prov not in bprov[barch]:
                    continue
                prset.add(tuple(bprov[barch].get(prov, ())))
            if len(prset) == 1:
                self.provides['global'][prov] = prset.pop()
            else:
                for barch in bprov:
                    self.provides[barch][prov] = bprov[barch].get(prov, ())
        self.save_state()

    def is_package(self, _, pkg):
        # Known package name, not blacklisted, and whitelisted (if a
        # whitelist is configured at all).
        return pkg in self.pkgnames and \
               pkg not in self.blacklist and \
               (len(self.whitelist) == 0 or pkg in self.whitelist)
    def read_files(self):
        """Extract package names from pacman .db.tar.gz indices.

        Only package names are collected (one directory per package in
        the tarball); dependency/provide parsing is not implemented, so
        bdeps/bprov stay empty and the merge loops below are vacuous.
        """
        bdeps = dict()
        bprov = dict()

        if self.recommended:
            depfnames = ['Depends', 'Pre-Depends', 'Recommends']
        else:
            depfnames = ['Depends', 'Pre-Depends']

        for fname in self.files:
            if not self.rawurl:
                barch = [x for x in fname.split('@') if x in self.arches][0]
            else:
                # RawURL entries assume that they only have one <Arch></Arch>
                # element and that it is the architecture of the source.
                barch = self.arches[0]

            if barch not in bdeps:
                bdeps[barch] = dict()
                bprov[barch] = dict()
            try:
                print "try to read : " + fname
                tar = tarfile.open(fname, "r")
                reader = gzip.GzipFile(fname)
            except:
                print("Failed to read file %s" % fname)
                raise

            for tarinfo in tar:
                if tarinfo.isdir():
                    # Directory names look like "<name>-<ver>-<rel>";
                    # rsplit twice recovers the bare package name.
                    self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
                    print "added : " + tarinfo.name.rsplit("-", 2)[0]
            tar.close()

        # Promote arch-invariant data to 'global' (mirrors APTSource).
        self.deps['global'] = dict()
        self.provides['global'] = dict()
        for barch in bdeps:
            self.deps[barch] = dict()
            self.provides[barch] = dict()
        for pkgname in self.pkgnames:
            pset = set()
            for barch in bdeps:
                if pkgname not in bdeps[barch]:
                    bdeps[barch][pkgname] = []
                pset.add(tuple(bdeps[barch][pkgname]))
            if len(pset) == 1:
                self.deps['global'][pkgname] = pset.pop()
            else:
                for barch in bdeps:
                    self.deps[barch][pkgname] = bdeps[barch][pkgname]
        provided = set()
        for bprovided in bprov.values():
            provided.update(set(bprovided))
        for prov in provided:
            prset = set()
            for barch in bprov:
                if prov not in bprov[barch]:
                    continue
                prset.add(tuple(bprov[barch].get(prov, ())))
            if len(prset) == 1:
                self.provides['global'][prov] = prset.pop()
            else:
                for barch in bprov:
                    self.provides[barch][prov] = bprov[barch].get(prov, ())
        self.save_state()
class Packages(Bcfg2.Server.Plugin.Plugin,
               Bcfg2.Server.Plugin.StructureValidator,
               Bcfg2.Server.Plugin.Generator,
               Bcfg2.Server.Plugin.Connector):
    """Resolve Package entries against the configured package sources."""
    name = 'Packages'
    conflicts = ['Pkgmgr']
    experimental = True
    __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.StructureValidator.__init__(self)
        Bcfg2.Server.Plugin.Generator.__init__(self)
        Bcfg2.Server.Plugin.Connector.__init__(self)
        self.cachepath = self.data + '/cache'
        self.sentinels = set()
        self.sources = []
        self.disableResolver = False
        self.disableMetaData = False
        self.virt_pkgs = dict()
        # create cache directory if needed
        if not os.path.exists(self.cachepath):
            os.makedirs(self.cachepath)
        self._load_config()

    def get_relevant_groups(self, meta):
        """Sorted tuple of client groups referenced by a matching source."""
        relevant = set()
        for src in self.get_matching_sources(meta):
            for grp in meta.groups:
                if grp in src.basegroups or grp in src.groups \
                   or grp in src.arches:
                    relevant.add(grp)
        return tuple(sorted(relevant))

    def build_vpkgs_entry(self, meta):
        """Merge virtual-package maps from every matching source."""
        # Kept for parity with the original implementation.
        self.get_relevant_groups(meta)
        vpkgs = dict()
        for source in self.get_matching_sources(meta):
            for vname, prov_set in source.get_vpkgs(meta).items():
                if vname in vpkgs:
                    vpkgs[vname].update(prov_set)
                else:
                    vpkgs[vname] = set(prov_set)
        return vpkgs

    def get_matching_sources(self, meta):
        """All sources whose group predicates apply to this client."""
        return [src for src in self.sources if src.applies(meta)]

    def HandlesEntry(self, entry, metadata):
        """Claim Package entries for clients in any sentinel group."""
        if entry.tag != 'Package':
            return False
        for grp in metadata.groups:
            if grp in self.sentinels:
                return True
        return False

    def HandleEntry(self, entry, metadata):
        """Stamp the entry with version=auto and the source package type."""
        entry.set('version', 'auto')
        for source in self.sources:
            if [grp for grp in metadata.groups if grp in source.basegroups]:
                entry.set('type', source.ptype)
    def complete(self, meta, input_requirements, debug=False):
        '''Build the transitive closure of all package dependencies

        Arguments:
        meta - client metadata instance
        input_requirements - set of initially required package names
        debug - print out debug information for the decision making process
        returns => (set(packages), set(unsatisfied requirements), package type)
        '''
        sources = self.get_matching_sources(meta)
        # reverse list so that priorities correspond to file order
        sources.reverse()
        if len(sources) == 0:
            self.logger.error("Packages: No matching sources for client %s; improper group memberships?" % (meta.hostname))
            return set(), set(), 'failed'
        ptype = set([s.ptype for s in sources])
        if len(ptype) < 1:
            return set(), set(), 'failed'

        # setup vpkg cache, keyed by the client's relevant group tuple
        pgrps = self.get_relevant_groups(meta)
        if pgrps not in self.virt_pkgs:
            self.virt_pkgs[pgrps] = self.build_vpkgs_entry(meta)
        vpkg_cache = self.virt_pkgs[pgrps]

        # unclassified is set of unsatisfied requirements (may be pkg for vpkg)
        unclassified = set(input_requirements)
        vpkgs = set()
        both = set()
        pkgs = set(input_requirements)

        packages = set()
        examined = set()
        unknown = set()

        final_pass = False
        really_done = False
        # Fixed-point loop: classify names, expand package deps, then
        # resolve virtual/ambiguous names, until nothing remains; one
        # extra "final" pass force-resolves leftover ambiguous entries.
        # do while unclassified or vpkgs or both or pkgs
        while unclassified or pkgs or both or final_pass:
            #print len(unclassified), len(pkgs), len(both), len(vpkgs), final_pass
            if really_done:
                break
            if len(unclassified) + len(pkgs) + len(both) == 0:
                # one more pass then exit
                really_done = True

            while unclassified:
                # Phase 1: sort each name into pkg / vpkg / both / unknown.
                current = unclassified.pop()
                examined.add(current)
                is_pkg = True in [source.is_package(meta, current) for source in sources]
                is_vpkg = current in vpkg_cache

                if is_pkg and is_vpkg:
                    both.add(current)
                elif is_pkg and not is_vpkg:
                    pkgs.add(current)
                elif is_vpkg and not is_pkg:
                    vpkgs.add(current)
                elif not is_vpkg and not is_pkg:
                    unknown.add(current)

            while pkgs:
                # direct packages; current can be added, and all deps should be resolved
                current = pkgs.pop()
                if debug:
                    self.logger.debug("Packages: handling package requirement %s" % (current))
                deps = ()
                for source in sources:
                    if source.is_package(meta, current):
                        try:
                            deps = source.get_deps(meta, current)
                            break
                        except:
                            continue
                packages.add(current)
                newdeps = set(deps).difference(examined)
                if debug and newdeps:
                    self.logger.debug("Packages: Package %s added requirements %s" % (current, newdeps))
                unclassified.update(newdeps)

            satisfied_vpkgs = set()
            for current in vpkgs:
                # virtual dependencies, satisfied if one of N in the config, or can be forced if only one provider
                if len(vpkg_cache[current]) == 1:
                    if debug:
                        self.logger.debug("Packages: requirement %s satisfied by %s" % (current, vpkg_cache[current]))
                    unclassified.update(vpkg_cache[current].difference(examined))
                    satisfied_vpkgs.add(current)
                elif [item for item in vpkg_cache[current] if item in packages]:
                    if debug:
                        self.logger.debug("Packages: requirement %s satisfied by %s" % (current, [item for item in vpkg_cache[current] if item in packages]))
                    satisfied_vpkgs.add(current)
            vpkgs.difference_update(satisfied_vpkgs)

            satisfied_both = set()
            for current in both:
                # packages that are both have virtual providers as well as a package with that name
                # allow use of virt through explicit specification, then fall back to forcing current on last pass
                if [item for item in vpkg_cache[current] if item in packages]:
                    if debug:
                        self.logger.debug("Packages: requirement %s satisfied by %s" % (current, [item for item in vpkg_cache[current] if item in packages]))
                    satisfied_both.add(current)
                elif current in input_requirements or final_pass:
                    pkgs.add(current)
                    satisfied_both.add(current)
            both.difference_update(satisfied_both)

            if len(unclassified) + len(pkgs) == 0:
                final_pass = True
            else:
                final_pass = False

            for source in sources:
                source.filter_unknown(unknown)

        return packages, unknown, ptype.pop()
structures): + '''Ensure client configurations include all needed prerequisites + + Arguments: + meta - client metadata instance + structures - a list of structure-stage entry combinations + ''' + if self.disableResolver: return # Config requests no resolver + + initial = set([pkg.get('name') for struct in structures \ + for pkg in struct.findall('Package') + + struct.findall('BoundPackage')]) + news = lxml.etree.Element('Independent') + packages, unknown, ptype = self.complete(meta, initial, + debug=self.debug_flag) + if unknown: + self.logger.info("Got unknown entries") + self.logger.info(list(unknown)) + newpkgs = list(packages.difference(initial)) + newpkgs.sort() + for pkg in newpkgs: + lxml.etree.SubElement(news, 'BoundPackage', name=pkg, + type=ptype, version='auto', origin='Packages') + structures.append(news) + + def make_non_redundant(self, meta, plname=None, plist=None): + '''build a non-redundant version of a list of packages + + Arguments: + meta - client metadata instance + plname - name of file containing a list of packages + ''' + if plname is not None: + pkgnames = set([x.strip() for x in open(plname).readlines()]) + elif plist is not None: + pkgnames = set(plist) + redundant = set() + sources = self.get_matching_sources(meta) + for source in sources: + for pkgname in pkgnames: + if source.is_pkg(meta, current): + try: + deps = source.get_deps(meta, pkgname) + except: + continue + for rpkg in deps: + if rpkg in pkgnames: + redundant.add(rpkg) + return pkgnames.difference(redundant), redundant + + def Refresh(self): + '''Packages.Refresh() => True|False\nReload configuration specification and download sources\n''' + self._load_config(force_update=True) + return True + + def Reload(self): + '''Packages.Refresh() => True|False\nReload configuration specification and sources\n''' + self._load_config() + return True + + def _load_config(self, force_update=False): + ''' + Load the configuration data and setup sources + + Keyword args: + force_update 
Force downloading repo data + ''' + self.virt_pkgs = dict() + try: + xdata = lxml.etree.parse(self.data + '/config.xml') + xdata.xinclude() + xdata = xdata.getroot() + except (lxml.etree.XIncludeError, \ + lxml.etree.XMLSyntaxError), xmlerr: + self.logger.error("Package: Error processing xml: %s" % xmlerr) + raise Bcfg2.Server.Plugin.PluginInitError + except IOError: + self.logger.error("Failed to read Packages configuration. Have" + + " you created your config.xml file?") + raise Bcfg2.Server.Plugin.PluginInitError + + # Load Packages config + config = xdata.xpath('//Sources/Config') + if config: + if config[0].get("resolver", "enabled").lower() == "disabled": + self.logger.info("Packages: Resolver disabled") + self.disableResolver = True + if config[0].get("metadata", "enabled").lower() == "disabled": + self.logger.info("Packages: Metadata disabled") + self.disableResolver = True + self.disableMetaData = True + + self.sentinels = set() + self.sources = [] + for s in xdata.findall('.//APTSource'): + self.sources.append(APTSource(self.cachepath, **source_from_xml(s))) + for s in xdata.findall('.//YUMSource'): + self.sources.append(YUMSource(self.cachepath, **source_from_xml(s))) + for s in xdata.findall('.//PACSource'): + self.sources.append(PACSource(self.cachepath, **source_from_xml(s))) + + cachefiles = [] + for source in self.sources: + cachefiles.append(source.cachefile) + if not self.disableMetaData: source.setup_data(force_update) + self.sentinels.update(source.basegroups) + for cfile in glob.glob("%s/cache-*" % self.cachepath): + if cfile not in cachefiles: + os.unlink(cfile) + + def get_additional_data(self, meta): + sdata = [] + [sdata.extend(copy.deepcopy(src.url_map)) for src in self.get_matching_sources(meta)] + return dict(sources=sdata) diff --git a/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py new file mode 100644 index 000000000..b58a7c91d --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py @@ -0,0 
+1,155 @@ +'''This module implements a package management scheme for all images''' +__revision__ = '$Revision$' + +import logging +import re +import Bcfg2.Server.Plugin + +logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr') + +class FuzzyDict(dict): + fuzzy = re.compile('(?P<name>.*):(?P<alist>\S+(,\S+)*)') + def __getitem__(self, key): + if isinstance(key, str): + mdata = self.fuzzy.match(key) + if mdata: + return dict.__getitem__(self, mdata.groupdict()['name']) + else: + print "got non-string key %s" % str(key) + return dict.__getitem__(self, key) + + def has_key(self, key): + if isinstance(key, str): + mdata = self.fuzzy.match(key) + if self.fuzzy.match(key): + return dict.has_key(self, mdata.groupdict()['name']) + return dict.has_key(self, key) + + def get(self, key, default=None): + try: + return self.__getitem__(key) + except: + if default: + return default + raise + +class PNode(Bcfg2.Server.Plugin.INode): + """PNode has a list of packages available at a particular group intersection.""" + splitters = {'rpm':re.compile('^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \ + '(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'), + 'encap':re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')} + ignore = ['Package'] + + def Match(self, metadata, data): + """Return a dictionary of package mappings.""" + if self.predicate(metadata): + for key in self.contents: + try: + data[key].update(self.contents[key]) + except: + data[key] = FuzzyDict() + data[key].update(self.contents[key]) + for child in self.children: + child.Match(metadata, data) + + def __init__(self, data, pdict, parent=None): + # copy local attributes to all child nodes if no local attribute exists + if not pdict.has_key('Package'): + pdict['Package'] = set() + for child in data.getchildren(): + for attr in [key for key in data.attrib.keys() \ + if key != 'name' and not child.attrib.has_key(key)]: + try: + child.set(attr, data.get(attr)) + except: + # don't fail on things like comments 
and other immutable elements + pass + Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent) + if not self.contents.has_key('Package'): + self.contents['Package'] = FuzzyDict() + for pkg in data.findall('./Package'): + if pkg.attrib.has_key('name') and pkg.get('name') not in pdict['Package']: + pdict['Package'].add(pkg.get('name')) + if pkg.get('name') != None: + self.contents['Package'][pkg.get('name')] = {} + if pkg.getchildren(): + self.contents['Package'][pkg.get('name')]['__children__'] \ + = pkg.getchildren() + if pkg.attrib.has_key('simplefile'): + pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile'))) + self.contents['Package'][pkg.get('name')].update(pkg.attrib) + else: + if pkg.attrib.has_key('file'): + if pkg.attrib.has_key('multiarch'): + archs = pkg.get('multiarch').split() + srcs = pkg.get('srcs', pkg.get('multiarch')).split() + url = ' '.join(["%s/%s" % (pkg.get('uri'), pkg.get('file') % {'src':srcs[idx], 'arch':archs[idx]}) + for idx in range(len(archs))]) + pkg.set('url', url) + else: + pkg.set('url', '%s/%s' % (pkg.get('uri'), pkg.get('file'))) + if self.splitters.has_key(pkg.get('type')) and pkg.get('file') != None: + mdata = self.splitters[pkg.get('type')].match(pkg.get('file')) + if not mdata: + logger.error("Failed to match pkg %s" % pkg.get('file')) + continue + pkgname = mdata.group('name') + self.contents['Package'][pkgname] = mdata.groupdict() + self.contents['Package'][pkgname].update(pkg.attrib) + if pkg.attrib.get('file'): + self.contents['Package'][pkgname]['url'] = pkg.get('url') + self.contents['Package'][pkgname]['type'] = pkg.get('type') + if pkg.get('verify'): + self.contents['Package'][pkgname]['verify'] = pkg.get('verify') + if pkg.get('multiarch'): + self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch') + if pkgname not in pdict['Package']: + pdict['Package'].add(pkgname) + if pkg.getchildren(): + self.contents['Package'][pkgname]['__children__'] = pkg.getchildren() + else: + 
self.contents['Package'][pkg.get('name')].update(pkg.attrib) + + +class PkgSrc(Bcfg2.Server.Plugin.XMLSrc): + """PkgSrc files contain a PNode hierarchy that returns matching package entries.""" + __node__ = PNode + __cacheobj__ = FuzzyDict + +class Pkgmgr(Bcfg2.Server.Plugin.PrioDir): + """This is a generator that handles package assignments.""" + name = 'Pkgmgr' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + __child__ = PkgSrc + __element__ = 'Package' + + def HandleEvent(self, event): + '''Handle events and update dispatch table''' + Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event) + for src in self.entries.values(): + for itype, children in src.items.iteritems(): + for child in children: + try: + self.Entries[itype][child] = self.BindEntry + except KeyError: + self.Entries[itype] = FuzzyDict([(child, + self.BindEntry)]) + + def BindEntry(self, entry, metadata): + """Bind data for entry, and remove instances that are not requested.""" + pname = entry.get('name') + Bcfg2.Server.Plugin.PrioDir.BindEntry(self, entry, metadata) + if entry.findall('Instance'): + mdata = FuzzyDict.fuzzy.match(pname) + if mdata: + arches = mdata.group('alist').split(',') + [entry.remove(inst) for inst in \ + entry.findall('Instance') \ + if inst.get('arch') not in arches] + + def HandlesEntry(self, entry, metadata): + return entry.tag == 'Package' and entry.get('name').split(':')[0] in self.Entries['Package'].keys() + + def HandleEntry(self, entry, metadata): + self.BindEntry(entry, metadata) diff --git a/build/lib/Bcfg2/Server/Plugins/Probes.py b/build/lib/Bcfg2/Server/Plugins/Probes.py new file mode 100644 index 000000000..c00185732 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Probes.py @@ -0,0 +1,150 @@ +import lxml.etree +import re + +import Bcfg2.Server.Plugin + +specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)") +probe_matcher = re.compile("(.*/)?(?P<basename>\S+)") + +class 
ProbeSet(Bcfg2.Server.Plugin.EntrySet): + ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$") + def __init__(self, path, fam, encoding, plugin_name): + fpattern = '[0-9A-Za-z_\-]+' + self.plugin_name = plugin_name + Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, + Bcfg2.Server.Plugin.SpecificData, + encoding) + fam.AddMonitor(path, self) + self.bangline = re.compile('^#!(?P<interpreter>.*)$') + + def HandleEvent(self, event): + if event.filename != self.path: + return self.handle_event(event) + + def get_probe_data(self, metadata): + ret = [] + build = dict() + candidates = self.get_matching(metadata) + candidates.sort(lambda x, y: cmp(x.specific, y.specific)) + for entry in candidates: + rem = specific_probe_matcher.match(entry.name) + if not rem: + rem = probe_matcher.match(entry.name) + pname = rem.group('basename') + if pname not in build: + build[pname] = entry + + for (name, entry) in build.iteritems(): + probe = lxml.etree.Element('probe') + probe.set('name', name.split('/')[-1]) + probe.set('source', self.plugin_name) + probe.text = entry.data + match = self.bangline.match(entry.data.split('\n')[0]) + if match: + probe.set('interpreter', match.group('interpreter')) + else: + probe.set('interpreter', '/bin/sh') + ret.append(probe) + return ret + +class Probes(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Probing, + Bcfg2.Server.Plugin.Connector): + """A plugin to gather information from a client machine.""" + name = 'Probes' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + Bcfg2.Server.Plugin.Probing.__init__(self) + + try: + self.probes = ProbeSet(self.data, core.fam, core.encoding, + self.name) + except: + raise Bcfg2.Server.Plugin.PluginInitError + + self.probedata = dict() + self.cgroups = dict() + self.load_data() + + def write_data(self): + """Write probe 
data out for use with bcfg2-info.""" + top = lxml.etree.Element("Probed") + for client, probed in self.probedata.iteritems(): + cx = lxml.etree.SubElement(top, 'Client', name=client) + for probe in probed: + lxml.etree.SubElement(cx, 'Probe', name=probe, + value=self.probedata[client][probe]) + for group in self.cgroups[client]: + lxml.etree.SubElement(cx, "Group", name=group) + data = lxml.etree.tostring(top, encoding='UTF-8', xml_declaration=True, + pretty_print='true') + try: + datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w') + except IOError: + self.logger.error("Failed to write probed.xml") + datafile.write(data) + + def load_data(self): + try: + data = lxml.etree.parse(self.data + '/probed.xml').getroot() + except: + self.logger.error("Failed to read file probed.xml") + return + self.probedata = {} + self.cgroups = {} + for client in data.getchildren(): + self.probedata[client.get('name')] = {} + self.cgroups[client.get('name')]=[] + for pdata in client: + if (pdata.tag == 'Probe'): + self.probedata[client.get('name')][pdata.get('name')] = pdata.get('value') + elif (pdata.tag == 'Group'): + self.cgroups[client.get('name')].append(pdata.get('name')) + + def GetProbes(self, meta, force=False): + """Return a set of probes for execution on client.""" + return self.probes.get_probe_data(meta) + + def ReceiveData(self, client, datalist): + self.cgroups[client.hostname] = [] + self.probedata[client.hostname] = {} + for data in datalist: + self.ReceiveDataItem(client, data) + self.write_data() + + def ReceiveDataItem(self, client, data): + """Receive probe results pertaining to client.""" + if not self.cgroups.has_key(client.hostname): + self.cgroups[client.hostname] = [] + if data.text == None: + self.logger.error("Got null response to probe %s from %s" % \ + (data.get('name'), client.hostname)) + try: + self.probedata[client.hostname].update({data.get('name'): ''}) + except KeyError: + self.probedata[client.hostname] = {data.get('name'): ''} + return + 
dlines = data.text.split('\n') + self.logger.debug("%s:probe:%s:%s" % (client.hostname, + data.get('name'), [line.strip() for line in dlines])) + for line in dlines[:]: + if line.split(':')[0] == 'group': + newgroup = line.split(':')[1].strip() + if newgroup not in self.cgroups[client.hostname]: + self.cgroups[client.hostname].append(newgroup) + dlines.remove(line) + dtext = "\n".join(dlines) + try: + self.probedata[client.hostname].update({data.get('name'):dtext}) + except KeyError: + self.probedata[client.hostname] = {data.get('name'):dtext} + + def get_additional_groups(self, meta): + return self.cgroups.get(meta.hostname, list()) + + def get_additional_data(self, meta): + return self.probedata.get(meta.hostname, dict()) diff --git a/build/lib/Bcfg2/Server/Plugins/Properties.py b/build/lib/Bcfg2/Server/Plugins/Properties.py new file mode 100644 index 000000000..86330f6a0 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Properties.py @@ -0,0 +1,37 @@ +import copy +import lxml.etree + +import Bcfg2.Server.Plugin + + +class PropertyFile(Bcfg2.Server.Plugin.XMLFileBacked): + """Class for properties files.""" + + def Index(self): + """Build data into an xml object.""" + try: + self.data = lxml.etree.XML(self.data) + except lxml.etree.XMLSyntaxError: + Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name) + + +class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): + __child__ = PropertyFile + + +class Properties(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector): + """ + The properties plugin maps property + files into client metadata instances. 
+ """ + name = 'Properties' + version = '$Revision$' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + self.store = PropDirectoryBacked(self.data, core.fam) + + def get_additional_data(self, _): + return copy.deepcopy(self.store.entries) diff --git a/build/lib/Bcfg2/Server/Plugins/Rules.py b/build/lib/Bcfg2/Server/Plugins/Rules.py new file mode 100644 index 000000000..eb0547cdb --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Rules.py @@ -0,0 +1,11 @@ +"""This generator provides rule-based entry mappings.""" +__revision__ = '$Revision$' + +import Bcfg2.Server.Plugin + + +class Rules(Bcfg2.Server.Plugin.PrioDir): + """This is a generator that handles service assignments.""" + name = 'Rules' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' diff --git a/build/lib/Bcfg2/Server/Plugins/SGenshi.py b/build/lib/Bcfg2/Server/Plugins/SGenshi.py new file mode 100644 index 000000000..cead06e34 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/SGenshi.py @@ -0,0 +1,76 @@ +'''This module implements a templating generator based on Genshi''' +__revision__ = '$Revision$' + +import genshi.input +import genshi.template +import lxml.etree +import logging + +import Bcfg2.Server.Plugin +import Bcfg2.Server.Plugins.TGenshi + +logger = logging.getLogger('Bcfg2.Plugins.SGenshi') + + +class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile): + + def get_xml_value(self, metadata): + if not hasattr(self, 'template'): + logger.error("No parsed template information for %s" % (self.name)) + raise Bcfg2.Server.Plugin.PluginExecutionError + try: + stream = self.template.generate(metadata=metadata,).filter( \ + Bcfg2.Server.Plugins.TGenshi.removecomment) + data = stream.render('xml', strip_whitespace=False) + return lxml.etree.XML(data) + except LookupError, lerror: + logger.error('Genshi lookup error: %s' % lerror) + except genshi.template.TemplateError, terror: + 
logger.error('Genshi template error: %s' % terror) + except genshi.input.ParseError, perror: + logger.error('Genshi parse error: %s' % perror) + raise + + +class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet): + + def __init__(self, path, fam, encoding): + fpattern = '\S+\.xml' + Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path, + SGenshiTemplateFile, encoding) + fam.AddMonitor(path, self) + + def HandleEvent(self, event): + '''passthrough event handler for old calling convention''' + if event.filename != self.path: + return self.handle_event(event) + + def BuildStructures(self, metadata): + """Build SGenshi structures.""" + ret = [] + for entry in self.get_matching(metadata): + try: + ret.append(entry.get_xml_value(metadata)) + except: + logger.error("SGenshi: Failed to template file %s" % entry.name) + return ret + + +class SGenshi(SGenshiEntrySet, + Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Structure): + """The SGenshi plugin provides templated structures.""" + name = 'SGenshi' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + deprecated = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Structure.__init__(self) + try: + SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding) + except: + logger.error("Failed to load %s repository; disabling %s" \ + % (self.name, self.name)) + raise Bcfg2.Server.Plugin.PluginInitError diff --git a/build/lib/Bcfg2/Server/Plugins/SSHbase.py b/build/lib/Bcfg2/Server/Plugins/SSHbase.py new file mode 100644 index 000000000..6d68ecb0a --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/SSHbase.py @@ -0,0 +1,279 @@ +'''This module manages ssh key files for bcfg2''' +__revision__ = '$Revision$' + +import binascii +import os +import socket +import shutil +import tempfile +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin + + +class SSHbase(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Generator, + 
Bcfg2.Server.Plugin.DirectoryBacked, + Bcfg2.Server.Plugin.PullTarget): + """ + The sshbase generator manages ssh host keys (both v1 and v2) + for hosts. It also manages the ssh_known_hosts file. It can + integrate host keys from other management domains and similarly + export its keys. The repository contains files in the following + formats: + + ssh_host_key.H_(hostname) -> the v1 host private key for + (hostname) + ssh_host_key.pub.H_(hostname) -> the v1 host public key + for (hostname) + ssh_host_(dr)sa_key.H_(hostname) -> the v2 ssh host + private key for (hostname) + ssh_host_(dr)sa_key.pub.H_(hostname) -> the v2 ssh host + public key for (hostname) + ssh_known_hosts -> the current known hosts file. this + is regenerated each time a new key is generated. + + """ + name = 'SSHbase' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + + pubkeys = ["ssh_host_dsa_key.pub.H_%s", + "ssh_host_rsa_key.pub.H_%s", "ssh_host_key.pub.H_%s"] + hostkeys = ["ssh_host_dsa_key.H_%s", + "ssh_host_rsa_key.H_%s", "ssh_host_key.H_%s"] + keypatterns = ['ssh_host_dsa_key', 'ssh_host_rsa_key', 'ssh_host_key', + 'ssh_host_dsa_key.pub', 'ssh_host_rsa_key.pub', + 'ssh_host_key.pub'] + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Generator.__init__(self) + Bcfg2.Server.Plugin.PullTarget.__init__(self) + try: + Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, + self.core.fam) + except OSError, ioerr: + self.logger.error("Failed to load SSHbase repository from %s" \ + % (self.data)) + self.logger.error(ioerr) + raise Bcfg2.Server.Plugin.PluginInitError + self.Entries = {'Path': + {'/etc/ssh/ssh_known_hosts': self.build_skn, + '/etc/ssh/ssh_host_dsa_key': self.build_hk, + '/etc/ssh/ssh_host_rsa_key': self.build_hk, + '/etc/ssh/ssh_host_dsa_key.pub': self.build_hk, + '/etc/ssh/ssh_host_rsa_key.pub': self.build_hk, + '/etc/ssh/ssh_host_key': self.build_hk, + '/etc/ssh/ssh_host_key.pub': 
self.build_hk}} + self.ipcache = {} + self.namecache = {} + self.__skn = False + + def get_skn(self): + """Build memory cache of the ssh known hosts file.""" + if not self.__skn: + self.__skn = "\n".join([value.data for key, value in \ + self.entries.iteritems() if \ + key.endswith('.static')]) + names = dict() + # if no metadata is registered yet, defer + if len(self.core.metadata.query.all()) == 0: + self.__skn = False + return self.__skn + for cmeta in self.core.metadata.query.all(): + names[cmeta.hostname] = set([cmeta.hostname]) + names[cmeta.hostname].update(cmeta.aliases) + newnames = set() + newips = set() + for name in names[cmeta.hostname]: + newnames.add(name.split('.')[0]) + try: + newips.add(self.get_ipcache_entry(name)[0]) + except: + continue + names[cmeta.hostname].update(newnames) + names[cmeta.hostname].update(cmeta.addresses) + names[cmeta.hostname].update(newips) + # TODO: Only perform reverse lookups on IPs if an option is set. + if True: + for ip in newips: + try: + names[cmeta.hostname].update(self.get_namecache_entry(ip)) + except: + continue + names[cmeta.hostname] = sorted(names[cmeta.hostname]) + # now we have our name cache + pubkeys = [pubk for pubk in self.entries.keys() \ + if pubk.find('.pub.H_') != -1] + pubkeys.sort() + badnames = set() + for pubkey in pubkeys: + hostname = pubkey.split('H_')[1] + if hostname not in names: + if hostname not in badnames: + badnames.add(hostname) + self.logger.error("SSHbase: Unknown host %s; ignoring public keys" % hostname) + continue + self.__skn += "%s %s" % (','.join(names[hostname]), + self.entries[pubkey].data) + return self.__skn + + def set_skn(self, value): + """Set backing data for skn.""" + self.__skn = value + skn = property(get_skn, set_skn) + + def HandleEvent(self, event=None): + """Local event handler that does skn regen on pubkey change.""" + Bcfg2.Server.Plugin.DirectoryBacked.HandleEvent(self, event) + if event and '_key.pub.H_' in event.filename: + self.skn = False + if event and 
event.filename.endswith('.static'): + self.skn = False + if not self.__skn: + if (len(self.entries.keys())) >= (len(os.listdir(self.data))-1): + _ = self.skn + + def HandlesEntry(self, entry, _): + """Handle key entries dynamically.""" + return entry.tag == 'Path' and \ + ([fpat for fpat in self.keypatterns + if entry.get('name').endswith(fpat)] + or entry.get('name').endswith('ssh_known_hosts')) + + def HandleEntry(self, entry, metadata): + """Bind data.""" + if entry.get('name').endswith('ssh_known_hosts'): + return self.build_skn(entry, metadata) + else: + return self.build_hk(entry, metadata) + + def get_ipcache_entry(self, client): + """Build a cache of dns results.""" + if client in self.ipcache: + if self.ipcache[client]: + return self.ipcache[client] + else: + raise socket.gaierror + else: + # need to add entry + try: + ipaddr = socket.gethostbyname(client) + self.ipcache[client] = (ipaddr, client) + return (ipaddr, client) + except socket.gaierror: + cmd = "getent hosts %s" % client + ipaddr = Popen(cmd, shell=True, \ + stdout=PIPE).stdout.read().strip().split() + if ipaddr: + self.ipcache[client] = (ipaddr, client) + return (ipaddr, client) + self.ipcache[client] = False + self.logger.error("Failed to find IP address for %s" % client) + raise socket.gaierror + + def get_namecache_entry(self, cip): + """Build a cache of name lookups from client IP addresses.""" + if cip in self.namecache: + # lookup cached name from IP + if self.namecache[cip]: + return self.namecache[cip] + else: + raise socket.gaierror + else: + # add an entry that has not been cached + try: + rvlookup = socket.gethostbyaddr(cip) + if rvlookup[0]: + self.namecache[cip] = [rvlookup[0]] + else: + self.namecache[cip] = [] + self.namecache[cip].extend(rvlookup[1]) + return self.namecache[cip] + except socket.gaierror: + self.namecache[cip] = False + self.logger.error("Failed to find any names associated with IP address %s" % cip) + raise + + def build_skn(self, entry, metadata): + """This 
function builds builds a host specific known_hosts file.""" + client = metadata.hostname + entry.text = self.skn + hostkeys = [keytmpl % client for keytmpl in self.pubkeys \ + if (keytmpl % client) in self.entries] + hostkeys.sort() + for hostkey in hostkeys: + entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" % ( + self.entries[hostkey].data) + permdata = {'owner':'root', + 'group':'root', + 'type':'file', + 'perms':'0644'} + [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] + + def build_hk(self, entry, metadata): + """This binds host key data into entries.""" + client = metadata.hostname + filename = "%s.H_%s" % (entry.get('name').split('/')[-1], client) + if filename not in self.entries.keys(): + self.GenerateHostKeys(client) + if not filename in self.entries: + self.logger.error("%s still not registered" % filename) + raise Bcfg2.Server.Plugin.PluginExecutionError + keydata = self.entries[filename].data + permdata = {'owner':'root', + 'group':'root', + 'type':'file', + 'perms':'0600'} + if entry.get('name')[-4:] == '.pub': + permdata['perms'] = '0644' + [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] + if "ssh_host_key.H_" == filename[:15]: + entry.attrib['encoding'] = 'base64' + entry.text = binascii.b2a_base64(keydata) + else: + entry.text = keydata + + def GenerateHostKeys(self, client): + """Generate new host keys for client.""" + keylist = [keytmpl % client for keytmpl in self.hostkeys] + for hostkey in keylist: + if 'ssh_host_rsa_key.H_' == hostkey[:19]: + keytype = 'rsa' + elif 'ssh_host_dsa_key.H_' == hostkey[:19]: + keytype = 'dsa' + else: + keytype = 'rsa1' + + if hostkey not in self.entries.keys(): + fileloc = "%s/%s" % (self.data, hostkey) + publoc = self.data + '/' + ".".join([hostkey.split('.')[0], + 'pub', + "H_%s" % client]) + tempdir = tempfile.mkdtemp() + temploc = "%s/%s" % (tempdir, hostkey) + cmd = 'ssh-keygen -q -f %s -N "" -t %s -C root@%s < /dev/null' + os.system(cmd % (temploc, keytype, 
client)) + shutil.copy(temploc, fileloc) + shutil.copy("%s.pub" % temploc, publoc) + self.AddEntry(hostkey) + self.AddEntry(".".join([hostkey.split('.')[0]]+['pub', "H_%s" \ + % client])) + try: + os.unlink(temploc) + os.unlink("%s.pub" % temploc) + os.rmdir(tempdir) + except OSError: + self.logger.error("Failed to unlink temporary ssh keys") + + def AcceptChoices(self, _, metadata): + return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)] + + def AcceptPullData(self, specific, entry, log): + """Per-plugin bcfg2-admin pull support.""" + # specific will always be host specific + filename = "%s/%s.H_%s" % (self.data, entry['name'].split('/')[-1], + specific.hostname) + open(filename, 'w').write(entry['text']) + if log: + print "Wrote file %s" % filename diff --git a/build/lib/Bcfg2/Server/Plugins/SSLCA.py b/build/lib/Bcfg2/Server/Plugins/SSLCA.py new file mode 100644 index 000000000..0dc448e69 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/SSLCA.py @@ -0,0 +1,239 @@ +import Bcfg2.Server.Plugin +import Bcfg2.Options +import lxml.etree +import posixpath +import tempfile +import os +from subprocess import Popen, PIPE, STDOUT +from ConfigParser import ConfigParser + +class SSLCA(Bcfg2.Server.Plugin.GroupSpool): + """ + The SSLCA generator handles the creation and + management of ssl certificates and their keys. + """ + name = 'SSLCA' + __version__ = '$Id:$' + __author__ = 'g.hagger@gmail.com' + __child__ = Bcfg2.Server.Plugin.FileBacked + key_specs = {} + cert_specs = {} + CAs = {} + + def HandleEvent(self, event=None): + """ + Updates which files this plugin handles based upon filesystem events. + Allows configuration items to be added/removed without server restarts. 
+ """ + action = event.code2str() + if event.filename[0] == '/': + return + epath = "".join([self.data, self.handles[event.requestID], + event.filename]) + if posixpath.isdir(epath): + ident = self.handles[event.requestID] + event.filename + else: + ident = self.handles[event.requestID][:-1] + + fname = "".join([ident, '/', event.filename]) + + if event.filename.endswith('.xml'): + if action in ['exists', 'created', 'changed']: + if event.filename.endswith('key.xml'): + key_spec = dict(lxml.etree.parse(epath).find('Key').items()) + self.key_specs[ident] = { + 'bits': key_spec.get('bits', 2048), + 'type': key_spec.get('type', 'rsa') + } + self.Entries['Path'][ident] = self.get_key + elif event.filename.endswith('cert.xml'): + cert_spec = dict(lxml.etree.parse(epath).find('Cert').items()) + ca = cert_spec.get('ca', 'default') + self.cert_specs[ident] = { + 'ca': ca, + 'format': cert_spec.get('format', 'pem'), + 'key': cert_spec.get('key'), + 'days': cert_spec.get('days', 365), + 'C': cert_spec.get('c'), + 'L': cert_spec.get('l'), + 'ST': cert_spec.get('st'), + 'OU': cert_spec.get('ou'), + 'O': cert_spec.get('o'), + 'emailAddress': cert_spec.get('emailaddress') + } + cp = ConfigParser() + cp.read(self.core.cfile) + self.CAs[ca] = dict(cp.items('sslca_'+ca)) + self.Entries['Path'][ident] = self.get_cert + if action == 'deleted': + if ident in self.Entries['Path']: + del self.Entries['Path'][ident] + else: + if action in ['exists', 'created']: + if posixpath.isdir(epath): + self.AddDirectoryMonitor(epath[len(self.data):]) + if ident not in self.entries and posixpath.isfile(epath): + self.entries[fname] = self.__child__(epath) + self.entries[fname].HandleEvent(event) + if action == 'changed': + self.entries[fname].HandleEvent(event) + elif action == 'deleted': + if fname in self.entries: + del self.entries[fname] + else: + self.entries[fname].HandleEvent(event) + + def get_key(self, entry, metadata): + """ + either grabs a prexisting key hostfile, or triggers the 
generation + of a new key if one doesn't exist. + """ + # set path type and permissions, otherwise bcfg2 won't bind the file + permdata = {'owner':'root', + 'group':'root', + 'type':'file', + 'perms':'644'} + [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] + + # check if we already have a hostfile, or need to generate a new key + # TODO: verify key fits the specs + path = entry.get('name') + filename = "".join([path, '/', path.rsplit('/', 1)[1], '.H_', metadata.hostname]) + if filename not in self.entries.keys(): + key = self.build_key(filename, entry, metadata) + open(self.data + filename, 'w').write(key) + entry.text = key + else: + entry.text = self.entries[filename].data + + def build_key(self, filename, entry, metadata): + """ + generates a new key according the the specification + """ + type = self.key_specs[entry.get('name')]['type'] + bits = self.key_specs[entry.get('name')]['bits'] + if type == 'rsa': + cmd = "openssl genrsa %s " % bits + elif type == 'dsa': + cmd = "openssl dsaparam -noout -genkey %s" % bits + key = Popen(cmd, shell=True, stdout=PIPE).stdout.read() + return key + + def get_cert(self, entry, metadata): + """ + either grabs a prexisting cert hostfile, or triggers the generation + of a new cert if one doesn't exist. 
+ """ + # set path type and permissions, otherwise bcfg2 won't bind the file + permdata = {'owner':'root', + 'group':'root', + 'type':'file', + 'perms':'644'} + [entry.attrib.__setitem__(key, permdata[key]) for key in permdata] + + path = entry.get('name') + filename = "".join([path, '/', path.rsplit('/', 1)[1], '.H_', metadata.hostname]) + + # first - ensure we have a key to work with + key = self.cert_specs[entry.get('name')].get('key') + key_filename = "".join([key, '/', key.rsplit('/', 1)[1], '.H_', metadata.hostname]) + if key_filename not in self.entries: + e = lxml.etree.Element('Path') + e.attrib['name'] = key + self.core.Bind(e, metadata) + + # check if we have a valid hostfile + if filename in self.entries.keys() and self.verify_cert(filename, entry): + entry.text = self.entries[filename].data + else: + cert = self.build_cert(key_filename, entry, metadata) + open(self.data + filename, 'w').write(cert) + entry.text = cert + + def verify_cert(self, filename, entry): + """ + check that a certificate validates against the ca cert, + and that it has not expired. 
+ """ + chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert') + cert = self.data + filename + cmd = "openssl verify -CAfile %s %s" % (chaincert, cert) + res = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read() + if res == cert + ": OK\n": + return True + return False + + def build_cert(self, key_filename, entry, metadata): + """ + creates a new certificate according to the specification + """ + req_config = self.build_req_config(entry, metadata) + req = self.build_request(key_filename, req_config, entry) + ca = self.cert_specs[entry.get('name')]['ca'] + ca_config = self.CAs[ca]['config'] + days = self.cert_specs[entry.get('name')]['days'] + passphrase = self.CAs[ca].get('passphrase') + if passphrase: + cmd = "openssl ca -config %s -in %s -days %s -batch -passin pass:%s" % (ca_config, req, days, passphrase) + else: + cmd = "openssl ca -config %s -in %s -days %s -batch" % (ca_config, req, days) + cert = Popen(cmd, shell=True, stdout=PIPE).stdout.read() + try: + os.unlink(req_config) + os.unlink(req) + except OSError: + self.logger.error("Failed to unlink temporary files") + return cert + + def build_req_config(self, entry, metadata): + """ + generates a temporary openssl configuration file that is + used to generate the required certificate request + """ + # create temp request config file + conffile = open(tempfile.mkstemp()[1], 'w') + cp = ConfigParser({}) + cp.optionxform = str + defaults = { + 'req': { + 'default_md': 'sha1', + 'distinguished_name': 'req_distinguished_name', + 'req_extensions': 'v3_req', + 'x509_extensions': 'v3_req', + 'prompt': 'no' + }, + 'req_distinguished_name': {}, + 'v3_req': { + 'subjectAltName': '@alt_names' + }, + 'alt_names': {} + } + for section in defaults.keys(): + cp.add_section(section) + for key in defaults[section]: + cp.set(section, key, defaults[section][key]) + x = 1 + altnames = list(metadata.aliases) + altnames.append(metadata.hostname) + for altname in altnames: + cp.set('alt_names', 
'DNS.'+str(x), altname) + x += 1 + for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']: + if self.cert_specs[entry.get('name')][item]: + cp.set('req_distinguished_name', item, self.cert_specs[entry.get('name')][item]) + cp.set('req_distinguished_name', 'CN', metadata.hostname) + cp.write(conffile) + conffile.close() + return conffile.name + + def build_request(self, key_filename, req_config, entry): + """ + creates the certificate request + """ + req = tempfile.mkstemp()[1] + days = self.cert_specs[entry.get('name')]['days'] + key = self.data + key_filename + cmd = "openssl req -new -config %s -days %s -key %s -text -out %s" % (req_config, days, key, req) + res = Popen(cmd, shell=True, stdout=PIPE).stdout.read() + return req + diff --git a/build/lib/Bcfg2/Server/Plugins/Snapshots.py b/build/lib/Bcfg2/Server/Plugins/Snapshots.py new file mode 100644 index 000000000..a4489ae95 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Snapshots.py @@ -0,0 +1,130 @@ +#import lxml.etree +import logging +import binascii +import difflib +#import sqlalchemy +#import sqlalchemy.orm +import Bcfg2.Server.Plugin +import Bcfg2.Server.Snapshots +import Bcfg2.Logger +from Bcfg2.Server.Snapshots.model import Snapshot +import Queue +import time +import threading + +logger = logging.getLogger('Snapshots') + +ftypes = ['ConfigFile', 'SymLink', 'Directory'] +datafields = { + 'Package': ['version'], + 'Path': ['type'], + 'Service': ['status'], + 'ConfigFile': ['owner', 'group', 'perms'], + 'Directory': ['owner', 'group', 'perms'], + 'SymLink': ['to'], + } + +def build_snap_ent(entry): + basefields = [] + if entry.tag in ['Package', 'Service']: + basefields += ['type'] + desired = dict([(key, unicode(entry.get(key))) for key in basefields]) + state = dict([(key, unicode(entry.get(key))) for key in basefields]) + desired.update([(key, unicode(entry.get(key))) for key in \ + datafields[entry.tag]]) + if entry.tag == 'ConfigFile' or \ + ((entry.tag == 'Path') and (entry.get('type') == 
class Snapshots(Bcfg2.Server.Plugin.Statistics,
                Bcfg2.Server.Plugin.Plugin):
    """Store client run results into the snapshots database."""
    name = 'Snapshots'
    experimental = True

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Statistics.__init__(self)
        self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
        # Snapshots are written asynchronously off a work queue so the
        # client upload path is not blocked on database commits.
        self.work_queue = Queue.Queue()
        self.loader = threading.Thread(target=self.load_snapshot)
        self.loader.start()

    def load_snapshot(self):
        """Worker loop: drain the queue and store snapshots until shutdown."""
        while self.running:
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=5)
            except:
                # Queue.Empty on timeout; loop again to re-check self.running.
                continue
            self.statistics_from_old_stats(metadata, data)

    def process_statistics(self, metadata, data):
        """Queue incoming statistics for asynchronous storage."""
        return self.work_queue.put((metadata, data))

    def statistics_from_old_stats(self, metadata, xdata):
        """Convert old-format statistics XML into a stored Snapshot."""
        # entries are name -> (modified, correct, start, desired, end)
        # not sure we can get all of this from old format stats
        entries = dict([('Package', dict()),
                        ('Service', dict()), ('Path', dict())])
        extra = dict([('Package', dict()), ('Service', dict()),
                      ('Path', dict())])
        state = xdata.find('.//Statistics')
        correct = state.get('state') == 'clean'
        revision = unicode(state.get('revision', '-1'))
        for entry in state.find('.//Bad'):
            data = [False, False, unicode(entry.get('name'))] \
                   + build_snap_ent(entry)
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            entries[etag][entry.get('name')] = data
        for entry in state.find('.//Modified'):
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            # BUGFIX: record the modified entry; previously the computed
            # record was built and then discarded in both branches, so
            # modified entries never reached the snapshot.  An entry also
            # present in <Bad> is overwritten with the modified=True record.
            entries[etag][entry.get('name')] = \
                [True, False, unicode(entry.get('name'))] + \
                build_snap_ent(entry)
        for entry in state.find('.//Extra'):
            if entry.tag in datafields:
                data = build_snap_ent(entry)[1]
                ename = unicode(entry.get('name'))
                data['name'] = ename
                extra[entry.tag][ename] = data
            else:
                # Replaces a stray debug print; unknown extra entry types
                # are merely reported.
                logger.info("Unhandled extra entry %s %s" %
                            (entry.tag, entry.get('name')))
        t2 = time.time()
        snap = Snapshot.from_data(self.session, correct, revision,
                                  metadata, entries, extra)
        self.session.add(snap)
        self.session.commit()
        t3 = time.time()
        logger.info("Snapshot storage took %fs" % (t3 - t2))
        return True
logging.getLogger('Bcfg2.Server.Statistics') + self.ReadFromFile() + + def WriteBack(self, force=0): + """Write statistics changes back to persistent store.""" + if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \ + or force: + try: + fout = open(self.filename + '.new', 'w') + except IOError, ioerr: + self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr)) + else: + fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True)) + fout.close() + os.rename(self.filename + '.new', self.filename) + self.dirty = 0 + self.lastwrite = time() + + def ReadFromFile(self): + """Reads current state regarding statistics.""" + try: + fin = open(self.filename, 'r') + data = fin.read() + fin.close() + self.element = XML(data) + self.dirty = 0 + except (IOError, XMLSyntaxError): + self.logger.error("Creating new statistics file %s"%(self.filename)) + self.element = Element('ConfigStatistics') + self.WriteBack() + self.dirty = 0 + + def updateStats(self, xml, client): + """Updates the statistics of a current node with new data.""" + + # Current policy: + # - Keep anything less than 24 hours old + # - Keep latest clean run for clean nodes + # - Keep latest clean and dirty run for dirty nodes + newstat = xml.find('Statistics') + + if newstat.get('state') == 'clean': + node_dirty = 0 + else: + node_dirty = 1 + + # Find correct node entry in stats data + # The following list comprehension should be guarenteed to return at + # most one result + nodes = [elem for elem in self.element.findall('Node') \ + if elem.get('name') == client] + nummatch = len(nodes) + if nummatch == 0: + # Create an entry for this node + node = SubElement(self.element, 'Node', name=client) + elif nummatch == 1 and not node_dirty: + # Delete old instance + node = nodes[0] + [node.remove(elem) for elem in node.findall('Statistics') \ + if self.isOlderThan24h(elem.get('time'))] + elif nummatch == 1 and node_dirty: + # Delete old dirty 
statistics entry + node = nodes[0] + [node.remove(elem) for elem in node.findall('Statistics') \ + if (elem.get('state') == 'dirty' \ + and self.isOlderThan24h(elem.get('time')))] + else: + # Shouldn't be reached + self.logger.error("Duplicate node entry for %s"%(client)) + + # Set current time for stats + newstat.set('time', asctime(localtime())) + + # Add statistic + node.append(copy.deepcopy(newstat)) + + # Set dirty + self.dirty = 1 + self.WriteBack(force=1) + + def isOlderThan24h(self, testTime): + """Helper function to determine if <time> string is older than 24 hours.""" + now = time() + utime = mktime(strptime(testTime)) + secondsPerDay = 60*60*24 + + return (now-utime) > secondsPerDay + + +class Statistics(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.ThreadedStatistics, + Bcfg2.Server.Plugin.PullSource): + name = 'Statistics' + __version__ = '$Id$' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore) + Bcfg2.Server.Plugin.PullSource.__init__(self) + fpath = "%s/etc/statistics.xml" % datastore + self.data_file = StatisticsStore(fpath) + + def handle_statistic(self, metadata, data): + self.data_file.updateStats(data, metadata.hostname) + + def FindCurrent(self, client): + rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0] + maxtime = max([strptime(stat.get('time')) for stat \ + in rt.findall('Statistics')]) + return [stat for stat in rt.findall('Statistics') \ + if strptime(stat.get('time')) == maxtime][0] + + def GetExtra(self, client): + return [(entry.tag, entry.get('name')) for entry \ + in self.FindCurrent(client).xpath('.//Extra/*')] + + def GetCurrentEntry(self, client, e_type, e_name): + curr = self.FindCurrent(client) + entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name)) + if not entry: + raise Bcfg2.Server.Plugin.PluginExecutionError + cfentry = entry[-1] + + owner = cfentry.get('current_owner', 
cfentry.get('owner')) + group = cfentry.get('current_group', cfentry.get('group')) + perms = cfentry.get('current_perms', cfentry.get('perms')) + if 'current_bfile' in cfentry.attrib: + contents = binascii.a2b_base64(cfentry.get('current_bfile')) + elif 'current_bdiff' in cfentry.attrib: + diff = binascii.a2b_base64(cfentry.get('current_bdiff')) + contents = '\n'.join(difflib.restore(diff.split('\n'), 1)) + else: + contents = None + + return (owner, group, perms, contents) diff --git a/build/lib/Bcfg2/Server/Plugins/Svcmgr.py b/build/lib/Bcfg2/Server/Plugins/Svcmgr.py new file mode 100644 index 000000000..6d25c1a6d --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Svcmgr.py @@ -0,0 +1,12 @@ +"""This generator provides service mappings.""" +__revision__ = '$Revision$' + +import Bcfg2.Server.Plugin + + +class Svcmgr(Bcfg2.Server.Plugin.PrioDir): + """This is a generator that handles service assignments.""" + name = 'Svcmgr' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + deprecated = True diff --git a/build/lib/Bcfg2/Server/Plugins/Svn.py b/build/lib/Bcfg2/Server/Plugins/Svn.py new file mode 100644 index 000000000..cb4ab649b --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Svn.py @@ -0,0 +1,46 @@ +import os +from subprocess import Popen, PIPE +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Svn') + + +class Svn(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Svn is a version plugin for dealing with Bcfg2 repos.""" + name = 'Svn' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + self.core = core + self.datastore = datastore + + # path to svn directory for bcfg2 repo + svn_dir = "%s/.svn" % datastore + + # Read revision from bcfg2 repo + if os.path.isdir(svn_dir): + self.get_revision() + else: + logger.error("%s is not a directory" % svn_dir) + raise 
Bcfg2.Server.Plugin.PluginInitError + + logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir) + + def get_revision(self): + """Read svn revision information for the Bcfg2 repository.""" + try: + data = Popen(("env LC_ALL=C svn info %s" % + (self.datastore)), shell=True, + stdout=PIPE).communicate()[0].split('\n') + return [line.split(': ')[1] for line in data \ + if line[:9] == 'Revision:'][-1] + except IndexError: + logger.error("Failed to read svn info; disabling svn support") + logger.error('''Ran command "svn info %s"''' % (self.datastore)) + logger.error("Got output: %s" % data) + raise Bcfg2.Server.Plugin.PluginInitError diff --git a/build/lib/Bcfg2/Server/Plugins/TCheetah.py b/build/lib/Bcfg2/Server/Plugins/TCheetah.py new file mode 100644 index 000000000..d40f4baf3 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/TCheetah.py @@ -0,0 +1,78 @@ +'''This module implements a templating generator based on Cheetah''' +__revision__ = '$Revision$' + +import binascii +import logging +import sys +import traceback +import Bcfg2.Server.Plugin + +logger = logging.getLogger('Bcfg2.Plugins.TCheetah') + +try: + import Cheetah.Template + import Cheetah.Parser +except: + logger.error("TCheetah: Failed to import Cheetah. 
Is it installed?") + raise + + +class TemplateFile: + """Template file creates Cheetah template structures for the loaded file.""" + + def __init__(self, name, specific, encoding): + self.name = name + self.specific = specific + self.encoding = encoding + self.template = None + self.searchlist = dict() + + def handle_event(self, event): + """Handle all fs events for this template.""" + if event.code2str() == 'deleted': + return + try: + s = {'useStackFrames': False} + self.template = Cheetah.Template.Template(open(self.name).read(), + compilerSettings=s, + searchList=self.searchlist) + except Cheetah.Parser.ParseError, perror: + logger.error("Cheetah parse error for file %s" % (self.name)) + logger.error(perror.report()) + + def bind_entry(self, entry, metadata): + """Build literal file information.""" + self.template.metadata = metadata + self.searchlist['metadata'] = metadata + self.template.path = entry.get('realname', entry.get('name')) + self.searchlist['path'] = entry.get('realname', entry.get('name')) + self.template.source_path = self.name + self.searchlist['source_path'] = self.name + + if entry.tag == 'Path': + entry.set('type', 'file') + try: + if type(self.template) == unicode: + entry.text = self.template + else: + if entry.get('encoding') == 'base64': + # take care of case where file needs base64 encoding + entry.text = binascii.b2a_base64(self.template) + else: + entry.text = unicode(str(self.template), self.encoding) + except: + (a, b, c) = sys.exc_info() + msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1] + logger.error(msg) + logger.error("TCheetah template error for %s" % self.searchlist['path']) + del a, b, c + raise Bcfg2.Server.Plugin.PluginExecutionError + + +class TCheetah(Bcfg2.Server.Plugin.GroupSpool): + """The TCheetah generator implements a templating mechanism for configuration files.""" + name = 'TCheetah' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + filename_pattern = 'template' + es_child_cls = 
def removecomment(stream):
    """A genshi filter that removes comments from the stream."""
    for kind, data, pos in stream:
        if kind is genshi.core.COMMENT:
            continue
        yield kind, data, pos


class TemplateFile:
    """Template file creates Genshi template structures for the loaded file."""

    def __init__(self, name, specific, encoding):
        self.name = name
        self.specific = specific
        self.encoding = encoding
        # Strip the specificity suffix so the template class is chosen by
        # the underlying file's extension.
        if self.specific.all:
            matchname = self.name
        elif self.specific.group:
            matchname = self.name[:self.name.find('.G')]
        else:
            matchname = self.name[:self.name.find('.H')]
        if matchname.endswith('.txt'):
            self.template_cls = TextTemplate
        elif matchname.endswith('.newtxt'):
            if not have_ntt:
                logger.error("Genshi NewTextTemplates not supported by "
                             "this version of Genshi")
                # BUGFIX: previously template_cls was left unset here,
                # causing an AttributeError on the first event; fall back
                # to the classic text template instead.
                self.template_cls = TextTemplate
            else:
                self.template_cls = NewTextTemplate
        else:
            self.template_cls = MarkupTemplate
        self.HandleEvent = self.handle_event

    def handle_event(self, event=None):
        """Handle all fs events for this template."""
        if event and event.code2str() == 'deleted':
            return
        try:
            loader = TemplateLoader()
            try:
                self.template = loader.load(self.name,
                                            cls=self.template_cls,
                                            encoding=self.encoding)
            except LookupError as lerror:
                logger.error('Genshi lookup error: %s' % lerror)
        except TemplateError as terror:
            logger.error('Genshi template error: %s' % terror)
        except genshi.input.ParseError as perror:
            logger.error('Genshi parse error: %s' % perror)

    def bind_entry(self, entry, metadata):
        """Build literal file information."""
        fname = entry.get('realname', entry.get('name'))
        if entry.tag == 'Path':
            entry.set('type', 'file')
        try:
            stream = self.template.generate(
                name=fname, metadata=metadata,
                path=self.name).filter(removecomment)
            if have_ntt:
                ttypes = [TextTemplate, NewTextTemplate]
            else:
                ttypes = [TextTemplate]
            if True in [isinstance(self.template, t) for t in ttypes]:
                # Text templates render to plain text.
                try:
                    textdata = stream.render('text', strip_whitespace=False)
                except TypeError:
                    # Older genshi lacks the strip_whitespace keyword.
                    textdata = stream.render('text')
                if type(textdata) == unicode:
                    entry.text = textdata
                else:
                    if entry.get('encoding') == 'base64':
                        # take care of case where file needs base64 encoding
                        entry.text = binascii.b2a_base64(textdata)
                    else:
                        entry.text = unicode(textdata, self.encoding)
            else:
                try:
                    xmldata = stream.render('xml', strip_whitespace=False)
                except TypeError:
                    xmldata = stream.render('xml')
                if type(xmldata) == unicode:
                    entry.text = xmldata
                else:
                    entry.text = unicode(xmldata, self.encoding)
        except TemplateError as terror:
            logger.error('Genshi template error: %s' % terror)
            raise Bcfg2.Server.Plugin.PluginExecutionError
        except AttributeError as err:
            logger.error('Genshi template loading error: %s' % err)
            raise Bcfg2.Server.Plugin.PluginExecutionError
+ + """ + name = 'TGenshi' + __version__ = '$Id$' + __author__ = 'jeff@ocjtech.us' + filename_pattern = 'template\.(txt|newtxt|xml)' + es_child_cls = TemplateFile diff --git a/build/lib/Bcfg2/Server/Plugins/Trigger.py b/build/lib/Bcfg2/Server/Plugins/Trigger.py new file mode 100644 index 000000000..f6dd47e12 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Trigger.py @@ -0,0 +1,37 @@ +import os +import Bcfg2.Server.Plugin + + +def async_run(prog, args): + pid = os.fork() + if pid: + os.waitpid(pid, 0) + else: + dpid = os.fork() + if not dpid: + os.system(" ".join([prog] + args)) + os._exit(0) + + +class Trigger(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Statistics): + """Trigger is a plugin that calls external scripts (on the server).""" + name = 'Trigger' + __version__ = '$Id' + __author__ = 'bcfg-dev@mcs.anl.gov' + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Statistics.__init__(self) + try: + os.stat(self.data) + except: + self.logger.error("Trigger: spool directory %s does not exist; unloading" % self.data) + raise Bcfg2.Server.Plugin.PluginInitError + + def process_statistics(self, metadata, _): + args = [metadata.hostname, '-p', metadata.profile, '-g', + ':'.join([g for g in metadata.groups])] + for notifier in os.listdir(self.data): + n = self.data + '/' + notifier + async_run(n, args) diff --git a/build/lib/Bcfg2/Server/Plugins/Web.py b/build/lib/Bcfg2/Server/Plugins/Web.py new file mode 100644 index 000000000..e1646e429 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/Web.py @@ -0,0 +1,47 @@ +import os +import BaseHTTPServer +import SimpleHTTPServer +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Web') + +class Web(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Web is a simple webserver to display the content of the Bcfg2 repos.""" + name = 'Web' + __version__ = '$Id$' + __author__ = 
'bcfg-dev@mcs.anl.gov' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + # Change directory to the Bcfg2 repo + ##path = '/home/fab/backup' + if not os.path.exists(datastore): + ##print "Path '%s' doesn't exisit" % datastore + logger.error("%s doesn't exist" % datastore) + raise Bcfg2.Server.Plugin.PluginInitError + else: + os.chdir(datastore) + self.start_web() + + logger.debug("Serving at port %s" % port) + + + def start_web(self, port=6788): + """Starts the webserver for directory listing of the Bcfg2 repo.""" + try: + server_class = BaseHTTPServer.HTTPServer + handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler + server_address = ('', port) + server = server_class(server_address, handler_class) + server.serve_forever() + except: + logger.error("Failed to start webserver") + raise Bcfg2.Server.Plugin.PluginInitError diff --git a/build/lib/Bcfg2/Server/Plugins/__Web.py b/build/lib/Bcfg2/Server/Plugins/__Web.py new file mode 100644 index 000000000..e1646e429 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/__Web.py @@ -0,0 +1,47 @@ +import os +import BaseHTTPServer +import SimpleHTTPServer +import Bcfg2.Server.Plugin + +# for debugging output only +import logging +logger = logging.getLogger('Bcfg2.Plugins.Web') + +class Web(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Version): + """Web is a simple webserver to display the content of the Bcfg2 repos.""" + name = 'Web' + __version__ = '$Id$' + __author__ = 'bcfg-dev@mcs.anl.gov' + experimental = True + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Version.__init__(self) + self.core = core + self.datastore = datastore + + # Change directory to the Bcfg2 repo + ##path = '/home/fab/backup' + if not os.path.exists(datastore): + ##print "Path '%s' doesn't exisit" % 
datastore + logger.error("%s doesn't exist" % datastore) + raise Bcfg2.Server.Plugin.PluginInitError + else: + os.chdir(datastore) + self.start_web() + + logger.debug("Serving at port %s" % port) + + + def start_web(self, port=6788): + """Starts the webserver for directory listing of the Bcfg2 repo.""" + try: + server_class = BaseHTTPServer.HTTPServer + handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler + server_address = ('', port) + server = server_class(server_address, handler_class) + server.serve_forever() + except: + logger.error("Failed to start webserver") + raise Bcfg2.Server.Plugin.PluginInitError diff --git a/build/lib/Bcfg2/Server/Plugins/__init__.py b/build/lib/Bcfg2/Server/Plugins/__init__.py new file mode 100644 index 000000000..c69c37452 --- /dev/null +++ b/build/lib/Bcfg2/Server/Plugins/__init__.py @@ -0,0 +1,35 @@ +"""Imports for Bcfg2.Server.Plugins.""" +__revision__ = '$Revision$' + +__all__ = [ + 'Account', + 'Base', + 'Bundler', + 'Bzr', + 'Cfg', + 'Cvs', + 'Darcs', + 'Decisions', + 'Fossil', + 'Git', + 'GroupPatterns', + 'Hg', + 'Hostbase', + 'Metadata', + 'NagiosGen', + 'Ohai', + 'Packages', + 'Properties', + 'Probes', + 'Pkgmgr', + 'Rules', + 'SSHbase', + 'Snapshots', + 'Statistics', + 'Svcmgr', + 'Svn', + 'TCheetah', + 'Trigger', + 'SGenshi', + 'TGenshi', + ] diff --git a/build/lib/Bcfg2/Server/Reports/__init__.py b/build/lib/Bcfg2/Server/Reports/__init__.py new file mode 100644 index 000000000..bdf908f4a --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/__init__.py @@ -0,0 +1 @@ +__all__ = ['manage', 'nisauth', 'reports', 'settings', 'backends', 'urls', 'importscript'] diff --git a/build/lib/Bcfg2/Server/Reports/backends.py b/build/lib/Bcfg2/Server/Reports/backends.py new file mode 100644 index 000000000..9207038ed --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/backends.py @@ -0,0 +1,35 @@ +from django.contrib.auth.models import User +from nisauth import * + +class NISBackend(object): + + def authenticate(self, username=None, 
password=None): + try: + print "start nis authenticate" + n = nisauth(username, password) + temp_pass = User.objects.make_random_password(100) + nis_user = dict(username=username, + ) + + user_session_obj = dict( + email = username, + first_name = None, + last_name = None, + uid = n.uid + ) + user, created = User.objects.get_or_create(username=username) + + return user + + except NISAUTHError, e: + print str(e) + return None + + + def get_user(self, user_id): + try: + return User.objects.get(pk=user_id) + except User.DoesNotExist, e: + print str(e) + return None + diff --git a/build/lib/Bcfg2/Server/Reports/importscript.py b/build/lib/Bcfg2/Server/Reports/importscript.py new file mode 100644 index 000000000..cdfd8079c --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/importscript.py @@ -0,0 +1,270 @@ +#! /usr/bin/env python +'''Imports statistics.xml and clients.xml files in to database backend for new statistics engine''' +__revision__ = '$Revision$' + +import os, sys, binascii +try: + import Bcfg2.Server.Reports.settings +except Exception, e: + sys.stderr.write("Failed to load configuration settings. %s\n" % e) + sys.exit(1) + +project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__) +project_name = os.path.basename(project_directory) +sys.path.append(os.path.join(project_directory, '..')) +project_module = __import__(project_name, '', '', ['']) +sys.path.pop() +# Set DJANGO_SETTINGS_MODULE appropriately. 
+os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name + +from Bcfg2.Server.Reports.reports.models import * +from lxml.etree import XML, XMLSyntaxError +from getopt import getopt, GetoptError +from datetime import datetime +from time import strptime +from django.db import connection +from Bcfg2.Server.Reports.updatefix import update_database +import ConfigParser +import logging +import Bcfg2.Logger +import platform + +def build_reason_kwargs(r_ent): + binary_file=False + if r_ent.get('current_bfile', False): + binary_file=True + rc_diff = r_ent.get('current_bfile') + if len(rc_diff) > 1024*1024: + rc_diff = '' + elif len(rc_diff) == 0: + # No point in flagging binary if we have no data + binary_file=False + elif r_ent.get('current_bdiff', False): + rc_diff = binascii.a2b_base64(r_ent.get('current_bdiff')) + elif r_ent.get('current_diff', False): + rc_diff = r_ent.get('current_diff') + else: + rc_diff = '' + return dict(owner=r_ent.get('owner', default=""), + current_owner=r_ent.get('current_owner', default=""), + group=r_ent.get('group', default=""), + current_group=r_ent.get('current_group', default=""), + perms=r_ent.get('perms', default=""), + current_perms=r_ent.get('current_perms', default=""), + status=r_ent.get('status', default=""), + current_status=r_ent.get('current_status', default=""), + to=r_ent.get('to', default=""), + current_to=r_ent.get('current_to', default=""), + version=r_ent.get('version', default=""), + current_version=r_ent.get('current_version', default=""), + current_exists=r_ent.get('current_exists', default="True").capitalize()=="True", + current_diff=rc_diff, + is_binary=binary_file) + + +def load_stats(cdata, sdata, vlevel, logger, quick=False, location=''): + clients = {} + [clients.__setitem__(c.name, c) \ + for c in Client.objects.all()] + + pingability = {} + [pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \ + for n in cdata.findall('Client')] + + for node in sdata.findall('Node'): + name = 
node.get('name') + c_inst, created = Client.objects.get_or_create(name=name) + if vlevel > 0: + logger.info("Client %s added to db" % name) + clients[name] = c_inst + try: + pingability[name] + except KeyError: + pingability[name] = 'N' + for statistics in node.findall('Statistics'): + timestamp = datetime(*strptime(statistics.get('time'))[0:6]) + ilist = Interaction.objects.filter(client=c_inst, + timestamp=timestamp) + if ilist: + current_interaction = ilist[0] + if vlevel > 0: + logger.info("Interaction for %s at %s with id %s already exists" % \ + (c_inst.id, timestamp, current_interaction.id)) + continue + else: + newint = Interaction(client=c_inst, + timestamp = timestamp, + state = statistics.get('state', default="unknown"), + repo_rev_code = statistics.get('revision',default="unknown"), + client_version = statistics.get('client_version',default="unknown"), + goodcount = statistics.get('good',default="0"), + totalcount = statistics.get('total',default="0"), + server = location) + newint.save() + current_interaction = newint + if vlevel > 0: + logger.info("Interaction for %s at %s with id %s INSERTED in to db"%(c_inst.id, + timestamp, current_interaction.id)) + + + counter_fields = { TYPE_CHOICES[0]: 0, TYPE_CHOICES[1]: 0, TYPE_CHOICES[2]: 0 } + pattern = [('Bad/*', TYPE_CHOICES[0]), + ('Extra/*', TYPE_CHOICES[2]), + ('Modified/*', TYPE_CHOICES[1]),] + for (xpath, type) in pattern: + for x in statistics.findall(xpath): + counter_fields[type] = counter_fields[type] + 1 + kargs = build_reason_kwargs(x) + + try: + rr = None + if not quick: + try: + rr = Reason.objects.filter(**kargs)[0] + except IndexError: + pass + if not rr: + rr = Reason(**kargs) + rr.save() + if vlevel > 0: + logger.info("Created reason: %s" % rr.id) + except Exception, ex: + logger.error("Failed to create reason for %s: %s" % (x.get('name'), ex)) + rr = Reason(current_exists=x.get('current_exists', + default="True").capitalize()=="True") + rr.save() + + entry, created = 
Entries.objects.get_or_create(\ + name=x.get('name'), kind=x.tag) + + Entries_interactions(entry=entry, reason=rr, + interaction=current_interaction, + type=type[0]).save() + if vlevel > 0: + logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id)) + + # Update interaction counters + current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]] + current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]] + current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]] + current_interaction.save() + + mperfs = [] + for times in statistics.findall('OpStamps'): + for metric, value in times.items(): + mmatch = [] + if not quick: + mmatch = Performance.objects.filter(metric=metric, value=value) + + if mmatch: + mperf = mmatch[0] + else: + mperf = Performance(metric=metric, value=value) + mperf.save() + mperfs.append(mperf) + current_interaction.performance_items.add(*mperfs) + + for key in pingability.keys(): + if key not in clients: + continue + try: + pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0] + if pmatch.status == pingability[key]: + pmatch.endtime = datetime.now() + pmatch.save() + continue + except IndexError: + pass + Ping(client=clients[key], status=pingability[key], + starttime=datetime.now(), + endtime=datetime.now()).save() + + if vlevel > 1: + logger.info("---------------PINGDATA SYNCED---------------------") + + #Clients are consistent + +if __name__ == '__main__': + from sys import argv + verb = 0 + cpath = "/etc/bcfg2.conf" + clientpath = False + statpath = False + syslog = False + + try: + opts, args = getopt(argv[1:], "hvudc:s:CS", ["help", "verbose", "updates" , + "debug", "clients=", "stats=", + "config=", "syslog"]) + except GetoptError, mesg: + # print help information and exit: + print "%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg) + raise SystemExit, 2 + + for o, a in opts: + if o 
in ("-h", "--help"): + print "Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n" + print "h : help; this message" + print "v : verbose; print messages on record insertion/skip" + print "u : updates; print status messages as items inserted semi-verbose" + print "d : debug; print most SQL used to manipulate database" + print "C : path to bcfg2.conf config file." + print "c : clients.xml file" + print "s : statistics.xml file" + print "S : syslog; output to syslog" + raise SystemExit + if o in ["-C", "--config"]: + cpath = a + + if o in ("-v", "--verbose"): + verb = 1 + if o in ("-u", "--updates"): + verb = 2 + if o in ("-d", "--debug"): + verb = 3 + if o in ("-c", "--clients"): + clientspath = a + + if o in ("-s", "--stats"): + statpath = a + if o in ("-S", "--syslog"): + syslog = True + + logger = logging.getLogger('importscript.py') + logging.getLogger().setLevel(logging.INFO) + Bcfg2.Logger.setup_logging('importscript.py', + True, + syslog) + + cf = ConfigParser.ConfigParser() + cf.read([cpath]) + + if not statpath: + try: + statpath = "%s/etc/statistics.xml" % cf.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + print "Could not read bcfg2.conf; exiting" + raise SystemExit, 1 + try: + statsdata = XML(open(statpath).read()) + except (IOError, XMLSyntaxError): + print("StatReports: Failed to parse %s"%(statpath)) + raise SystemExit, 1 + + if not clientpath: + try: + clientspath = "%s/Metadata/clients.xml" % \ + cf.get('server', 'repository') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + print "Could not read bcfg2.conf; exiting" + raise SystemExit, 1 + try: + clientsdata = XML(open(clientspath).read()) + except (IOError, XMLSyntaxError): + print("StatReports: Failed to parse %s"%(clientspath)) + raise SystemExit, 1 + + q = '-O3' in sys.argv + # Be sure the database is ready for new schema + update_database() + load_stats(clientsdata, statsdata, verb, logger, quick=q, 
location=platform.node()) diff --git a/build/lib/Bcfg2/Server/Reports/manage.py b/build/lib/Bcfg2/Server/Reports/manage.py new file mode 100644 index 000000000..5e78ea979 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/manage.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +try: + import settings # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) + sys.exit(1) + +if __name__ == "__main__": + execute_manager(settings) diff --git a/build/lib/Bcfg2/Server/Reports/nisauth.py b/build/lib/Bcfg2/Server/Reports/nisauth.py new file mode 100644 index 000000000..b4be0e391 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/nisauth.py @@ -0,0 +1,43 @@ +import os +import crypt, nis +from Bcfg2.Server.Reports.settings import AUTHORIZED_GROUP + +"""Checks with NIS to see if the current user is in the support group""" + +__revision__ = "$Revision: $" + +class NISAUTHError(Exception): + """NISAUTHError is raised when somehting goes boom.""" + pass + +class nisauth(object): + group_test = False + samAcctName = None + distinguishedName = None + sAMAccountName = None + telephoneNumber = None + title = None + memberOf = None + department = None #this will be a list + mail = None + extensionAttribute1 = None #badgenumber + badge_no = None + uid = None + + def __init__(self,login,passwd=None): + """get user profile from NIS""" + try: + p = nis.match(login, 'passwd.byname').split(":") + print p + except: + raise NISAUTHError('username') + # check user password using crypt and 2 character salt from passwd file + if p[1] == crypt.crypt(passwd, p[1][:2]): + # check to see if user is in valid support groups + # will have to 
include these groups in a settings file eventually + if not login in nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[-1].split(','): + raise NISAUTHError('group') + self.uid = p[2] + print self.uid + else: + raise NISAUTHError('password') diff --git a/build/lib/Bcfg2/Server/Reports/reports/__init__.py b/build/lib/Bcfg2/Server/Reports/reports/__init__.py new file mode 100644 index 000000000..ccdce8943 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/__init__.py @@ -0,0 +1 @@ +__all__ = ['templatetags'] diff --git a/build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml b/build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml new file mode 100644 index 000000000..1b1359eed --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml @@ -0,0 +1,35 @@ +<?xml version='1.0' encoding='utf-8' ?> +<django-objects version="1.0"> + <object pk="1" model="reports.internaldatabaseversion"> + <field type="IntegerField" name="version">0</field> + <field type="DateTimeField" name="updated">2008-08-05 11:03:50</field> + </object> + <object pk="2" model="reports.internaldatabaseversion"> + <field type="IntegerField" name="version">1</field> + <field type="DateTimeField" name="updated">2008-08-05 11:04:10</field> + </object> + <object pk="3" model="reports.internaldatabaseversion"> + <field type="IntegerField" name="version">2</field> + <field type="DateTimeField" name="updated">2008-08-05 13:37:19</field> + </object> + <object pk="4" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>3</field> + <field type='DateTimeField' name='updated'>2008-08-11 08:44:36</field> + </object> + <object pk="5" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>10</field> + <field type='DateTimeField' name='updated'>2008-08-22 11:28:50</field> + </object> + <object pk="5" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>11</field> 
+ <field type='DateTimeField' name='updated'>2009-01-13 12:26:10</field> + </object> + <object pk="6" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>16</field> + <field type='DateTimeField' name='updated'>2010-06-01 12:26:10</field> + </object> + <object pk="7" model="reports.internaldatabaseversion"> + <field type='IntegerField' name='version'>17</field> + <field type='DateTimeField' name='updated'>2010-07-02 00:00:00</field> + </object> +</django-objects> diff --git a/build/lib/Bcfg2/Server/Reports/reports/models.py b/build/lib/Bcfg2/Server/Reports/reports/models.py new file mode 100644 index 000000000..1963a9090 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/models.py @@ -0,0 +1,328 @@ +"""Django models for Bcfg2 reports.""" +from django.db import models +from django.db import connection, transaction +from django.db.models import Q +from datetime import datetime, timedelta +from time import strptime + +KIND_CHOICES = ( + #These are the kinds of config elements + ('Package', 'Package'), + ('Path', 'directory'), + ('Path', 'file'), + ('Path', 'permissions'), + ('Path', 'symlink'), + ('Service', 'Service'), +) +PING_CHOICES = ( + #These are possible ping states + ('Up (Y)', 'Y'), + ('Down (N)', 'N') +) +TYPE_BAD = 1 +TYPE_MODIFIED = 2 +TYPE_EXTRA = 3 + +TYPE_CHOICES = ( + (TYPE_BAD, 'Bad'), + (TYPE_MODIFIED, 'Modified'), + (TYPE_EXTRA, 'Extra'), +) + +def convert_entry_type_to_id(type_name): + """Convert a entry type to its entry id""" + for e_id, e_name in TYPE_CHOICES: + if e_name.lower() == type_name.lower(): + return e_id + return -1 + +class ClientManager(models.Manager): + """Extended client manager functions.""" + def active(self, timestamp=None): + """returns a set of clients that have been created and have not yet been + expired as of optional timestmamp argument. 
Timestamp should be a + datetime object.""" + + if timestamp == None: + timestamp = datetime.now() + elif not isinstance(timestamp, datetime): + raise ValueError, 'Expected a datetime object' + else: + try: + timestamp = datetime(*strptime(timestamp, "%Y-%m-%d %H:%M:%S")[0:6]) + except ValueError: + return self.none() + + return self.filter(Q(expiration__gt=timestamp) | Q(expiration__isnull=True), + creation__lt=timestamp) + + +class Client(models.Model): + """Object representing every client we have seen stats for.""" + creation = models.DateTimeField(auto_now_add=True) + name = models.CharField(max_length=128,) + current_interaction = models.ForeignKey('Interaction', + null=True, blank=True, + related_name="parent_client") + expiration = models.DateTimeField(blank=True, null=True) + + def __str__(self): + return self.name + + objects = ClientManager() + + class Admin: + pass + +class Ping(models.Model): + """Represents a ping of a client (sparsely).""" + client = models.ForeignKey(Client, related_name="pings") + starttime = models.DateTimeField() + endtime = models.DateTimeField() + status = models.CharField(max_length=4, choices=PING_CHOICES)#up/down + + class Meta: + get_latest_by = 'endtime' + +class InteractiveManager(models.Manager): + """Manages interactions objects.""" + + def recent_interactions_dict(self, maxdate=None, active_only=True): + """ + Return the most recent interactions for clients as of a date. + + This method uses aggregated queries to return a ValuesQueryDict object. + Faster then raw sql since this is executed as a single query. + """ + + return self.values('client').annotate(max_timestamp=Max('timestamp')).values() + + def interaction_per_client(self, maxdate = None, active_only=True): + """ + Returns the most recent interactions for clients as of a date + + Arguments: + maxdate -- datetime object. Most recent date to pull. 
(dafault None) + active_only -- Include only active clients (default True) + + """ + + if maxdate and not isinstance(maxdate,datetime): + raise ValueError, 'Expected a datetime object' + return self.filter(id__in = self.get_interaction_per_client_ids(maxdate, active_only)) + + def get_interaction_per_client_ids(self, maxdate = None, active_only=True): + """ + Returns the ids of most recent interactions for clients as of a date. + + Arguments: + maxdate -- datetime object. Most recent date to pull. (dafault None) + active_only -- Include only active clients (default True) + + """ + from django.db import connection + cursor = connection.cursor() + cfilter = "expiration is null" + + sql = 'select reports_interaction.id, x.client_id from (select client_id, MAX(timestamp) ' + \ + 'as timer from reports_interaction' + if maxdate: + if not isinstance(maxdate,datetime): + raise ValueError, 'Expected a datetime object' + sql = sql + " where timestamp <= '%s' " % maxdate + cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate,maxdate) + sql = sql + ' GROUP BY client_id) x, reports_interaction where ' + \ + 'reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer' + if active_only: + sql = sql + " and x.client_id in (select id from reports_client where %s)" % \ + cfilter + try: + cursor.execute(sql) + return [item[0] for item in cursor.fetchall()] + except: + '''FIXME - really need some error hadling''' + pass + return [] + +class Interaction(models.Model): + """Models each reconfiguration operation interaction between client and server.""" + client = models.ForeignKey(Client, related_name="interactions",) + timestamp = models.DateTimeField()#Timestamp for this record + state = models.CharField(max_length=32)#good/bad/modified/etc + repo_rev_code = models.CharField(max_length=64)#repo revision at time of interaction + client_version = models.CharField(max_length=32)#Client Version + goodcount = 
models.IntegerField()#of good config-items + totalcount = models.IntegerField()#of total config-items + server = models.CharField(max_length=256) # Name of the server used for the interaction + bad_entries = models.IntegerField(default=-1) + modified_entries = models.IntegerField(default=-1) + extra_entries = models.IntegerField(default=-1) + + def __str__(self): + return "With " + self.client.name + " @ " + self.timestamp.isoformat() + + def percentgood(self): + if not self.totalcount == 0: + return (self.goodcount/float(self.totalcount))*100 + else: + return 0 + + def percentbad(self): + if not self.totalcount == 0: + return ((self.totalcount-self.goodcount)/(float(self.totalcount)))*100 + else: + return 0 + + def isclean(self): + if (self.bad_entry_count() == 0 and self.goodcount == self.totalcount): + return True + else: + return False + + def isstale(self): + if (self == self.client.current_interaction):#Is Mostrecent + if(datetime.now()-self.timestamp > timedelta(hours=25) ): + return True + else: + return False + else: + #Search for subsequent Interaction for this client + #Check if it happened more than 25 hrs ago. + if (self.client.interactions.filter(timestamp__gt=self.timestamp) + .order_by('timestamp')[0].timestamp - + self.timestamp > timedelta(hours=25)): + return True + else: + return False + def save(self): + super(Interaction, self).save() #call the real save... + self.client.current_interaction = self.client.interactions.latest() + self.client.save()#save again post update + + def delete(self): + '''Override the default delete. 
Allows us to remove Performance items''' + pitems = list(self.performance_items.all()) + super(Interaction, self).delete() + for perf in pitems: + if perf.interaction.count() == 0: + perf.delete() + + def badcount(self): + return self.totalcount - self.goodcount + + def bad(self): + return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_BAD) + + def bad_entry_count(self): + """Number of bad entries. Store the count in the interation field to save db queries.""" + if self.bad_entries < 0: + self.bad_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_BAD).count() + self.save() + return self.bad_entries + + def modified(self): + return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_MODIFIED) + + def modified_entry_count(self): + """Number of modified entries. Store the count in the interation field to save db queries.""" + if self.modified_entries < 0: + self.modified_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_MODIFIED).count() + self.save() + return self.modified_entries + + def extra(self): + return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_EXTRA) + + def extra_entry_count(self): + """Number of extra entries. 
Store the count in the interation field to save db queries.""" + if self.extra_entries < 0: + self.extra_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_EXTRA).count() + self.save() + return self.extra_entries + + objects = InteractiveManager() + + class Admin: + list_display = ('client', 'timestamp', 'state') + list_filter = ['client', 'timestamp'] + pass + class Meta: + get_latest_by = 'timestamp' + ordering = ['-timestamp'] + unique_together = ("client", "timestamp") + +class Reason(models.Model): + """reason why modified or bad entry did not verify, or changed.""" + owner = models.TextField(max_length=128, blank=True) + current_owner = models.TextField(max_length=128, blank=True) + group = models.TextField(max_length=128, blank=True) + current_group = models.TextField(max_length=128, blank=True) + perms = models.TextField(max_length=4, blank=True)#txt fixes typing issue + current_perms = models.TextField(max_length=4, blank=True) + status = models.TextField(max_length=3, blank=True)#on/off/(None) + current_status = models.TextField(max_length=1, blank=True)#on/off/(None) + to = models.TextField(max_length=256, blank=True) + current_to = models.TextField(max_length=256, blank=True) + version = models.TextField(max_length=128, blank=True) + current_version = models.TextField(max_length=128, blank=True) + current_exists = models.BooleanField()#False means its missing. Default True + current_diff = models.TextField(max_length=1280, blank=True) + is_binary = models.BooleanField(default=False) + def _str_(self): + return "Reason" + + @staticmethod + @transaction.commit_on_success + def prune_orphans(): + '''Prune oprhaned rows... 
no good way to use the ORM''' + cursor = connection.cursor() + cursor.execute('delete from reports_reason where not exists (select rei.id from reports_entries_interactions rei where rei.reason_id = reports_reason.id)') + transaction.set_dirty() + + +class Entries(models.Model): + """Contains all the entries feed by the client.""" + name = models.CharField(max_length=128, db_index=True) + kind = models.CharField(max_length=16, choices=KIND_CHOICES, db_index=True) + + def __str__(self): + return self.name + + @staticmethod + @transaction.commit_on_success + def prune_orphans(): + '''Prune oprhaned rows... no good way to use the ORM''' + cursor = connection.cursor() + cursor.execute('delete from reports_entries where not exists (select rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)') + transaction.set_dirty() + +class Entries_interactions(models.Model): + """Define the relation between the reason, the interaction and the entry.""" + entry = models.ForeignKey(Entries) + reason = models.ForeignKey(Reason) + interaction = models.ForeignKey(Interaction) + type = models.IntegerField(choices=TYPE_CHOICES) + +class Performance(models.Model): + """Object representing performance data for any interaction.""" + interaction = models.ManyToManyField(Interaction, related_name="performance_items") + metric = models.CharField(max_length=128) + value = models.DecimalField(max_digits=32, decimal_places=16) + def __str__(self): + return self.metric + + @staticmethod + @transaction.commit_on_success + def prune_orphans(): + '''Prune oprhaned rows... 
no good way to use the ORM''' + cursor = connection.cursor() + cursor.execute('delete from reports_performance where not exists (select ri.id from reports_performance_interaction ri where ri.performance_id = reports_performance.id)') + transaction.set_dirty() + +class InternalDatabaseVersion(models.Model): + """Object that tell us to witch version is the database.""" + version = models.IntegerField() + updated = models.DateTimeField(auto_now_add=True) + + def __str__(self): + return "version %d updated the %s" % (self.version, self.updated.isoformat()) diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/404.html b/build/lib/Bcfg2/Server/Reports/reports/templates/404.html new file mode 100644 index 000000000..168bd9fec --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/404.html @@ -0,0 +1,8 @@ +{% extends 'base.html' %} +{% block title %}Bcfg2 - Page not found{% endblock %} +{% block fullcontent %} +<h2>Page not found</h2> +<p> +The page or object requested could not be found. 
+</p> +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html b/build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html new file mode 100644 index 000000000..842de36f0 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html @@ -0,0 +1,25 @@ +{% extends "base.html" %} + +{% block timepiece %} +<script type="text/javascript"> +function showCalendar() { + var cal = new CalendarPopup("calendar_div"); + cal.showYearNavigation(); + cal.select(document.forms['cal_form'].cal_date,'cal_link', + 'yyyy/MM/dd' {% if timestamp %}, '{{ timestamp|date:"Y/m/d" }}'{% endif %} ); + return false; +} +function bcfg2_check_date() { + var new_date = document.getElementById('cal_date').value; + if(new_date) { + document.cal_form.submit(); + } +} +document.write(getCalendarStyles()); +</script> +{% if not timestamp %}Rendered at {% now "Y-m-d H:i" %} | {% else %}View as of {{ timestamp|date:"Y-m-d H:i" }} | {% endif %}{% spaceless %} + <a id='cal_link' name='cal_link' href='#' onclick='showCalendar(); return false;' + >[change]</a> + <form method='post' action='{{ path }}' id='cal_form' name='cal_form'><input id='cal_date' name='cal_date' type='hidden' value=''/></form> +{% endspaceless %} +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/base.html b/build/lib/Bcfg2/Server/Reports/reports/templates/base.html new file mode 100644 index 000000000..9bd9da218 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/base.html @@ -0,0 +1,95 @@ +{% load bcfg2_tags %} + +<?xml version="1.0"?> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> +<head> +<title>{% block title %}Bcfg2 Reporting System{% endblock %}</title> + +<meta http-equiv="Content-type" content="text/html; charset=utf-8" /> +<meta http-equiv="Content-language" content="en" /> 
+<meta http-equiv="X-UA-Compatible" content="IE=EmulateIE7" /> +<meta name="robots" content="noindex, nofollow" /> +<meta http-equiv="cache-control" content="no-cache" /> + +<link rel="stylesheet" type="text/css" href="{% to_media_url bcfg2_base.css %}" media="all" /> +<script type="text/javascript" src="{% to_media_url bcfg2.js %}"></script> +<script type="text/javascript" src="{% to_media_url date.js %}"></script> +<script type="text/javascript" src="{% to_media_url AnchorPosition.js %}"></script> +<script type="text/javascript" src="{% to_media_url CalendarPopup.js %}"></script> +<script type="text/javascript" src="{% to_media_url PopupWindow.js %}"></script> +{% block extra_header_info %}{% endblock %} + +</head> +<body onload="{% block body_onload %}{% endblock %}"> + + <div id="header"> + <a href="http://bcfg2.org"><img src='{% to_media_url bcfg2_logo.png %}' + height='115' width='300' alt='Bcfg2' style='float:left; height: 115px' /></a> + </div> + +<div id="document"> + <div id="content"><div id="contentwrapper"> + {% block fullcontent %} + <div class='page_name'> + <h1>{% block pagebanner %}Page Banner{% endblock %}</h1> + <div id="timepiece">{% block timepiece %}Rendered at {% now "Y-m-d H:i" %}{% endblock %}</div> + </div> + <div class='detail_wrapper'> + {% block content %}{% endblock %} + </div> + {% endblock %} + </div></div><!-- content --> + <div id="sidemenucontainer"><div id="sidemenu"> + {% block sidemenu %} + <ul class='menu-level1'> + <li>Overview</li> + </ul> + <ul class='menu-level2'> + <li><a href="{% url reports_summary %}">Summary</a></li> + <li><a href="{% url reports_history %}">Recent Interactions</a></li> + <li><a href="{% url reports_timing %}">Timing</a></li> + </ul> + <ul class='menu-level1'> + <li>Clients</li> + </ul> + <ul class='menu-level2'> + <li><a href="{% url reports_grid_view %}">Grid View</a></li> + <li><a href="{% url reports_detailed_list %}">Detailed List</a></li> + <li><a href="{% url reports_client_manage 
%}">Manage</a></li> + </ul> + <ul class='menu-level1'> + <li>Entries Configured</li> + </ul> + <ul class='menu-level2'> + <li><a href="{% url reports_item_list "bad" %}">Bad</a></li> + <li><a href="{% url reports_item_list "modified" %}">Modified</a></li> + <li><a href="{% url reports_item_list "extra" %}">Extra</a></li> + </ul> +{% comment %} + TODO + <ul class='menu-level1'> + <li>Entry Types</li> + </ul> + <ul class='menu-level2'> + <li><a href="#">Action</a></li> + <li><a href="#">Package</a></li> + <li><a href="#">Path</a></li> + <li><a href="#">Service</a></li> + </ul> +{% endcomment %} + <ul class='menu-level1'> + <li><a href="http://bcfg2.org">Homepage</a></li> + <li><a href="http://docs.bcfg2.org">Documentation</a></li> + </ul> + {% endblock %} + </div></div><!-- sidemenu --> + <div style='clear:both'></div> +</div><!-- document --> + <div id="footer"> + <span>Bcfg2 Version 1.1.0</span> + </div> + +<div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div> +</body> +</html> diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html new file mode 100644 index 000000000..efd5f9e00 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html @@ -0,0 +1,127 @@ +{% extends "base.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Client {{client.name}}{% endblock %} + +{% block extra_header_info %} +<style type="text/css"> +.node_data { + border: 1px solid #98DBCC; + margin: 10px; + padding-left: 18px; +} +.node_data td { + padding: 1px 20px 1px 2px; +} +span.history_links { + font-size: 90%; + margin-left: 50px; +} +span.history_links a { + font-size: 90%; +} +</style> +{% endblock %} + +{% block body_onload %}javascript:clientdetailload(){% endblock %} + +{% block pagebanner %}Client Details{% endblock %} + +{% block content %} + <div class='detail_header'> + 
<h2>{{client.name}}</h2> + <a href='{% url reports_client_manage %}#{{ client.name }}'>[manage]</a> + <span class='history_links'><a href="{% url reports_client_history client.name %}">View History</a> | Jump to + <select id="quick" name="quick" onchange="javascript:pageJump('quick');"> + <option value="" selected="selected">--- Time ---</option> + {% for i in client.interactions.all|slice:":25" %} + <option value="{% url reports_client_detail_pk hostname=client.name, pk=i.id %}">{{i.timestamp}}</option> + {% endfor %} + </select></span> + </div> + + {% if interaction.isstale %} + <div class="warningbox"> + This node did not run within the last 24 hours — it may be out of date. + </div> + {% endif %} + <table class='node_data'> + <tr><td>Timestamp</td><td>{{interaction.timestamp}}</td></tr> + {% if interaction.server %} + <tr><td>Served by</td><td>{{interaction.server}}</td></tr> + {% endif %} + {% if interaction.repo_rev_code %} + <tr><td>Revision</td><td>{{interaction.repo_rev_code}}</td></tr> + {% endif %} + <tr><td>State</td><td class='{{interaction.state}}-lineitem'>{{interaction.state|capfirst}}</td></tr> + <tr><td>Managed entries</td><td>{{interaction.totalcount}}</td></tr> + {% if not interaction.isclean %} + <tr><td>Deviation</td><td>{{interaction.percentbad|floatformat:"3"}}%</td></tr> + {% endif %} + </table> + + {% if interaction.bad_entry_count %} + <div class='entry_list'> + <div class='entry_list_head dirty-lineitem'> + <div class='entry_expand_tab' onclick='javascript:toggleMe("bad_table");'>[+]</div> + <h3>Bad Entries — {{ interaction.bad_entry_count }}</h3> + </div> + <table id='bad_table' class='entry_list'> + {% for e in interaction.bad|sortwell %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='entry_list_type'>{{e.entry.kind}}:</td> + <td><a href="{% url reports_item "bad",e.id %}"> + {{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endif %} + + {% if interaction.modified_entry_count %} + <div 
class='entry_list'> + <div class='entry_list_head modified-lineitem'> + <div class='entry_expand_tab' onclick='javascript:toggleMe("modified_table");'>[+]</div> + <h3>Modified Entries — {{ interaction.modified_entry_count }}</h3> + </div> + <table id='modified_table' class='entry_list'> + {% for e in interaction.modified|sortwell %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='entry_list_type'>{{e.entry.kind}}:</td> + <td><a href="{% url reports_item "modified",e.id %}"> + {{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endif %} + + {% if interaction.extra_entry_count %} + <div class='entry_list'> + <div class='entry_list_head extra-lineitem'> + <div class='entry_expand_tab' onclick='javascript:toggleMe("extra_table");'>[+]</div> + <h3>Extra Entries — {{ interaction.extra_entry_count }}</h3> + </div> + <table id='extra_table' class='entry_list'> + {% for e in interaction.extra|sortwell %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='entry_list_type'>{{e.entry.kind}}:</td> + <td><a href="{% url reports_item "extra",e.id %}">{{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endif %} + + {% if entry_list %} + <div class="entry_list recent_history_wrapper"> + <div class="entry_list_head" style="border-bottom: 2px solid #98DBCC;"> + <h4 style="display: inline"><a href="{% url reports_client_history client.name %}">Recent Interactions</a></h4> + </div> + <div class='recent_history_box'> + {% include "widgets/interaction_list.inc" %} + <div style='padding-left: 5px'><a href="{% url reports_client_history client.name %}">more...</a></div> + </div> + </div> + {% endif %} +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html new file mode 100644 index 000000000..0c1fae8d5 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html 
@@ -0,0 +1,46 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Detailed Client Listing{% endblock %} +{% block pagebanner %}Clients - Detailed View{% endblock %} + +{% block content %} +<div class='client_list_box'> +{% if entry_list %} + {% filter_navigator %} + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td class='left_column'>Node</td> + <td class='right_column' style='width:75px'>State</td> + <td class='right_column_narrow'>Good</td> + <td class='right_column_narrow'>Bad</td> + <td class='right_column_narrow'>Modified</td> + <td class='right_column_narrow'>Extra</td> + <td class='right_column'>Last Run</td> + <td class='right_column_wide'>Server</td> + </tr> + {% for entry in entry_list %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='left_column'><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=entry.client.name, pk=entry.id %}'>{{ entry.client.name }}</a></td> + <td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}' + {% ifequal entry.state 'dirty' %}class='dirty-lineitem'{% endifequal %}>{{ entry.state }}</a></td> + <td class='right_column_narrow'>{{ entry.goodcount }}</td> + <td class='right_column_narrow'>{{ entry.bad_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.modified_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.extra_entry_count }}</td> + <td class='right_column'><span {% if entry.timestamp|isstale:entry_max %}class='dirty-lineitem'{% endif %}>{{ entry.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</span></td> + <td class='right_column_wide'> + {% if entry.server %} + <a href='{% add_url_filter server=entry.server %}'>{{ entry.server }}</a> + {% else %} + + {% endif %} + </td> + </tr> + {% endfor %} + </table> +{% else %} + <p>No client records are available.</p> +{% endif %} +</div> +{% endblock %} diff --git 
a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html new file mode 100644 index 000000000..01d4ec2f4 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html @@ -0,0 +1,20 @@ +{% extends "base.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Interaction History{% endblock %} +{% block pagebanner %}Interaction history{% if client %} for {{ client.name }}{% endif %}{% endblock %} + +{% block extra_header_info %} +{% endblock %} + +{% block content %} +<div class='client_list_box'> +{% if entry_list %} + {% filter_navigator %} + {% include "widgets/interaction_list.inc" %} +{% else %} + <p>No client records are available.</p> +{% endif %} +</div> +{% page_navigator %} +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html new file mode 100644 index 000000000..e0c0d2d7a --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html @@ -0,0 +1,34 @@ +{% extends "base-timeview.html" %} + +{% block extra_header_info %} +{% endblock%} + +{% block title %}Bcfg2 - Client Grid View{% endblock %} + +{% block pagebanner %}Clients - Grid View{% endblock %} + +{% block content %} + +{% if inter_list %} + <table class='grid-view' align='center'> + {% for inter in inter_list %} + {% if forloop.first %}<tr>{% endif %} + <td class="{{inter.state}}-lineitem"> + <a href="{% spaceless %}{% if not timestamp %} + {% url reports_client_detail inter.client.name %} + {% else %} + {% url reports_client_detail_pk inter.client.name,inter.id %} + {% endif %} + {% endspaceless %}">{{ inter.client.name }}</a> + </td> + {% if forloop.last %} + </tr> + {% else %} + {% if forloop.counter|divisibleby:"4" %}</tr><tr>{% endif %} + {% endif %} + {% endfor %} + </table> +{% else %} + <p>No client records are available.</p> +{% endif %} 
+{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html new file mode 100644 index 000000000..5725ae577 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html @@ -0,0 +1,45 @@ +{% extends "base.html" %} + +{% block extra_header_info %} +{% endblock%} + +{% block title %}Bcfg2 - Manage Clients{% endblock %} + +{% block pagebanner %}Clients - Manage{% endblock %} + +{% block content %} +<div class='client_list_box'> + {% if message %} + <div class="warningbox">{{ message }}</div> + {% endif %} +{% if clients %} + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td class='left_column'>Node</td> + <td class='right_column'>Expiration</td> + <td class='right_column_narrow'>Manage</td> + </tr> + {% for client in clients %} + <tr class='{% cycle listview,listview_alt %}'> + <td><span id="{{ client.name }}"> </span> + <span id="ttag-{{ client.name }}"> </span> + <span id="s-ttag-{{ client.name }}"> </span> + <a href="{% url reports_client_detail client.name %}">{{ client.name }}</a></td> + <td>{% firstof client.expiration 'Active' %}</td> + <td> + <form method="post" action="{% url reports_client_manage %}"> + <div> {# here for no reason other then to validate #} + <input type="hidden" name="client_name" value="{{ client.name }}" /> + <input type="hidden" name="client_action" value="{% if client.expiration %}unexpire{% else %}expire{% endif %}" /> + <input type="submit" value="{% if client.expiration %}Activate{% else %}Expire Now{% endif %}" /> + </div> + </form> + </td> + </tr> + {% endfor %} + </table> + </div> +{% else %} + <p>No client records are available.</p> +{% endif %} +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html new file mode 100644 index 000000000..58aed1684 --- 
/dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html @@ -0,0 +1,109 @@ +{% extends "base.html" %} +{% load syntax_coloring %} + + +{% block title %}Bcfg2 - Element Details{% endblock %} + + +{% block extra_header_info %} +<style type="text/css"> +#table_list_header { + font-size: 100%; +} +table.entry_list { + width: auto; +} +div.information_wrapper { + margin: 15px; +} +div.diff_wrapper { + overflow: auto; +} +div.entry_list h3 { + font-size: 90%; + padding: 5px; +} +</style> +{% endblock%} + +{% block pagebanner %}Element Details{% endblock %} + +{% block content %} + <div class='detail_header'> + <h3>{{mod_or_bad|capfirst}} {{item.entry.kind}}: {{item.entry.name}}</h3> + </div> + + <div class="information_wrapper"> + + {% if isextra %} + <p>This item exists on the host but is not defined in the configuration.</p> + {% endif %} + + {% if not item.reason.current_exists %} + <div class="warning">This item does not currently exist on the host but is specified to exist in the configuration.</div> + {% endif %} + + {% if item.reason.current_owner or item.reason.current_group or item.reason.current_perms or item.reason.current_status or item.reason.current_status or item.reason.current_to or item.reason.current_version %} + <table class='entry_list'> + <tr id='table_list_header'> + <td style='text-align: right;'>Problem Type</td><td>Expected</td><td style='border-bottom: 1px solid #98DBCC;'>Found</td></tr> + {% if item.reason.current_owner %} + <tr><td style='text-align: right'><b>Owner</b></td><td>{{item.reason.owner}}</td> + <td>{{item.reason.current_owner}}</td></tr> + {% endif %} + {% if item.reason.current_group %} + <tr><td style='text-align: right'><b>Group</b></td><td>{{item.reason.group}}</td> + <td>{{item.reason.current_group}}</td></tr> + {% endif %} + {% if item.reason.current_perms %} + <tr><td style='text-align: right'><b>Permissions</b></td><td>{{item.reason.perms}}</td> + <td>{{item.reason.current_perms}}</td></tr> + 
{% endif %} + {% if item.reason.current_status %} + <tr><td style='text-align: right'><b>Status</b></td><td>{{item.reason.status}}</td> + <td>{{item.reason.current_status}}</td></tr> + {% endif %} + {% if item.reason.current_to %} + <tr><td style='text-align: right'><b>Symlink Target</b></td><td>{{item.reason.to}}</td> + <td>{{item.reason.current_to}}</td></tr> + {% endif %} + {% if item.reason.current_version %} + <tr><td style='text-align: right'><b>Package Version</b></td><td>{{item.reason.version|cut:"("|cut:")"}}</td> + <td>{{item.reason.current_version|cut:"("|cut:")"}}</td></tr> + {% endif %} + </table> + {% endif %} + + {% if item.reason.current_diff %} + <div class='entry_list'> + <div class='entry_list_head'> + <h3>Incorrect file contents</h3> + </div> + <div class='diff_wrapper'> + {{ item.reason.current_diff|syntaxhilight }} + </div> + </div> + {% endif %} + + + <div class='entry_list'> + <div class='entry_list_head'> + <h3>Occurences on {{ timestamp|date:"Y-m-d" }}</h3> + </div> + {% if associated_list %} + <table class="entry_list" cellpadding="3"> + {% for inter in associated_list %} + <tr><td><a href="{% url reports_client_detail inter.client.name %}" + >{{inter.client.name}}</a></td> + <td><a href="{% url reports_client_detail_pk hostname=inter.client.name,pk=inter.id %}" + >{{inter.timestamp}}</a></td> + </tr> + {% endfor %} + </table> + {% else %} + <p>Missing client list</p> + {% endif %} + </div> + + </div><!-- information_wrapper --> +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html new file mode 100644 index 000000000..572249470 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html @@ -0,0 +1,33 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Element Listing{% endblock %} + +{% block extra_header_info %} +{% endblock%} + +{% block 
pagebanner %}{{mod_or_bad|capfirst}} Element Listing{% endblock %} + +{% block content %} +{% if item_list_dict %} + {% for kind, entries in item_list_dict.items %} + + <div class='entry_list'> + <div class='entry_list_head element_list_head'> + <div class='entry_expand_tab' onclick='javascript:toggleMe("table_{{ kind }}");'>[+]</div> + <h3>{{ kind }} — {{ entries|length }}</h3> + </div> + + <table id='table_{{ kind }}' class='entry_list'> + {% for e in entries %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a href="{% url reports_item type=mod_or_bad,pk=e.id %}">{{e.entry.name}}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endfor %} +{% else %} + <p>There are currently no inconsistent configuration entries.</p> +{% endif %} +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html new file mode 100644 index 000000000..0124f635d --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html @@ -0,0 +1,42 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Client Summary{% endblock %} +{% block pagebanner %}Clients - Summary{% endblock %} + +{% block body_onload %}javascript:hide_table_array(hide_tables){% endblock %} + +{% block extra_header_info %} +<script type="text/javascript"> +var hide_tables = new Array({{ summary_data|length }}); +{% for summary in summary_data %} +hide_tables[{{ forloop.counter0 }}] = "table_{{ summary.name }}"; +{% endfor %} +</script> +{% endblock%} + +{% block content %} + <div class='detail_header'> + <h2>{{ node_count }} nodes reporting in</h2> + </div> +{% if summary_data %} + {% for summary in summary_data %} + <div class='entry_list'> + <div class='entry_list_head element_list_head'> + <div class='entry_expand_tab' onclick='javascript:toggleMe("table_{{ summary.name }}");'>[+]</div> + <h3>{{ summary.nodes|length }} {{ summary.label 
}}</h3> + </div> + + <table id='table_{{ summary.name }}' class='entry_list'> + {% for node in summary.nodes|sort_interactions_by_name %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a href="{% url reports_client_detail_pk hostname=node.client.name,pk=node.id %}">{{ node.client.name }}</a></td> + </tr> + {% endfor %} + </table> + </div> + {% endfor %} +{% else %} + <p>No data to report on</p> +{% endif %} +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html new file mode 100644 index 000000000..47accb2cb --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html @@ -0,0 +1,38 @@ +{% extends "base-timeview.html" %} +{% load bcfg2_tags %} + +{% block title %}Bcfg2 - Performance Metrics{% endblock %} +{% block pagebanner %}Performance Metrics{% endblock %} + + +{% block extra_header_info %} +{% endblock%} + +{% block content %} +<div class='client_list_box'> + {% if metrics %} + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td>Name</td> + <td>Parse</td> + <td>Probe</td> + <td>Inventory</td> + <td>Install</td> + <td>Config</td> + <td>Total</td> + </tr> + {% for metric in metrics|dictsort:"name" %} + <tr class='{% cycle listview,listview_alt %}'> + <td><a style='font-size: 100%' + href="{% url reports_client_detail hostname=metric.name %}">{{ metric.name }}</a></td> + {% for mitem in metric|build_metric_list %} + <td>{{ mitem }}</td> + {% endfor %} + </tr> + {% endfor %} + </table> + {% else %} + <p>No metric data available</p> + {% endif %} +</div> +{% endblock %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html new file mode 100644 index 000000000..6b57baf6a --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html @@ -0,0 +1,13 @@ +{% 
spaceless %} +{% if filters %} +{% for filter, filter_url in filters %} + {% if forloop.first %} + <div class="filter_bar">Active filters (click to remove): + {% endif %} + <a href='{{ filter_url }}'>{{ filter|capfirst }}</a>{% if not forloop.last %}, {% endif %} + {% if forloop.last %} + </div> + {% endif %} +{% endfor %} +{% endif %} +{% endspaceless %} diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc new file mode 100644 index 000000000..8f2dec1dc --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc @@ -0,0 +1,38 @@ +{% load bcfg2_tags %} +<div class='interaction_history_widget'> + <table cellpadding="3"> + <tr id='table_list_header' class='listview'> + <td class='left_column'>Timestamp</td> + {% if not client %} + <td class='right_column_wide'>Client</td> + {% endif %} + <td class='right_column' style='width:75px'>State</td> + <td class='right_column_narrow'>Good</td> + <td class='right_column_narrow'>Bad</td> + <td class='right_column_narrow'>Modified</td> + <td class='right_column_narrow'>Extra</td> + <td class='right_column_wide'>Server</td> + </tr> + {% for entry in entry_list %} + <tr class='{% cycle listview,listview_alt %}'> + <td class='left_column'><a href='{% url reports_client_detail_pk hostname=entry.client.name, pk=entry.id %}'>{{ entry.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</a></td> + {% if not client %} + <td class='right_column_wide'><a href='{% add_url_filter hostname=entry.client.name %}'>{{ entry.client.name }}</a></td> + {% endif %} + <td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}' + {% ifequal entry.state 'dirty' %}class='dirty-lineitem'{% endifequal %}>{{ entry.state }}</a></td> + <td class='right_column_narrow'>{{ entry.goodcount }}</td> + <td class='right_column_narrow'>{{ entry.bad_entry_count }}</td> + <td 
class='right_column_narrow'>{{ entry.modified_entry_count }}</td> + <td class='right_column_narrow'>{{ entry.extra_entry_count }}</td> + <td class='right_column_wide'> + {% if entry.server %} + <a href='{% add_url_filter server=entry.server %}'>{{ entry.server }}</a> + {% else %} + + {% endif %} + </td> + </tr> + {% endfor %} + </table> +</div> diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html new file mode 100644 index 000000000..aa0def83e --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html @@ -0,0 +1,23 @@ +{% spaceless %} +{% for page, page_url in pager %} + {% if forloop.first %} + <div class="page_bar"> + {% if prev_page %}<a href="{{ prev_page }}">< Prev</a><span> </span>{% endif %} + {% if first_page %}<a href="{{ first_page }}">1</a><span> ... </span>{% endif %} + {% endif %} + {% ifequal page current_page %} + <span class='nav_bar_current'>{{ page }}</span> + {% else %} + <a href="{{ page_url }}">{{ page }}</a> + {% endifequal %} + {% if forloop.last %} + {% if last_page %}<span> ... 
</span><a href="{{ last_page }}">{{ total_pages }}</a><span> </span>{% endif %} + {% if next_page %}<a href="{{ next_page }}">Next ></a><span> </span>{% endif %} + |{% for limit, limit_url in page_limits %} <a href="{{ limit_url }}">{{ limit }}</a>{% endfor %} + </div> + {% else %} + <span> </span> + {% endif %} +{% endfor %} +{% endspaceless %} +<!-- {{ path }} --> diff --git a/build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py b/build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py diff --git a/build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py b/build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py new file mode 100644 index 000000000..7fffe289d --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py @@ -0,0 +1,274 @@ +from django import template +from django.core.urlresolvers import resolve, reverse, Resolver404, NoReverseMatch +from django.utils.encoding import smart_unicode, smart_str +from datetime import datetime, timedelta +from Bcfg2.Server.Reports.utils import filter_list + +register = template.Library() + +__PAGE_NAV_LIMITS__ = (10, 25, 50, 100) + +@register.inclusion_tag('widgets/page_bar.html', takes_context=True) +def page_navigator(context): + """ + Creates paginated links. + + Expects the context to be a RequestContext and views.prepare_paginated_list() + to have populated page information. 
+ """ + fragment = dict() + try: + path = context['request'].META['PATH_INFO'] + total_pages = int(context['total_pages']) + records_per_page = int(context['records_per_page']) + except KeyError, e: + return fragment + except ValueError, e: + return fragment + + if total_pages < 2: + return {} + + try: + view, args, kwargs = resolve(path) + current_page = int(kwargs.get('page_number',1)) + fragment['current_page'] = current_page + fragment['page_number'] = current_page + fragment['total_pages'] = total_pages + fragment['records_per_page'] = records_per_page + if current_page > 1: + kwargs['page_number'] = current_page - 1 + fragment['prev_page'] = reverse(view, args=args, kwargs=kwargs) + if current_page < total_pages: + kwargs['page_number'] = current_page + 1 + fragment['next_page'] = reverse(view, args=args, kwargs=kwargs) + + view_range = 5 + if total_pages > view_range: + pager_start = current_page - 2 + pager_end = current_page + 2 + if pager_start < 1: + pager_end += (1 - pager_start) + pager_start = 1 + if pager_end > total_pages: + pager_start -= (pager_end - total_pages) + pager_end = total_pages + else: + pager_start = 1 + pager_end = total_pages + + if pager_start > 1: + kwargs['page_number'] = 1 + fragment['first_page'] = reverse(view, args=args, kwargs=kwargs) + if pager_end < total_pages: + kwargs['page_number'] = total_pages + fragment['last_page'] = reverse(view, args=args, kwargs=kwargs) + + pager = [] + for page in range(pager_start, int(pager_end) + 1): + kwargs['page_number'] = page + pager.append( (page, reverse(view, args=args, kwargs=kwargs)) ) + + kwargs['page_number'] = 1 + page_limits = [] + for limit in __PAGE_NAV_LIMITS__: + kwargs['page_limit'] = limit + page_limits.append( (limit, reverse(view, args=args, kwargs=kwargs)) ) + # resolver doesn't like this + del kwargs['page_number'] + del kwargs['page_limit'] + page_limits.append( ('all', reverse(view, args=args, kwargs=kwargs) + "|all") ) + + fragment['pager'] = pager + 
fragment['page_limits'] = page_limits + + except Resolver404: + path = "404" + except NoReverseMatch, nr: + path = "NoReverseMatch: %s" % nr + except ValueError: + path = "ValueError" + #FIXME - Handle these + + fragment['path'] = path + return fragment + +@register.inclusion_tag('widgets/filter_bar.html', takes_context=True) +def filter_navigator(context): + try: + path = context['request'].META['PATH_INFO'] + view, args, kwargs = resolve(path) + + # Strip any page limits and numbers + if 'page_number' in kwargs: + del kwargs['page_number'] + if 'page_limit' in kwargs: + del kwargs['page_limit'] + + filters = [] + for filter in filter_list: + if filter in kwargs: + myargs = kwargs.copy() + del myargs[filter] + filters.append( (filter, reverse(view, args=args, kwargs=myargs) ) ) + filters.sort(lambda x,y: cmp(x[0], y[0])) + return { 'filters': filters } + except (Resolver404, NoReverseMatch, ValueError, KeyError): + pass + return dict() + +def _subtract_or_na(mdict, x, y): + """ + Shortcut for build_metric_list + """ + try: + return round(mdict[x] - mdict[y], 4) + except: + return "n/a" + +@register.filter +def build_metric_list(mdict): + """ + Create a list of metric table entries + + Moving this here it simplify the view. Should really handle the case where these + are missing... 
+ """ + td_list = [] + # parse + td_list.append( _subtract_or_na(mdict, 'config_parse', 'config_download')) + #probe + td_list.append( _subtract_or_na(mdict, 'probe_upload', 'start')) + #inventory + td_list.append( _subtract_or_na(mdict, 'inventory', 'initialization')) + #install + td_list.append( _subtract_or_na(mdict, 'install', 'inventory')) + #cfg download & parse + td_list.append( _subtract_or_na(mdict, 'config_parse', 'probe_upload')) + #total + td_list.append( _subtract_or_na(mdict, 'finished', 'start')) + return td_list + +@register.filter +def isstale(timestamp, entry_max=None): + """ + Check for a stale timestamp + + Compares two timestamps and returns True if the + difference is greater then 24 hours. + """ + if not entry_max: + entry_max = datetime.now() + return entry_max - timestamp > timedelta(hours=24) + +@register.filter +def sort_interactions_by_name(value): + """ + Sort an interaction list by client name + """ + inters = list(value) + inters.sort(lambda a,b: cmp(a.client.name, b.client.name)) + return inters + +class AddUrlFilter(template.Node): + def __init__(self, filter_name, filter_value): + self.filter_name = filter_name + self.filter_value = filter_value + self.fallback_view = 'Bcfg2.Server.Reports.reports.views.render_history_view' + + def render(self, context): + link = '#' + try: + path = context['request'].META['PATH_INFO'] + view, args, kwargs = resolve(path) + filter_value = self.filter_value.resolve(context, True) + if filter_value: + filter_name = smart_str(self.filter_name) + filter_value = smart_unicode(filter_value) + kwargs[filter_name] = filter_value + # These two don't make sense + if filter_name == 'server' and 'hostname' in kwargs: + del kwargs['hostname'] + elif filter_name == 'hostname' and 'server' in kwargs: + del kwargs['server'] + try: + link = reverse(view, args=args, kwargs=kwargs) + except NoReverseMatch, rm: + link = reverse(self.fallback_view, args=None, + kwargs={ filter_name: filter_value }) + except 
NoReverseMatch, rm: + raise rm + except (Resolver404, ValueError), e: + pass + return link + +@register.tag +def add_url_filter(parser, token): + """ + Return a url with the filter added to the current view. + + Takes a new filter and resolves the current view with the new filter + applied. Resolves to Bcfg2.Server.Reports.reports.views.client_history + by default. + + {% add_url_filter server=interaction.server %} + """ + try: + tag_name, filter_pair = token.split_contents() + filter_name, filter_value = filter_pair.split('=', 1) + filter_name = filter_name.strip() + filter_value = parser.compile_filter(filter_value) + except ValueError: + raise template.TemplateSyntaxError, "%r tag requires exactly one argument" % token.contents.split()[0] + if not filter_name or not filter_value: + raise template.TemplateSyntaxError, "argument should be a filter=value pair" + + return AddUrlFilter(filter_name, filter_value) + +@register.filter +def sortwell(value): + """ + Sorts a list(or evaluates queryset to list) of bad, extra, or modified items in the best + way for presentation + """ + + configItems = list(value) + configItems.sort(lambda x,y: cmp(x.entry.name, y.entry.name)) + configItems.sort(lambda x,y: cmp(x.entry.kind, y.entry.kind)) + return configItems + +class MediaTag(template.Node): + def __init__(self, filter_value): + self.filter_value = filter_value + + def render(self, context): + base = context['MEDIA_URL'] + try: + request = context['request'] + try: + base = request.environ['bcfg2.media_url'] + except: + if request.path != request.META['PATH_INFO']: + offset = request.path.find(request.META['PATH_INFO']) + if offset > 0: + base = "%s/%s" % (request.path[:offset], \ + context['MEDIA_URL'].strip('/')) + except: + pass + return "%s/%s" % (base, self.filter_value) + +@register.tag +def to_media_url(parser, token): + """ + Return a url relative to the media_url. 
+ + {% to_media_url /bcfg2.css %} + """ + try: + tag_name, filter_value = token.split_contents() + filter_value = parser.compile_filter(filter_value) + except ValueError: + raise template.TemplateSyntaxError, "%r tag requires exactly one argument" % token.contents.split()[0] + + return MediaTag(filter_value) + diff --git a/build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py b/build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py new file mode 100644 index 000000000..43dafb262 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py @@ -0,0 +1,41 @@ +from django import template +from django.utils.encoding import smart_unicode, smart_str +from django.utils.html import conditional_escape +from django.utils.safestring import mark_safe + +register = template.Library() + +try: + from pygments import highlight + from pygments.lexers import get_lexer_by_name + from pygments.formatters import HtmlFormatter + colorize = True + +except: + colorize = False + +@register.filter +def syntaxhilight(value, arg="diff", autoescape=None): + """ + Returns a syntax-hilighted version of Code; requires code/language arguments + """ + + if autoescape: + value = conditional_escape(value) + arg = conditional_escape(arg) + + if colorize: + try: + output = u'<style type="text/css">' \ + + smart_unicode(HtmlFormatter().get_style_defs('.highlight')) \ + + u'</style>' + + lexer = get_lexer_by_name(arg) + output += highlight(value, lexer, HtmlFormatter()) + return mark_safe(output) + except: + return value + else: + return mark_safe(u'<div class="note-box">Tip: Install pygments for highlighting</div><pre>%s</pre>' % value) +syntaxhilight.needs_autoescape = True + diff --git a/build/lib/Bcfg2/Server/Reports/reports/urls.py b/build/lib/Bcfg2/Server/Reports/reports/urls.py new file mode 100644 index 000000000..9970d26a1 --- /dev/null +++ b/build/lib/Bcfg2/Server/Reports/reports/urls.py @@ -0,0 +1,55 @@ +from 
def newRoot(request):
    """Permanently redirect the site root to the grid view."""
    # Fall back to the hard-coded path when the named url cannot be reversed.
    grid_target = '/grid'
    try:
        grid_target = reverse('reports_grid_view')
    except NoReverseMatch:
        pass
    return HttpResponsePermanentRedirect(grid_target)
class PaginationError(Exception):
    """Raised when a paginated listing cannot be prepared.

    The argument may be a message string, or an HttpResponse that the
    caller should return directly (see render_history_view).
    """
+ + """ + from django.views import debug + return debug.technical_500_response(request, *sys.exc_info()) + +def timeview(fn): + """ + Setup a timeview view + + Handles backend posts from the calendar and converts date pieces + into a 'timestamp' parameter + + """ + def _handle_timeview(request, **kwargs): + """Send any posts back.""" + if request.method == 'POST': + cal_date = request.POST['cal_date'] + try: + fmt = "%Y/%m/%d" + if cal_date.find(' ') > -1: + fmt += " %H:%M" + timestamp = datetime(*strptime(cal_date, fmt)[0:6]) + view, args, kw = resolve(request.META['PATH_INFO']) + kw['year'] = "%0.4d" % timestamp.year + kw['month'] = "%02.d" % timestamp.month + kw['day'] = "%02.d" % timestamp.day + if cal_date.find(' ') > -1: + kw['hour'] = timestamp.hour + kw['minute'] = timestamp.minute + return HttpResponseRedirect(reverse(view, args=args, kwargs=kw)) + except KeyError: + pass + except: + pass + # FIXME - Handle this + + """Extract timestamp from args.""" + timestamp = None + try: + timestamp = datetime(int(kwargs.pop('year')), int(kwargs.pop('month')), + int(kwargs.pop('day')), int(kwargs.pop('hour', 0)), + int(kwargs.pop('minute', 0)), 0) + kwargs['timestamp'] = timestamp + except KeyError: + pass + except: + raise + return fn(request, **kwargs) + + return _handle_timeview + +def config_item(request, pk, type="bad"): + """ + Display a single entry. + + Dispalys information about a single entry. 
+ + """ + item = get_object_or_404(Entries_interactions, id=pk) + timestamp=item.interaction.timestamp + time_start=item.interaction.timestamp.replace(\ + hour=0, minute=0, second=0, microsecond=0) + time_end=time_start + timedelta(days=1) + + todays_data = Interaction.objects.filter(\ + timestamp__gte=time_start,\ + timestamp__lt=time_end) + shared_entries = Entries_interactions.objects.filter(entry=item.entry,\ + reason=item.reason, type=item.type, + interaction__in=[x['id']\ + for x in todays_data.values('id')]) + + associated_list = Interaction.objects.filter(id__in=[x['interaction']\ + for x in shared_entries.values('interaction')])\ + .order_by('client__name','timestamp').select_related().all() + + return render_to_response('config_items/item.html', + {'item':item, + 'isextra': item.type == TYPE_EXTRA, + 'mod_or_bad': type, + 'associated_list':associated_list, + 'timestamp' : timestamp}, + context_instance=RequestContext(request)) + +@timeview +def config_item_list(request, type, timestamp=None): + """Render a listing of affected elements""" + mod_or_bad = type.lower() + type = convert_entry_type_to_id(type) + if type < 0: + raise Http404 + + current_clients = Interaction.objects.get_interaction_per_client_ids(timestamp) + item_list_dict = {} + seen = dict() + for x in Entries_interactions.objects.filter(interaction__in=current_clients, type=type).select_related(): + if (x.entry, x.reason) in seen: + continue + seen[(x.entry, x.reason)] = 1 + if item_list_dict.get(x.entry.kind, None): + item_list_dict[x.entry.kind].append(x) + else: + item_list_dict[x.entry.kind] = [x] + + for kind in item_list_dict: + item_list_dict[kind].sort(lambda a,b: cmp(a.entry.name, b.entry.name)) + + return render_to_response('config_items/listing.html', {'item_list_dict':item_list_dict, + 'mod_or_bad':mod_or_bad, + 'timestamp' : timestamp}, + context_instance=RequestContext(request)) + +@timeview +def client_index(request, timestamp=None): + """ + Render a grid view of active 
def client_detail(request, hostname=None, pk=None):
    """Render the detail page for a single client.

    hostname -- client name; unknown names raise a 404
    pk -- optional primary key of a specific interaction; when absent the
          client's current interaction is shown
    """
    context = dict()
    client = get_object_or_404(Client, name=hostname)
    if pk is None:
        context['interaction'] = client.current_interaction
        maxdate = None
    else:
        context['interaction'] = client.interactions.get(pk=pk)
        # Limit the history shown to interactions up to the one displayed.
        maxdate = context['interaction'].timestamp
    # Single call site instead of two duplicated ones; maxdate=None is
    # equivalent to omitting the kwarg in render_history_view.
    return render_history_view(request, 'clients/detail.html', page_limit=5,
                               client=client, maxdate=maxdate, context=context)
% client_name + else: + message = "Missing action" + except Client.DoesNotExist: + if not client_name: + client_name = "<none>" + message = "Couldn't find client \"%s\"" % client_name + + return render_to_response('clients/manage.html', + {'clients': Client.objects.order_by('name').all(), 'message': message}, + context_instance=RequestContext(request)) + +@timeview +def display_summary(request, timestamp=None): + """ + Display a summary of the bcfg2 world + """ + query = Interaction.objects.interaction_per_client(timestamp).select_related() + node_count = query.count() + recent_data = query.all() + if not timestamp: + timestamp = datetime.now() + + collected_data = dict(clean=[],bad=[],modified=[],extra=[],stale=[],pings=[]) + for node in recent_data: + if timestamp - node.timestamp > timedelta(hours=24): + collected_data['stale'].append(node) + # If stale check for uptime + try: + if node.client.pings.latest().status == 'N': + collected_data['pings'].append(node) + except Ping.DoesNotExist: + collected_data['pings'].append(node) + continue + if node.bad_entry_count() > 0: + collected_data['bad'].append(node) + else: + collected_data['clean'].append(node) + if node.modified_entry_count() > 0: + collected_data['modified'].append(node) + if node.extra_entry_count() > 0: + collected_data['extra'].append(node) + + # label, header_text, node_list + summary_data = [] + get_dict = lambda name, label: { 'name': name, + 'nodes': collected_data[name], + 'label': label } + if len(collected_data['clean']) > 0: + summary_data.append( get_dict('clean', 'nodes are clean.') ) + if len(collected_data['bad']) > 0: + summary_data.append( get_dict('bad', 'nodes are bad.') ) + if len(collected_data['modified']) > 0: + summary_data.append( get_dict('modified', 'nodes were modified.') ) + if len(collected_data['extra']) > 0: + summary_data.append( get_dict('extra', + 'nodes have extra configurations.') ) + if len(collected_data['stale']) > 0: + summary_data.append( get_dict('stale', + 
@timeview
def display_timing(request, timestamp=None):
    """Render per-client performance metrics for the current interactions."""
    inters = Interaction.objects.interaction_per_client(timestamp).select_related().all()
    # Seed one metric dict per interaction with the client name — a plain
    # loop instead of the original side-effecting list comprehension
    # built from __setitem__ calls.
    mdict = {}
    for inter in inters:
        mdict[inter] = {'name': inter.client.name}
    for metric in Performance.objects.filter(interaction__in=mdict.keys()).all():
        for i in metric.interaction.all():
            mdict[i][metric.metric] = metric.value
    return render_to_response('displays/timing.html',
                              {'metrics': mdict.values(), 'timestamp': timestamp},
                              context_instance=RequestContext(request))
def prepare_paginated_list(request, context, paged_list, page=1, max_results=25):
    """
    Prepare context and slice an object for pagination.

    Stores 'total_pages' and 'records_per_page' into context and returns
    the (start, end) slice indices for the requested page.

    Raises PaginationError on invalid arguments; when the requested page
    is past the end, the error carries an HttpResponseRedirect to the
    last page (or a message when the redirect cannot be built).
    """
    if max_results < 1:
        raise PaginationError("Max results less than 1")
    if paged_list is None:
        raise PaginationError("Invalid object")

    try:
        nitems = paged_list.count()
    except TypeError:
        nitems = len(paged_list)

    max_results = int(max_results)
    rec_start = (page - 1) * max_results
    # Ceiling division fixes the off-by-one of the old "(n / max) + 1",
    # which reported an extra empty page whenever n was an exact multiple
    # of max_results.  An empty list still yields a single (empty) page.
    total_pages = max((nitems + max_results - 1) // max_results, 1)
    if page > total_pages:
        # Requested page is beyond the end; send the caller back to the
        # last valid page via the exception payload.
        try:
            view, args, kwargs = resolve(request.META['PATH_INFO'])
            kwargs['page_number'] = total_pages
            raise PaginationError(HttpResponseRedirect(reverse(view, kwargs=kwargs)))
        except (Resolver404, NoReverseMatch, ValueError):
            # The old code raised a plain string here, which is a
            # TypeError on modern Pythons.
            raise PaginationError("Accessing beyond last page. Unable to resolve redirect.")

    context['total_pages'] = total_pages
    context['records_per_page'] = max_results
    return (rec_start, rec_start + max_results)
+ DATABASE_PASSWORD = c.get('statistics', 'database_password') + # Not used with sqlite3. + DATABASE_HOST = c.get('statistics', 'database_host') + # Set to empty string for localhost. Not used with sqlite3. + DATABASE_PORT = c.get('statistics', 'database_port') + # Set to empty string for default. Not used with sqlite3. +if DATABASE_ENGINE == 'sqlite3' and DATABASE_NAME == '': + DATABASE_NAME = "%s/etc/brpt.sqlite" % c.get('server', 'repository') + +# Local time zone for this installation. All choices can be found here: +# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone +try: + TIME_ZONE = c.get('statistics', 'time_zone') +except: + if django.VERSION[0] == 1 and django.VERSION[1] > 2: + TIME_ZONE = None + +# Language code for this installation. All choices can be found here: +# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes +# http://blogs.law.harvard.edu/tech/stories/storyReader$15 +LANGUAGE_CODE = 'en-us' + +SITE_ID = 1 + +# Absolute path to the directory that holds media. +# Example: "/home/media/media.lawrence.com/" +MEDIA_ROOT = '' + +# URL that handles the media served from MEDIA_ROOT. +# Example: "http://media.lawrence.com" +MEDIA_URL = '/site_media' +if c.has_option('statistics', 'web_prefix'): + MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL + +# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a +# trailing slash. +# Examples: "http://foo.com/media/", "/media/". +ADMIN_MEDIA_PREFIX = '/media/' + +# Make this unique, and don't share it with anybody. +SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7' + +# List of callables that know how to import templates from various sources. 
+TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', + 'django.template.loaders.eggs.load_template_source', +) + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.middleware.doc.XViewMiddleware', +) + +ROOT_URLCONF = 'Bcfg2.Server.Reports.urls' + +# Authentication Settings +# Use NIS authentication backend defined in backends.py +AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', + 'Bcfg2.Server.Reports.backends.NISBackend') +# The NIS group authorized to login to BCFG2's reportinvg system +AUTHORIZED_GROUP = '' +#create login url area: +try: + import django.contrib.auth +except ImportError: + print('Import of Django module failed. Is Django installed?') +django.contrib.auth.LOGIN_URL = '/login' + +SESSION_EXPIRE_AT_BROWSER_CLOSE = True + + + +TEMPLATE_DIRS = ( + # Put strings here, like "/home/html/django_templates". + # Always use forward slashes, even on Windows. 
def _merge_database_table_entries():
    """One-off migration: fold the legacy bad/modified/extra tables into
    the unified reports_entries / reports_entries_interactions tables."""
    cursor = connection.cursor()
    insert_cursor = connection.cursor()
    find_cursor = connection.cursor()
    # Collect every distinct (name, kind) pair across the three old tables.
    cursor.execute("""
    Select name, kind from reports_bad
    union
    select name, kind from reports_modified
    union
    select name, kind from reports_extra
    """)
    # this fetch could be better done
    entries_map={}
    for row in cursor.fetchall():
        insert_cursor.execute("insert into reports_entries (name, kind) \
            values (%s, %s)", (row[0], row[1]))
        # Remember the new row id so the link rows below need no lookup.
        entries_map[(row[0], row[1])] = insert_cursor.lastrowid

    # Pull each old link row; the literal 1/2/3 columns encode the entry
    # type (bad/modified/extra) for the merged table's "type" column.
    cursor.execute("""
    Select name, kind, reason_id, interaction_id, 1 from reports_bad
    inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id
    union
    Select name, kind, reason_id, interaction_id, 2 from reports_modified
    inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id
    union
    Select name, kind, reason_id, interaction_id, 3 from reports_extra
    inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id
    """)
    for row in cursor.fetchall():
        key = (row[0], row[1])
        if entries_map.get(key, None):
            entry_id = entries_map[key]
        else:
            # Cache miss — presumably a backend where lastrowid was not
            # usable; fall back to querying the id (TODO confirm).
            find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key)
            rowe = find_cursor.fetchone()
            entry_id = rowe[0]
        insert_cursor.execute("insert into reports_entries_interactions \
            (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4]))
def _populate_interaction_entry_counts():
    '''Populate up the type totals for the interaction table'''
    cursor = connection.cursor()
    # Maps an entry type id to the interaction column that caches its count.
    count_field = {TYPE_BAD: 'bad_entries',
                   TYPE_MODIFIED: 'modified_entries',
                   TYPE_EXTRA: 'extra_entries'}

    # 'entry_type' instead of the old 'type', which shadowed the builtin.
    for entry_type in count_field.keys():
        # Pass the value as a query parameter rather than interpolating it
        # into the SQL string.
        cursor.execute("select count(type), interaction_id "
                       "from reports_entries_interactions where type = %s "
                       "group by interaction_id", [entry_type])
        updates = cursor.fetchall()
        try:
            # The column name comes from our own mapping above, not from
            # user input, so string concatenation is safe here.
            cursor.executemany("update reports_interaction set " +
                               count_field[entry_type] +
                               "=%s where id = %s", updates)
        except Exception as e:
            # Best-effort: a failed column update should not abort the rest.
            print(e)
    cursor.close()
def rollupdate(current_version):
    """ function responsible to coordinates all the updates
    need current_version as integer

    Applies every fix in _fixes from current_version onward, recording an
    InternalDatabaseVersion row after each step.  Returns the last version
    row created, or None when the database is already up to date.
    """
    ret = None
    if current_version < lastversion:
        for i in range(current_version, lastversion):
            try:
                # Fixes are either raw SQL strings or callables.
                if type(_fixes[i]) == str:
                    connection.cursor().execute(_fixes[i])
                else:
                    _fixes[i]()
            except:
                # Best-effort: log (with traceback) and continue so one
                # failed fix does not block later ones; the version row is
                # still recorded below.
                logger.error("Failed to perform db update %s" % (_fixes[i]), exc_info=1)
            # since array start at 0 but version start at 1 we add 1 to the normal count
            ret = InternalDatabaseVersion.objects.create(version=i+1)
        return ret
    else:
        return None
def update_database():
    """Find the current revision of the database models and bring the
    schema up to the latest known version."""
    try:
        logger.debug("Running upgrade of models to the new one")
        dosync()
        versions = InternalDatabaseVersion.objects.order_by('-version')
        if versions:
            current = versions[0]
        else:
            logger.debug("No version, creating initial version")
            current = InternalDatabaseVersion.objects.create(version=0)
        logger.debug("Presently at %s" % current)
        if current.version < lastversion:
            upgraded = rollupdate(current.version)
            if upgraded:
                logger.debug("upgraded to %s" % upgraded)
    except:
        # Schema updates are best-effort: log the traceback line by line
        # instead of propagating.
        logger.error("Error while updating the database")
        for line in traceback.format_exc().splitlines():
            logger.error(line)
class BatchFetch(object):
    """Fetch Django objects in smaller batches to save memory"""

    def __init__(self, obj, step=10000):
        """
        obj -- queryset-like object exposing count() and a sliceable all()
        step -- number of rows fetched per database round trip
        """
        self.count = 0          # position within the current batch
        self.block_count = 0    # offset of the next batch to fetch
        self.obj = obj
        self.data = None        # current batch, materialized as a list
        self.step = step
        self.max = obj.count()

    def __iter__(self):
        return self

    def next(self):
        """Return the next object from our array and fetch from the
        database when needed"""
        if self.block_count + self.count - self.step == self.max:
            raise StopIteration
        if self.block_count == 0 or self.count == self.step:
            # Without list() this turns into LIMIT 1 OFFSET x queries
            self.data = list(self.obj.all()[self.block_count: \
                                            (self.block_count + self.step)])
            self.block_count += self.step
            self.count = 0
        if self.count >= len(self.data):
            # Fix: an empty (or shrunken) source previously raised
            # IndexError here instead of ending iteration cleanly.
            raise StopIteration
        self.count += 1
        return self.data[self.count - 1]

    # Python 3 iterator protocol alias.
    __next__ = next
+ + """ + results = [(pattern, view, kwargs, name)] + tail = '' + mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) + if mtail: + tail = mtail.group(1) + pattern = pattern[:len(pattern) - len(tail)] + results += [(pattern + "/(?P<page_number>\d+)" + tail, view, kwargs)] + results += [(pattern + "/(?P<page_number>\d+)\|(?P<page_limit>\d+)" + tail, view, kwargs)] + if not kwargs: + kwargs = dict() + kwargs['page_limit'] = 0 + results += [(pattern + "/?\|(?P<page_limit>all)" + tail, view, kwargs)] + return results + +@generateUrls +def filteredUrls(pattern, view, kwargs=None, name=None): + """ + Takes a url and adds filtered urls. + + Extends a url tuple to include filtered view urls. Currently doesn't + handle url() compiled patterns. + """ + results = [(pattern, view, kwargs, name)] + tail = '' + mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) + if mtail: + tail = mtail.group(1) + pattern = pattern[:len(pattern) - len(tail)] + for filter in ('/state/(?P<state>\w+)', + '/server/(?P<server>[\w\-\.]+)', + '/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'): + results += [(pattern + filter + tail, view, kwargs)] + return results + +@generateUrls +def timeviewUrls(pattern, view, kwargs=None, name=None): + """ + Takes a url and adds timeview urls + + Extends a url tuple to include filtered view urls. Currently doesn't + handle url() compiled patterns. 
+ """ + results = [(pattern, view, kwargs, name)] + tail = '' + mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) + if mtail: + tail = mtail.group(1) + pattern = pattern[:len(pattern) - len(tail)] + for filter in ('/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})/' + \ + '(?P<hour>\d\d)-(?P<minute>\d\d)', + '/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'): + results += [(pattern + filter + tail, view, kwargs)] + return results + diff --git a/build/lib/Bcfg2/Server/Snapshots/__init__.py b/build/lib/Bcfg2/Server/Snapshots/__init__.py new file mode 100644 index 000000000..6018377cb --- /dev/null +++ b/build/lib/Bcfg2/Server/Snapshots/__init__.py @@ -0,0 +1,30 @@ +__all__ = ['models', 'db_from_config', 'setup_session'] + +import sqlalchemy +import sqlalchemy.orm +import ConfigParser + + +def db_from_config(cfile): + cp = ConfigParser.ConfigParser() + cp.read([cfile]) + driver = cp.get('snapshots', 'driver') + if driver == 'sqlite': + path = cp.get('snapshots', 'database') + return 'sqlite:///%s' % path + elif driver in ['mysql', 'postgres']: + user = cp.get('snapshots', 'user') + password = cp.get('snapshots', 'password') + host = cp.get('snapshots', 'host') + db = cp.get('snapshots', 'database') + return '%s://%s:%s@%s/%s' % (driver, user, password, host, db) + else: + raise Exception, "unsupported db driver %s" % driver + + +def setup_session(cfile, debug=False): + engine = sqlalchemy.create_engine(db_from_config(cfile), + echo=debug) + Session = sqlalchemy.orm.sessionmaker() + Session.configure(bind=engine) + return Session() diff --git a/build/lib/Bcfg2/Server/Snapshots/model.py b/build/lib/Bcfg2/Server/Snapshots/model.py new file mode 100644 index 000000000..cbb14be79 --- /dev/null +++ b/build/lib/Bcfg2/Server/Snapshots/model.py @@ -0,0 +1,278 @@ +from sqlalchemy import Table, Column, Integer, Unicode, ForeignKey, Boolean, \ + DateTime, UnicodeText, desc +import datetime +import sqlalchemy.exceptions +from sqlalchemy.orm import relation, backref +from 
# Complete the import split across the hunk boundary (re-importing is a
# harmless no-op if the line above already brought the name in).
from sqlalchemy.ext.declarative import declarative_base


class Uniquer(object):
    """Mixin giving declarative classes get-or-create semantics."""
    # When True, by_value() tries to reuse an existing row first.
    force_rt = True

    @classmethod
    def by_value(cls, session, **kwargs):
        """Return the unique existing row matching **kwargs, or a fresh
        (unsaved) instance when no such row exists."""
        if not cls.force_rt:
            return cls(**kwargs)
        try:
            return session.query(cls).filter_by(**kwargs).one()
        except sqlalchemy.exceptions.InvalidRequestError:
            # .one() found zero (or several) rows; fall back to a new object.
            return cls(**kwargs)

    @classmethod
    def from_record(cls, session, data):
        """Build an instance from a dict of column values."""
        return cls.by_value(session, **data)

Base = declarative_base()


class Administrator(Uniquer, Base):
    """A person responsible for clients and/or groups."""
    __tablename__ = 'administrator'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(20), unique=True)
    email = Column(Unicode(64))

# many-to-many: administrators <-> clients
admin_client = Table('admin_client', Base.metadata,
                     Column('admin_id', Integer, ForeignKey('administrator.id')),
                     Column('client_id', Integer, ForeignKey('client.id')))

# many-to-many: administrators <-> groups
admin_group = Table('admin_group', Base.metadata,
                    Column('admin_id', Integer, ForeignKey('administrator.id')),
                    Column('group_id', Integer, ForeignKey('group.id')))


class Client(Uniquer, Base):
    """A Bcfg2 client host."""
    __tablename__ = 'client'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(64), unique=True)
    admins = relation("Administrator", secondary=admin_client,
                      backref='clients')
    active = Column(Boolean, default=True)
    online = Column(Boolean, default=True)
    # timestamp of the last online-state change
    online_ts = Column(DateTime)


class Group(Uniquer, Base):
    """A Bcfg2 metadata group."""
    __tablename__ = 'group'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(32), unique=True)
    admins = relation("Administrator", secondary=admin_group,
                      backref='groups')


class ConnectorKeyVal(Uniquer, Base):
    """One key/value datum supplied by a connector plugin."""
    __tablename__ = 'connkeyval'
    id = Column(Integer, primary_key=True)
    connector = Column(Unicode(16))
    key = Column(Unicode(32))
    value = Column(UnicodeText)

# many-to-many: metadata <-> groups
meta_group = Table('meta_group', Base.metadata,
                   Column('metadata_id', Integer, ForeignKey('metadata.id')),
                   Column('group_id', Integer, ForeignKey('group.id')))

# many-to-many: metadata <-> connector key/value pairs
meta_conn = Table('meta_conn', Base.metadata,
                  Column('metadata_id', Integer, ForeignKey('metadata.id')),
                  Column('connkeyval_id', Integer, ForeignKey('connkeyval.id')))


class Metadata(Base):
    """A stored copy of a client's metadata at one point in time."""
    __tablename__ = 'metadata'
    id = Column(Integer, primary_key=True)
    client_id = Column(Integer, ForeignKey('client.id'))
    client = relation(Client)
    groups = relation("Group", secondary=meta_group)
    keyvals = relation(ConnectorKeyVal, secondary=meta_conn)
    timestamp = Column(DateTime)

    @classmethod
    def from_metadata(cls, mysession, mymetadata):
        """Persist a live Bcfg2 client-metadata object as ORM rows."""
        client = Client.by_value(mysession, name=unicode(mymetadata.hostname))
        meta = cls(client=client)
        for group in mymetadata.groups:
            meta.groups.append(Group.by_value(mysession, name=unicode(group)))
        for connector in mymetadata.connectors:
            data = getattr(mymetadata, connector)
            if not isinstance(data, dict):
                continue
            for key, value in data.iteritems():
                # only plain string values are storable
                if not isinstance(value, str):
                    continue
                meta.keyvals.append(ConnectorKeyVal.by_value(
                    mysession,
                    connector=unicode(connector),
                    key=unicode(key),
                    value=unicode(value)))
        return meta


class Package(Base, Uniquer):
    """One concrete package version observed on a client."""
    __tablename__ = 'package'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(24))
    type = Column(Unicode(16))
    version = Column(Unicode(16))
    verification_status = Column(Boolean)


class CorrespondenceType(object):
    """Mixin for start/end entry pairs (desired vs. installed state)."""
    mtype = Package

    @classmethod
    def from_record(cls, mysession, record):
        """Build a pair from a (modified, correct, name, start, end) record.

        An empty start dict means the entry was absent; identical start
        and end dicts share one row.
        """
        (mod, corr, name, s_dict, e_dict) = record
        start = cls.mtype.by_value(mysession, name=name, **s_dict) \
            if s_dict else None
        end = start if s_dict == e_dict \
            else cls.mtype.by_value(mysession, name=name, **e_dict)
        return cls(start=start, end=end, modified=mod, correct=corr)


class PackageCorrespondence(Base, CorrespondenceType):
    mtype = Package
    __tablename__ = 'package_pair'
    id = Column(Integer, primary_key=True)
    start_id = Column(Integer, ForeignKey('package.id'))
    start = relation(Package, primaryjoin=start_id == Package.id)
    end_id = Column(Integer, ForeignKey('package.id'), nullable=True)
    end = relation(Package, primaryjoin=end_id == Package.id)
    modified = Column(Boolean)
    correct = Column(Boolean)

package_snap = Table('package_snap', Base.metadata,
                     Column('ppair_id', Integer, ForeignKey('package_pair.id')),
                     Column('snapshot_id', Integer, ForeignKey('snapshot.id')))


class Service(Base, Uniquer):
    """One service state observed on a client."""
    __tablename__ = 'service'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(16))
    type = Column(Unicode(12))
    status = Column(Boolean)


class ServiceCorrespondence(Base, CorrespondenceType):
    mtype = Service
    __tablename__ = 'service_pair'
    id = Column(Integer, primary_key=True)
    start_id = Column(Integer, ForeignKey('service.id'))
    start = relation(Service, primaryjoin=start_id == Service.id)
    end_id = Column(Integer, ForeignKey('service.id'), nullable=True)
    end = relation(Service, primaryjoin=end_id == Service.id)
    modified = Column(Boolean)
    correct = Column(Boolean)

service_snap = Table('service_snap', Base.metadata,
                     Column('spair_id', Integer, ForeignKey('service_pair.id')),
                     Column('snapshot_id', Integer, ForeignKey('snapshot.id')))


class File(Base, Uniquer):
    """One file state (ownership, permissions, contents) on a client."""
    __tablename__ = 'file'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    type = Column(Unicode(12))
    owner = Column(Unicode(12))
    group = Column(Unicode(16))
    # NOTE(review): Integer(5) passes a display-width argument that newer
    # SQLAlchemy releases ignore/reject -- confirm against the pinned version.
    perms = Column(Integer(5))
    contents = Column(UnicodeText)


class FileCorrespondence(Base, CorrespondenceType):
    mtype = File
    __tablename__ = 'file_pair'
    id = Column(Integer, primary_key=True)
    start_id = Column(Integer, ForeignKey('file.id'))
    start = relation(File, primaryjoin=start_id == File.id)
    end_id = Column(Integer, ForeignKey('file.id'), nullable=True)
    end = relation(File, primaryjoin=end_id == File.id)
    modified = Column(Boolean)
    correct = Column(Boolean)

file_snap = Table('file_snap', Base.metadata,
                  Column('fpair_id', Integer, ForeignKey('file_pair.id')),
                  Column('snapshot_id', Integer, ForeignKey('snapshot.id')))

# "extra" entries: present on the client but not in its configuration
extra_pkg_snap = Table('extra_pkg_snap', Base.metadata,
                       Column('package_id', Integer, ForeignKey('package.id')),
                       Column('snapshot_id', Integer, ForeignKey('snapshot.id')))

extra_file_snap = Table('extra_file_snap', Base.metadata,
                        Column('file_id', Integer, ForeignKey('file.id')),
                        Column('snapshot_id', Integer, ForeignKey('snapshot.id')))

extra_service_snap = Table('extra_service_snap', Base.metadata,
                           Column('service_id', Integer, ForeignKey('service.id')),
                           Column('snapshot_id', Integer, ForeignKey('snapshot.id')))


class Action(Base):
    """A command executed on a client, with its exit code and output."""
    __tablename__ = 'action'
    id = Column(Integer, primary_key=True)
    command = Column(UnicodeText)
    return_code = Column(Integer)
    output = Column(UnicodeText)

action_snap = Table('action_snap', Base.metadata,
                    Column('action_id', Integer, ForeignKey('action.id')),
                    Column('snapshot_id', Integer, ForeignKey('snapshot.id')))


class Snapshot(Base):
    """One full client-state snapshot taken at a point in time."""
    __tablename__ = 'snapshot'
    id = Column(Integer, primary_key=True)
    correct = Column(Boolean)
    revision = Column(Unicode(36))
    metadata_id = Column(Integer, ForeignKey('metadata.id'))
    client_metadata = relation(Metadata, primaryjoin=metadata_id == Metadata.id)
    timestamp = Column(DateTime, default=datetime.datetime.now)
    client_id = Column(Integer, ForeignKey('client.id'))
    client = relation(Client, backref=backref('snapshots'))
    packages = relation(PackageCorrespondence, secondary=package_snap)
    services = relation(ServiceCorrespondence, secondary=service_snap)
    files = relation(FileCorrespondence, secondary=file_snap)
    actions = relation(Action, secondary=action_snap)
    extra_packages = relation(Package, secondary=extra_pkg_snap)
    extra_services = relation(Service, secondary=extra_service_snap)
    extra_files = relation(File, secondary=extra_file_snap)

    # entry-type name -> (collection attribute, pair class) for configured
    # entries, and -> (collection attribute, plain class) for extra entries
    c_dispatch = {'Package': ('packages', PackageCorrespondence),
                  'Service': ('services', ServiceCorrespondence),
                  'Path': ('files', FileCorrespondence)}
    e_dispatch = {'Package': ('extra_packages', Package),
                  'Service': ('extra_services', Service),
                  'Path': ('extra_files', File)}

    @classmethod
    def from_data(cls, session, correct, revision, metadata, entries, extra):
        """Assemble a Snapshot from raw entry/extra record dicts."""
        dbm = Metadata.from_metadata(session, metadata)
        snap = cls(correct=correct, client_metadata=dbm, revision=revision,
                   timestamp=datetime.datetime.now(), client=dbm.client)
        for (dispatch, data) in [(cls.c_dispatch, entries),
                                 (cls.e_dispatch, extra)]:
            for key in dispatch:
                dest, ecls = dispatch[key]
                for edata in data[key].values():
                    getattr(snap, dest).append(ecls.from_record(session, edata))
        return snap

    @classmethod
    def by_client(cls, session, clientname):
        """Query for all snapshots belonging to *clientname*."""
        return session.query(cls) \
                      .join(cls.client_metadata, Metadata.client) \
                      .filter(Client.name == clientname)

    @classmethod
    def get_current(cls, session, clientname):
        """Return the newest snapshot for *clientname*, or None."""
        return session.query(Snapshot) \
                      .join(Snapshot.client_metadata, Metadata.client) \
                      .filter(Client.name == clientname) \
                      .order_by(desc(Snapshot.timestamp)) \
                      .first()

    @classmethod
    def get_by_date(cls, session, clientname, timestamp):
        """Return the newest snapshot for *clientname* taken strictly
        before *timestamp*, or None."""
        return session.query(Snapshot) \
                      .join(Snapshot.client_metadata, Metadata.client) \
                      .filter(Snapshot.timestamp < timestamp) \
                      .filter(Client.name == clientname) \
                      .order_by(desc(Snapshot.timestamp)) \
                      .first()


# --- build/lib/Bcfg2/Server/__init__.py ---
# $Id$
"""This is the set of modules for Bcfg2.Server."""

__revision__ = '$Revision$'
__all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins",
           "Hostbase", "Reports", "Snapshots"]