Diffstat (limited to 'build/lib/Bcfg2/Server/Plugins')
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Account.py         93
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/BB.py              84
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Base.py            38
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Bundler.py         76
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Bzr.py             36
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Cfg.py            165
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Cvs.py             47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/DBStats.py        110
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Darcs.py           49
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Decisions.py       64
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Deps.py           103
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Editor.py          73
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Fossil.py          52
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Git.py             45
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/GroupPatterns.py  117
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Guppy.py           63
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Hg.py              47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Hostbase.py       585
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Metadata.py       809
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/NagiosGen.py      114
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Ohai.py            79
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Packages.py       869
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Pkgmgr.py         155
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Probes.py         150
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Properties.py      37
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Rules.py           11
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/SGenshi.py         76
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/SSHbase.py        279
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/SSLCA.py          239
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Snapshots.py      130
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Statistics.py     161
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Svcmgr.py          12
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Svn.py             46
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/TCheetah.py        78
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/TGenshi.py        126
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Trigger.py         37
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Web.py             47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/__Web.py           47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/__init__.py        35
39 files changed, 0 insertions, 5384 deletions
diff --git a/build/lib/Bcfg2/Server/Plugins/Account.py b/build/lib/Bcfg2/Server/Plugins/Account.py
deleted file mode 100644
index e3ea58761..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Account.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""This handles authentication setup."""
-__revision__ = '$Revision$'
-
-import Bcfg2.Server.Plugin
-
-class Account(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator):
- """This module generates account config files,
- based on an internal data repo:
- static.(passwd|group|limits.conf) -> static entries
- dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch)
- useraccess -> users to be granted login access on some hosts
- superusers -> users to be granted root privs on all hosts
- rootlike -> users to be granted root privs on some hosts
-
- """
- name = 'Account'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- self.Entries = {'ConfigFile':{'/etc/passwd':self.from_yp_cb,
- '/etc/group':self.from_yp_cb,
- '/etc/security/limits.conf':self.gen_limits_cb,
- '/root/.ssh/authorized_keys':self.gen_root_keys_cb,
- '/etc/sudoers':self.gen_sudoers}}
- try:
- self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data, self.core.fam)
- except:
-            self.logger.error("Failed to load Account repository: %s" % self.data)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def from_yp_cb(self, entry, metadata):
- """Build password file from cached yp data."""
- fname = entry.attrib['name'].split('/')[-1]
- entry.text = self.repository.entries["static.%s" % (fname)].data
- entry.text += self.repository.entries["dyn.%s" % (fname)].data
- perms = {'owner':'root', 'group':'root', 'perms':'0644'}
- [entry.attrib.__setitem__(key, value) for (key, value) in \
- perms.iteritems()]
-
- def gen_limits_cb(self, entry, metadata):
- """Build limits entries based on current ACLs."""
- entry.text = self.repository.entries["static.limits.conf"].data
- superusers = self.repository.entries["superusers"].data.split()
- useraccess = [line.split(':') for line in \
- self.repository.entries["useraccess"].data.split()]
- users = [user for (user, host) in \
- useraccess if host == metadata.hostname.split('.')[0]]
- perms = {'owner':'root', 'group':'root', 'perms':'0600'}
- [entry.attrib.__setitem__(key, value) for (key, value) in \
- perms.iteritems()]
- entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users])
- if "*" not in users:
- entry.text += "* hard maxlogins 0\n"
-
- def gen_root_keys_cb(self, entry, metadata):
- """Build root authorized keys file based on current ACLs."""
- superusers = self.repository.entries['superusers'].data.split()
- try:
- rootlike = [line.split(':', 1) for line in \
- self.repository.entries['rootlike'].data.split()]
- superusers += [user for (user, host) in rootlike \
- if host == metadata.hostname.split('.')[0]]
- except:
- pass
- rdata = self.repository.entries
- entry.text = "".join([rdata["%s.key" % user].data for user \
- in superusers if \
- ("%s.key" % user) in rdata])
- perms = {'owner':'root', 'group':'root', 'perms':'0600'}
- [entry.attrib.__setitem__(key, value) for (key, value) \
- in perms.iteritems()]
-
- def gen_sudoers(self, entry, metadata):
-        """Build sudoers file based on current ACLs."""
- superusers = self.repository.entries['superusers'].data.split()
- try:
- rootlike = [line.split(':', 1) for line in \
- self.repository.entries['rootlike'].data.split()]
- superusers += [user for (user, host) in rootlike \
- if host == metadata.hostname.split('.')[0]]
- except:
- pass
- entry.text = self.repository.entries['static.sudoers'].data
- entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \
- for uname in superusers])
- perms = {'owner':'root', 'group':'root', 'perms':'0440'}
- [entry.attrib.__setitem__(key, value) for (key, value) \
- in perms.iteritems()]
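
A minimal sketch of how the Account generator composes /etc/sudoers from
the repository entries described in its docstring (static.sudoers,
superusers, rootlike). The repository contents below are hypothetical;
the real plugin reads them through DirectoryBacked.

    # Hypothetical repository contents; the plugin reads these from the
    # Account data directory via DirectoryBacked.
    repo = {
        'static.sudoers': "Defaults requiretty\nroot ALL=(ALL) ALL\n",
        'superusers': "alice bob\n",
        'rootlike': "carol:login1\n",
    }

    def gen_sudoers(hostname):
        superusers = repo['superusers'].split()
        # rootlike entries are "user:host" pairs; root is granted only
        # on the named host
        rootlike = [line.split(':', 1) for line in repo['rootlike'].split()]
        superusers += [user for (user, host) in rootlike
                       if host == hostname.split('.')[0]]
        text = repo['static.sudoers']
        text += "".join("%s ALL=(ALL) ALL\n" % u for u in superusers)
        return text

    print(gen_sudoers('login1.example.com'))
    # root and the static entries, then alice, bob, and carol lines
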
diff --git a/build/lib/Bcfg2/Server/Plugins/BB.py b/build/lib/Bcfg2/Server/Plugins/BB.py
deleted file mode 100644
index 137142b66..000000000
--- a/build/lib/Bcfg2/Server/Plugins/BB.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Plugin
-import glob
-import os
-import socket
-
-# manage boot symlinks
-# add statistics check to do build->boot mods
-
-# map profiles: if the first array is not empty, replace -p with the determined profile
-logger = Bcfg2.Server.Plugin.logger
-
-class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
- """Class for bb files."""
- def Index(self):
- """Build data into an xml object."""
-
- try:
- self.data = lxml.etree.XML(self.data)
- except lxml.etree.XMLSyntaxError:
- Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
- return
- self.tftppath = self.data.get('tftp', '/tftpboot')
- self.macs = {}
- self.users = {}
- self.actions = {}
- self.bootlinks = []
-
- for node in self.data.findall('Node'):
- iface = node.find('Interface')
- if iface != None:
-                mac = "01-%s" % (iface.get('mac').replace(':','-').lower())
- self.actions[node.get('name')] = node.get('action')
- self.bootlinks.append((mac, node.get('action')))
- try:
- ip = socket.gethostbyname(node.get('name'))
-                except socket.error:
-                    logger.error("failed host resolution for %s" % node.get('name'))
-                    continue
-
-                self.macs[node.get('name')] = (iface.get('mac'), ip)
-            else:
-                logger.error("BB: Node has no Interface: %s" % lxml.etree.tostring(node))
- self.users[node.get('name')] = node.get('user',"").split(':')
-
- def enforce_bootlinks(self):
- for mac, target in self.bootlinks:
- path = self.tftppath + '/' + mac
- if not os.path.islink(path):
- logger.error("Boot file %s not a link" % path)
- if target != os.readlink(path):
- try:
- os.unlink(path)
- os.symlink(target, path)
- except:
- logger.error("Failed to modify link %s" % path)
-
-class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
- __child__ = BBfile
-
-
-class BB(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector):
- """The BB plugin maps users to machines and metadata to machines."""
- name = 'BB'
- version = '$Revision$'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.store = BBDirectoryBacked(self.data, core.fam)
-
- def get_additional_data(self, metadata):
-
- users = {}
- for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
- pubkeys = []
- for fname in glob.glob('/home/%s/.ssh/*.pub'%user):
- pubkeys.append(open(fname).read())
-
- users[user] = pubkeys
-
- return dict([('users', users),
- ('macs', self.store.entries['bb.xml'].macs)])
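
The bb.xml structure that Index() expects is implied by the code above:
Node elements carrying name/action/user attributes, each with an
Interface child holding the MAC address. A sketch with a hypothetical
bb.xml document:

    import lxml.etree

    # Hypothetical bb.xml content matching what Index() parses.
    doc = lxml.etree.XML("""
    <BB tftp="/tftpboot">
      <Node name="node01" action="build" user="alice:bob">
        <Interface mac="00:11:22:33:44:55"/>
      </Node>
    </BB>""")

    for node in doc.findall('Node'):
        iface = node.find('Interface')
        # pxelinux-style boot file name: "01-" plus the dash-separated MAC
        mac = "01-%s" % iface.get('mac').replace(':', '-').lower()
        print(mac, node.get('action'), node.get('user', '').split(':'))
    # 01-00-11-22-33-44-55 build ['alice', 'bob']
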
diff --git a/build/lib/Bcfg2/Server/Plugins/Base.py b/build/lib/Bcfg2/Server/Plugins/Base.py
deleted file mode 100644
index 8e5ca1cd9..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Base.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""This module sets up a base list of configuration entries."""
-__revision__ = '$Revision$'
-
-import Bcfg2.Server.Plugin
-import copy
-import lxml.etree
-
-class Base(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.XMLDirectoryBacked):
- """This Structure is good for the pile of independent configs
- needed for most actual systems.
- """
- name = 'Base'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = Bcfg2.Server.Plugin.StructFile
-
- """Base creates independent clauses based on client metadata."""
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- try:
- Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self,
- self.data,
- self.core.fam)
- except OSError:
- self.logger.error("Failed to load Base repository")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def BuildStructures(self, metadata):
- """Build structures for client described by metadata."""
- ret = lxml.etree.Element("Independent", version='2.0')
- fragments = reduce(lambda x, y: x+y,
- [base.Match(metadata) for base
- in self.entries.values()], [])
- [ret.append(copy.deepcopy(frag)) for frag in fragments]
- return [ret]
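
The reduce(lambda x, y: x + y, ...) in BuildStructures is plain list
concatenation over each StructFile's Match() results. The same
aggregation written with itertools.chain, as a sketch:

    import copy
    import itertools
    import lxml.etree

    def build_structures(entries, metadata):
        # entries: objects exposing Match(metadata) -> list of elements
        ret = lxml.etree.Element("Independent", version='2.0')
        for frag in itertools.chain.from_iterable(
                e.Match(metadata) for e in entries):
            ret.append(copy.deepcopy(frag))
        return [ret]
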
diff --git a/build/lib/Bcfg2/Server/Plugins/Bundler.py b/build/lib/Bcfg2/Server/Plugins/Bundler.py
deleted file mode 100644
index 47cd7e2c4..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Bundler.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""This provides bundle clauses with translation functionality."""
-__revision__ = '$Revision$'
-
-import copy
-import lxml.etree
-import re
-
-import Bcfg2.Server.Plugin
-
-try:
- import genshi.template
- import genshi.template.base
- import Bcfg2.Server.Plugins.SGenshi
- have_genshi = True
-except:
- have_genshi = False
-
-class BundleFile(Bcfg2.Server.Plugin.StructFile):
- def get_xml_value(self, metadata):
- bundlename = self.name.split('/')[-1][:-4]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- [bundle.append(copy.deepcopy(item)) for item in self.Match(metadata)]
- return bundle
-
-class Bundler(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.XMLDirectoryBacked):
- """The bundler creates dependent clauses based on the bundle/translation scheme from Bcfg1."""
- name = 'Bundler'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- patterns = re.compile('^(?P<name>.*)\.(xml|genshi)$')
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- self.encoding = core.encoding
- self.__child__ = self.template_dispatch
- try:
- Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
- except OSError:
- self.logger.error("Failed to load Bundle repository")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def template_dispatch(self, name):
- if name.endswith('.xml'):
- return BundleFile(name)
- elif name.endswith('.genshi'):
- if have_genshi:
- spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
-
- def BuildStructures(self, metadata):
- """Build all structures for client (metadata)."""
- bundleset = []
- for bundlename in metadata.bundles:
- entries = [item for (key, item) in self.entries.iteritems() if \
- self.patterns.match(key).group('name') == bundlename]
- if len(entries) == 0:
- continue
- elif len(entries) == 1:
- try:
- bundleset.append(entries[0].get_xml_value(metadata))
- except genshi.template.base.TemplateError, t:
- self.logger.error("Bundler: Failed to template genshi bundle %s" \
- % (bundlename))
- self.logger.error(t)
- except:
- self.logger.error("Bundler: Unexpected bundler error for %s" \
- % (bundlename), exc_info=1)
- else:
- self.logger.error("Got multiple matches for bundle %s" \
- % (bundlename))
- return bundleset
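
template_dispatch selects a handler by extension, while BuildStructures
matches bundle names through the class-level patterns regex, so ntp.xml
and ntp.genshi both serve a bundle named "ntp". The same regex in
isolation (the file names are examples):

    import re

    patterns = re.compile(r'^(?P<name>.*)\.(xml|genshi)$')
    for fname in ('ntp.xml', 'ssh.genshi', 'README'):
        m = patterns.match(fname)
        print(fname, '->', m.group('name') if m else None)
    # ntp.xml -> ntp
    # ssh.genshi -> ssh
    # README -> None
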
diff --git a/build/lib/Bcfg2/Server/Plugins/Bzr.py b/build/lib/Bcfg2/Server/Plugins/Bzr.py
deleted file mode 100644
index a9a5eb814..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Bzr.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import Bcfg2.Server.Plugin
-from bzrlib.workingtree import WorkingTree
-from bzrlib import errors
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Bzr')
-
-class Bzr(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Bzr is a version plugin for dealing with Bcfg2 repos."""
- name = 'Bzr'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- self.core = core
- self.datastore = datastore
-
- # Read revision from bcfg2 repo
- revision = self.get_revision()
-
- logger.debug("Initialized Bazaar plugin with directory = %(dir)s at revision = %(rev)s" % {'dir': datastore, 'rev': revision})
-
- def get_revision(self):
- """Read Bazaar revision information for the Bcfg2 repository."""
- try:
- working_tree = WorkingTree.open(self.datastore)
- revision = str(working_tree.branch.revno())
- if working_tree.has_changes(working_tree.basis_tree()) or working_tree.unknowns():
- revision += "+"
- except errors.NotBranchError:
- logger.error("Failed to read Bazaar branch; disabling Bazaar support")
- raise Bcfg2.Server.Plugin.PluginInitError
- return revision
diff --git a/build/lib/Bcfg2/Server/Plugins/Cfg.py b/build/lib/Bcfg2/Server/Plugins/Cfg.py
deleted file mode 100644
index dd1e792ec..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Cfg.py
+++ /dev/null
@@ -1,165 +0,0 @@
-"""This module implements a config file repository."""
-__revision__ = '$Revision$'
-
-import binascii
-import logging
-import lxml
-import os
-import re
-import tempfile
-
-import Bcfg2.Server.Plugin
-
-logger = logging.getLogger('Bcfg2.Plugins.Cfg')
-
-def process_delta(data, delta):
- if not delta.specific.delta:
- return data
- if delta.specific.delta == 'cat':
- datalines = data.split('\n')
- for line in delta.data.split('\n'):
- if not line:
- continue
- if line[0] == '+':
- datalines.append(line[1:])
- elif line[0] == '-':
- if line[1:] in datalines:
- datalines.remove(line[1:])
- return "\n".join(datalines)
- elif delta.specific.delta == 'diff':
- basehandle, basename = tempfile.mkstemp()
- basefile = open(basename, 'w')
- basefile.write(data)
- basefile.close()
- os.close(basehandle)
- dhandle, dname = tempfile.mkstemp()
- dfile = open(dname, 'w')
- dfile.write(delta.data)
- dfile.close()
- os.close(dhandle)
- ret = os.system("patch -uf %s < %s > /dev/null 2>&1" \
- % (basefile.name, dfile.name))
- output = open(basefile.name, 'r').read()
- [os.unlink(fname) for fname in [basefile.name, dfile.name]]
- if ret >> 8 != 0:
- raise Bcfg2.Server.Plugin.PluginExecutionError, ('delta', delta)
- return output
-
-class CfgMatcher:
- def __init__(self, fname):
- name = re.escape(fname)
-        self.basefile_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|\\.G(?P<prio>\d+)_(?P<group>\S+))$' % name)
- self.delta_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|\\.G(?P<prio>\d+)_(?P<group>\S+))\\.(?P<delta>(cat|diff))$' % name)
- self.cat_count = fname.count(".cat")
- self.diff_count = fname.count(".diff")
-
- def match(self, fname):
- if fname.count(".cat") > self.cat_count \
- or fname.count('.diff') > self.diff_count:
- return self.delta_reg.match(fname)
- return self.basefile_reg.match(fname)
-
-class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
- def __init__(self, basename, path, entry_type, encoding):
- Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path,
- entry_type, encoding)
- self.specific = CfgMatcher(path.split('/')[-1])
-
- def sort_by_specific(self, one, other):
- return cmp(one.specific, other.specific)
-
- def get_pertinent_entries(self, metadata):
- '''return a list of all entries pertinent to a client => [base, delta1, delta2]'''
- matching = [ent for ent in self.entries.values() if \
- ent.specific.matches(metadata)]
- matching.sort(self.sort_by_specific)
- non_delta = [matching.index(m) for m in matching if not m.specific.delta]
- if not non_delta:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- base = min(non_delta)
- used = matching[:base+1]
- used.reverse()
- return used
-
- def bind_entry(self, entry, metadata):
- self.bind_info_to_entry(entry, metadata)
- used = self.get_pertinent_entries(metadata)
- basefile = used.pop(0)
- data = basefile.data
- if entry.tag == 'Path':
- entry.set('type', 'file')
- for delta in used:
- data = data.strip()
- data = process_delta(data, delta)
- if used:
- data += '\n'
- if entry.get('encoding') == 'base64':
- entry.text = binascii.b2a_base64(data)
- else:
- entry.text = unicode(data, self.encoding)
- if entry.text in ['', None]:
- entry.set('empty', 'true')
-
- def list_accept_choices(self, metadata):
- '''return a list of candidate pull locations'''
- used = self.get_pertinent_entries(metadata)
- ret = []
- if used:
- ret.append(used[0].specific)
- if not ret[0].hostname:
- ret.append(Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname))
- return ret
-
- def build_filename(self, specific):
- bfname = self.path + '/' + self.path.split('/')[-1]
- if specific.all:
- return bfname
- elif specific.group:
- return "%s.G%d_%s" % (bfname, specific.prio, specific.group)
- elif specific.hostname:
- return "%s.H_%s" % (bfname, specific.hostname)
-
- def write_update(self, specific, new_entry, log):
- if 'text' in new_entry:
- name = self.build_filename(specific)
- open(name, 'w').write(new_entry['text'])
- if log:
- logger.info("Wrote file %s" % name)
- badattr = [attr for attr in ['owner', 'group', 'perms'] if attr in new_entry]
- if badattr:
- metadata_updates = {}
- metadata_updates.update(self.metadata)
- for attr in badattr:
- metadata_updates[attr] = new_entry.get(attr)
- if self.infoxml:
- infoxml = lxml.etree.Element('FileInfo')
- infotag = lxml.etree.SubElement(infoxml, 'Info')
- [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
- for attr in metadata_updates]
- ofile = open(self.path + "/info.xml","w")
- ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
- ofile.close()
- if log:
- logger.info("Wrote file %s" % (self.path + "/info.xml"))
- else:
- infofile = open(self.path + '/:info', 'w')
- for x in metadata_updates.iteritems():
- infofile.write("%s: %s\n" % x)
- infofile.close()
- if log:
- logger.info("Wrote file %s" % infofile.name)
-
-class Cfg(Bcfg2.Server.Plugin.GroupSpool,
- Bcfg2.Server.Plugin.PullTarget):
-    """This generator implements the configuration file repository for Bcfg2."""
- name = 'Cfg'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- es_cls = CfgEntrySet
- es_child_cls = Bcfg2.Server.Plugin.SpecificData
-
- def AcceptChoices(self, entry, metadata):
- return self.entries[entry.get('name')].list_accept_choices(metadata)
-
- def AcceptPullData(self, specific, new_entry, log):
- return self.entries[new_entry.get('name')].write_update(specific, new_entry, log)
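
A worked example of the 'cat' delta format that process_delta handles:
delta lines beginning with '+' are appended to the base file, and lines
beginning with '-' are removed from it. The file contents here are
hypothetical:

    def apply_cat(data, delta):
        # mirrors the 'cat' branch of process_delta above
        lines = data.split('\n')
        for line in delta.split('\n'):
            if not line:
                continue
            if line[0] == '+':
                lines.append(line[1:])
            elif line[0] == '-' and line[1:] in lines:
                lines.remove(line[1:])
        return '\n'.join(lines)

    base = "server a.ntp.org\nserver b.ntp.org"
    delta = "-server b.ntp.org\n+server c.ntp.org"
    print(apply_cat(base, delta))
    # server a.ntp.org
    # server c.ntp.org
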
diff --git a/build/lib/Bcfg2/Server/Plugins/Cvs.py b/build/lib/Bcfg2/Server/Plugins/Cvs.py
deleted file mode 100644
index ea898c023..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Cvs.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from subprocess import Popen, PIPE
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Cvs')
-
-class Cvs(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
-    """CVS is a version plugin for dealing with the Bcfg2 repository."""
- name = 'Cvs'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- self.core = core
- self.datastore = datastore
-
- # path to cvs directory for Bcfg2 repo
- cvs_dir = "%s/CVSROOT" % datastore
-
- # Read revision from Bcfg2 repo
- if os.path.isdir(cvs_dir):
- self.get_revision()
- else:
- logger.error("%s is not a directory" % cvs_dir)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- logger.debug("Initialized cvs plugin with cvs directory = %s" % cvs_dir)
-
- def get_revision(self):
- """Read cvs revision information for the Bcfg2 repository."""
- try:
- data = Popen("env LC_ALL=C cvs log",
- shell=True,
- cwd=self.datastore,
- stdout=PIPE).stdout.readlines()
- revision = data[3].strip('\n')
- except IndexError:
- logger.error("Failed to read cvs log; disabling cvs support")
-            logger.error('''Ran command "cvs log" from directory "%s"''' % (self.datastore))
-            logger.error("Got output: %s" % data)
-            raise Bcfg2.Server.Plugin.PluginInitError
-        return revision
-
diff --git a/build/lib/Bcfg2/Server/Plugins/DBStats.py b/build/lib/Bcfg2/Server/Plugins/DBStats.py
deleted file mode 100644
index 2712cd45f..000000000
--- a/build/lib/Bcfg2/Server/Plugins/DBStats.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import binascii
-import difflib
-import logging
-import lxml.etree
-import platform
-import time
-
-try:
- from django.core.exceptions import MultipleObjectsReturned
-except ImportError:
- pass
-
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Reports.importscript
-from Bcfg2.Server.Reports.reports.models import Client
-import Bcfg2.Server.Reports.settings
-from Bcfg2.Server.Reports.updatefix import update_database
-# for debugging output only
-logger = logging.getLogger('Bcfg2.Plugins.DBStats')
-
-class DBStats(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
- Bcfg2.Server.Plugin.PullSource):
- name = 'DBStats'
- __version__ = '$Id$'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.PullSource.__init__(self)
- self.cpath = "%s/Metadata/clients.xml" % datastore
- self.core = core
- logger.debug("Searching for new models to add to the statistics database")
- try:
- update_database()
- except Exception, inst:
- logger.debug(str(inst))
- logger.debug(str(type(inst)))
-
- def handle_statistic(self, metadata, data):
- newstats = data.find("Statistics")
- newstats.set('time', time.asctime(time.localtime()))
- # ick
- data = lxml.etree.tostring(newstats)
- ndx = lxml.etree.XML(data)
- e = lxml.etree.Element('Node', name=metadata.hostname)
- e.append(ndx)
- container = lxml.etree.Element("ConfigStatistics")
- container.append(e)
-
- # FIXME need to build a metadata interface to expose a list of clients
- start = time.time()
- for i in [1, 2, 3]:
- try:
- Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clientdata,
- container,
- 0,
- logger,
- True,
- platform.node())
- logger.info("Imported data for %s in %s seconds" \
- % (metadata.hostname, time.time() - start))
- return
- except MultipleObjectsReturned, e:
- logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
- (metadata.hostname, e))
- logger.error("DBStats: Data is inconsistent")
- break
- except:
- logger.error("DBStats: Failed to write to db (lock); retrying",
- exc_info=1)
- logger.error("DBStats: Retry limit failed for %s; aborting operation" \
- % metadata.hostname)
-
- def GetExtra(self, client):
- c_inst = Client.objects.filter(name=client)[0]
- return [(a.entry.kind, a.entry.name) for a in
- c_inst.current_interaction.extra()]
-
- def GetCurrentEntry(self, client, e_type, e_name):
- try:
- c_inst = Client.objects.filter(name=client)[0]
- except IndexError:
- self.logger.error("Unknown client: %s" % client)
- raise Bcfg2.Server.Plugin.PluginExecutionError
- result = c_inst.current_interaction.bad().filter(entry__kind=e_type,
- entry__name=e_name)
- if not result:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- entry = result[0]
- ret = []
- data = ('owner', 'group', 'perms')
- for t in data:
- if getattr(entry.reason, "current_%s" % t) == '':
- ret.append(getattr(entry.reason, t))
- else:
- ret.append(getattr(entry.reason, "current_%s" % t))
-
- if entry.reason.current_diff != '':
- if entry.reason.is_binary:
- ret.append(binascii.a2b_base64(entry.reason.current_diff))
- else:
- ret.append('\n'.join(difflib.restore(\
- entry.reason.current_diff.split('\n'), 1)))
- elif entry.reason.is_binary:
- # If len is zero the object was too large to store
- raise Bcfg2.Server.Plugin.PluginExecutionError
- else:
- ret.append(None)
- return ret
diff --git a/build/lib/Bcfg2/Server/Plugins/Darcs.py b/build/lib/Bcfg2/Server/Plugins/Darcs.py
deleted file mode 100644
index eb34a52c4..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Darcs.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import os
-from subprocess import Popen, PIPE
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Darcs')
-
-class Darcs(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Darcs is a version plugin for dealing with Bcfg2 repos."""
- name = 'Darcs'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Version.__init__(self)
- self.core = core
- self.datastore = datastore
-
- # path to darcs directory for bcfg2 repo
- darcs_dir = "%s/_darcs" % datastore
-
- # Read changeset from bcfg2 repo
- if os.path.isdir(darcs_dir):
- self.get_revision()
- else:
- logger.error("%s is not present." % darcs_dir)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- logger.debug("Initialized Darcs plugin with darcs directory = %s" % darcs_dir)
-
- def get_revision(self):
- """Read Darcs changeset information for the Bcfg2 repository."""
- try:
- data = Popen("env LC_ALL=C darcs changes",
- shell=True,
- cwd=self.datastore,
- stdout=PIPE).stdout.readlines()
- revision = data[0].strip('\n')
- except:
- logger.error("Failed to read darcs repository; disabling Darcs support")
- logger.error('''Ran command "darcs changes" from directory "%s"''' % (self.datastore))
- logger.error("Got output: %s" % data)
- raise Bcfg2.Server.Plugin.PluginInitError
- return revision
-
diff --git a/build/lib/Bcfg2/Server/Plugins/Decisions.py b/build/lib/Bcfg2/Server/Plugins/Decisions.py
deleted file mode 100644
index 1f9525a0e..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Decisions.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import logging
-import lxml.etree
-import Bcfg2.Server.Plugin
-logger = logging.getLogger('Bcfg2.Plugins.Decisions')
-
-class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
- def handle_event(self, event):
- Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
- self.contents = lxml.etree.XML(self.data)
-
- def get_decisions(self):
- return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]
-
-class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
- def __init__(self, path, fam, encoding):
- """Container for decision specification files.
-
- Arguments:
- - `path`: repository path
- - `fam`: reference to the file monitor
- - `encoding`: XML character encoding
-
- """
- pattern = '(white|black)list'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
- DecisionFile, encoding)
- try:
- fam.AddMonitor(path, self)
- except OSError, e:
- logger.error('Adding filemonitor for %s failed. '
- 'Make sure directory exists' % path)
- raise Bcfg2.Server.Plugin.PluginInitError(e)
-
- def HandleEvent(self, event):
- if event.filename != self.path:
- return self.handle_event(event)
-
- def GetDecisions(self, metadata, mode):
- ret = []
- candidates = [c for c in self.get_matching(metadata)
- if c.name.split('/')[-1].startswith(mode)]
- for c in candidates:
- ret += c.get_decisions()
- return ret
-
-class Decisions(DecisionSet,
- Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Decision):
- name = 'Decisions'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- """Decisions plugins
-
- Arguments:
- - `core`: Bcfg2.Core instance
- - `datastore`: File repository location
-
- """
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Decision.__init__(self)
- DecisionSet.__init__(self, self.data, core.fam, core.encoding)
-
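
The decision files are whitelist/blacklist XML documents whose Decision
elements DecisionFile flattens into (type, name) tuples. A sketch with
hypothetical file content, parsed the same way get_decisions does:

    import lxml.etree

    data = """
    <Decisions>
      <Decision type="Service" name="ntpd"/>
      <Decision type="Path" name="/etc/ntp.conf"/>
    </Decisions>"""

    contents = lxml.etree.XML(data)
    print([(x.get('type'), x.get('name'))
           for x in contents.xpath('.//Decision')])
    # [('Service', 'ntpd'), ('Path', '/etc/ntp.conf')]
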
diff --git a/build/lib/Bcfg2/Server/Plugins/Deps.py b/build/lib/Bcfg2/Server/Plugins/Deps.py
deleted file mode 100644
index 088f8cdad..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Deps.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""This plugin provides automatic dependency handling."""
-__revision__ = '$Revision$'
-
-import lxml.etree
-
-import Bcfg2.Server.Plugin
-
-class DNode(Bcfg2.Server.Plugin.INode):
-    """DNode provides support for single predicate types for dependencies."""
- raw = {'Group':"lambda x:'%s' in x.groups and predicate(x)"}
- containers = ['Group']
-
- def __init__(self, data, idict, parent=None):
- self.data = data
- self.contents = {}
- if parent == None:
- self.predicate = lambda x:True
- else:
- predicate = parent.predicate
- if data.tag in self.raw.keys():
- self.predicate = eval(self.raw[data.tag] % (data.get('name')), {'predicate':predicate})
- else:
- raise Exception
- mytype = self.__class__
- self.children = []
- for item in data.getchildren():
- if item.tag in self.containers:
- self.children.append(mytype(item, idict, self))
- else:
- data = [(child.tag, child.get('name')) for child in item.getchildren()]
- try:
- self.contents[item.tag][item.get('name')] = data
- except KeyError:
- self.contents[item.tag] = {item.get('name'):data}
-
-class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc):
- __node__ = DNode
-
-class Deps(Bcfg2.Server.Plugin.PrioDir,
- Bcfg2.Server.Plugin.StructureValidator):
- name = 'Deps'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = DepXMLSrc
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.StructureValidator.__init__(self)
- self.cache = {}
-
- def HandleEvent(self, event):
- self.cache = {}
- Bcfg2.Server.Plugin.PrioDir.HandleEvent(self, event)
-
- def validate_structures(self, metadata, structures):
- entries = []
- prereqs = []
- for structure in structures:
- for entry in structure.getchildren():
- if (entry.tag, entry.get('name')) not in entries \
- and not isinstance(entry, lxml.etree._Comment):
- entries.append((entry.tag, entry.get('name')))
- entries.sort()
- entries = tuple(entries)
- gdata = list(metadata.groups)
- gdata.sort()
- gdata = tuple(gdata)
- if (entries, gdata) in self.cache:
- prereqs = self.cache[(entries, gdata)]
- else:
- [src.Cache(metadata) for src in self.entries.values()]
-
- toexamine = list(entries[:])
- while toexamine:
- entry = toexamine.pop()
- matching = [src for src in self.entries.values()
- if src.cache and entry[0] in src.cache[1]
- and entry[1] in src.cache[1][entry[0]]]
- if len(matching) > 1:
- prio = [int(src.priority) for src in matching]
- if prio.count(max(prio)) > 1:
- self.logger.error("Found conflicting %s sources with same priority for %s, pkg %s" %
- (entry[0].lower(), metadata.hostname, entry[1]))
- raise Bcfg2.Server.Plugin.PluginExecutionError
- index = prio.index(max(prio))
- matching = [matching[index]]
-
- if not matching:
- continue
- elif len(matching) == 1:
- for prq in matching[0].cache[1][entry[0]][entry[1]]:
- if prq not in prereqs and prq not in entries:
- toexamine.append(prq)
- prereqs.append(prq)
- self.cache[(entries, gdata)] = prereqs
-
- newstruct = lxml.etree.Element("Independent")
- for tag, name in prereqs:
- try:
- lxml.etree.SubElement(newstruct, tag, name=name)
- except:
- self.logger.error("Failed to add dep entry for %s:%s" % (tag, name))
- structures.append(newstruct)
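
DNode builds each node's predicate by eval'ing the 'raw' template, so
nested Group elements AND their conditions together through the parent's
predicate. A stripped-down demonstration of that composition (the Meta
class is a stand-in for client metadata):

    raw = "lambda x:'%s' in x.groups and predicate(x)"

    class Meta(object):
        def __init__(self, groups):
            self.groups = groups

    root = lambda x: True
    linux = eval(raw % 'linux', {'predicate': root})
    desktop = eval(raw % 'desktop', {'predicate': linux})

    print(desktop(Meta(['linux', 'desktop'])))  # True
    print(desktop(Meta(['desktop'])))           # False: parent group missing
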
diff --git a/build/lib/Bcfg2/Server/Plugins/Editor.py b/build/lib/Bcfg2/Server/Plugins/Editor.py
deleted file mode 100644
index bfd4d6e93..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Editor.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import Bcfg2.Server.Plugin
-import re
-import lxml.etree
-
-def linesub(pattern, repl, filestring):
- """Substitutes instances of pattern with repl in filestring."""
- if filestring == None:
- filestring = ''
- output = list()
- fileread = filestring.split('\n')
- for line in fileread:
-        output.append(re.sub(pattern, repl, line))
- return '\n'.join(output)
-
-class EditDirectives(Bcfg2.Server.Plugin.SpecificData):
- """This object handles the editing directives."""
- def ProcessDirectives(self, input):
- """Processes a list of edit directives on input."""
- temp = input
- for directive in self.data.split('\n'):
- directive = directive.split(',')
- temp = linesub(directive[0], directive[1], temp)
- return temp
-
-class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
- def __init__(self, basename, path, entry_type, encoding):
- self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" %path.split('/')[-1])
- Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, entry_type, encoding)
- self.inputs = dict()
-
- def bind_entry(self, entry, metadata):
- client = metadata.hostname
- filename = entry.get('name')
- permdata = {'owner':'root', 'group':'root'}
- permdata['perms'] = '0644'
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
- entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
- if not entry.text:
- entry.set('empty', 'true')
- try:
- f = open('%s/%s.H_%s' %(self.path, filename.split('/')[-1], client), 'w')
- f.write(entry.text)
- f.close()
- except:
- pass
-
- def get_client_data(self, client):
- return self.inputs[client]
-
-
-class Editor(Bcfg2.Server.Plugin.GroupSpool,
- Bcfg2.Server.Plugin.Probing):
- name = 'Editor'
- __version__ = '$Id$'
- __author__ = 'bcfg2-dev@mcs.anl.gov'
- filename_pattern = 'edits'
- es_child_cls = EditDirectives
- es_cls = EditEntrySet
-
- def GetProbes(self, _):
- '''Return a set of probes for execution on client'''
- probelist = list()
- for name in self.entries.keys():
- probe = lxml.etree.Element('probe')
- probe.set('name', name)
- probe.set('source', "Editor")
- probe.text = "cat %s" % name
- probelist.append(probe)
- return probelist
-
- def ReceiveData(self, client, datalist):
- for data in datalist:
- self.entries[data.get('name')].inputs[client.hostname] = data.text
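
Edit directives are stored one "pattern,repl" pair per line and applied
line by line to the file content probed from the client. A sketch of
that flow; the directive and file content are hypothetical:

    import re

    def linesub(pattern, repl, filestring):
        # apply the substitution to each line of the file
        if filestring is None:
            filestring = ''
        return '\n'.join(re.sub(pattern, repl, line)
                         for line in filestring.split('\n'))

    directives = "^PermitRootLogin .*,PermitRootLogin no"
    client_data = "Port 22\nPermitRootLogin yes"
    for directive in directives.split('\n'):
        pattern, repl = directive.split(',')
        client_data = linesub(pattern, repl, client_data)
    print(client_data)
    # Port 22
    # PermitRootLogin no
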
diff --git a/build/lib/Bcfg2/Server/Plugins/Fossil.py b/build/lib/Bcfg2/Server/Plugins/Fossil.py
deleted file mode 100644
index 57d427673..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Fossil.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-from subprocess import Popen, PIPE
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Fossil')
-
-class Fossil(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Fossil is a version plugin for dealing with Bcfg2 repos."""
- name = 'Fossil'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- self.core = core
- self.datastore = datastore
-
- # path to fossil file for bcfg2 repo
- fossil_file = "%s/_FOSSIL_" % datastore
-
- # Read revision from bcfg2 repo
- if os.path.isfile(fossil_file):
- revision = self.get_revision()
- elif not os.path.isdir(datastore):
- logger.error("%s is not a directory" % datastore)
- raise Bcfg2.Server.Plugin.PluginInitError
- else:
- logger.error("%s is not a file" % fossil_file)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- logger.debug("Initialized Fossil.py plugin with %(ffile)s at revision %(frev)s" \
- % {'ffile': fossil_file, 'frev': revision})
-
- def get_revision(self):
- """Read fossil revision information for the Bcfg2 repository."""
- try:
- data = Popen("env LC_ALL=C fossil info",
- shell=True,
- cwd=self.datastore,
- stdout=PIPE).stdout.readlines()
- revline = [line.split(': ')[1].strip() for line in data if \
- line.split(': ')[0].strip() == 'checkout'][-1]
- revision = revline.split(' ')[0]
- except IndexError:
- logger.error("Failed to read fossil info; disabling fossil support")
- logger.error('''Ran command "fossil info" from directory "%s"''' % (self.datastore))
- logger.error("Got output: %s" % data)
- raise Bcfg2.Server.Plugin.PluginInitError
- return revision
diff --git a/build/lib/Bcfg2/Server/Plugins/Git.py b/build/lib/Bcfg2/Server/Plugins/Git.py
deleted file mode 100644
index aaeac12ae..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Git.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""The Git plugin provides a revision interface for Bcfg2 repos using git."""
-
-import os
-from dulwich.repo import Repo
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Git')
-
-
-class Git(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Git is a version plugin for dealing with Bcfg2 repos."""
- name = 'Git'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Version.__init__(self)
- self.core = core
- self.datastore = datastore
-
- # path to git directory for bcfg2 repo
- git_dir = "%s/.git" % datastore
-
- # Read revision from bcfg2 repo
- if os.path.isdir(git_dir):
- self.get_revision()
- else:
- logger.error("%s is not a directory" % git_dir)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- logger.debug("Initialized git plugin with git directory %s" % git_dir)
-
- def get_revision(self):
- """Read git revision information for the Bcfg2 repository."""
- try:
- repo = Repo(self.datastore)
- revision = repo.head()
- except:
- logger.error("Failed to read git repository; disabling git support")
- raise Bcfg2.Server.Plugin.PluginInitError
- return revision
diff --git a/build/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/build/lib/Bcfg2/Server/Plugins/GroupPatterns.py
deleted file mode 100644
index 3801a6a08..000000000
--- a/build/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import lxml.etree
-import re
-
-import Bcfg2.Server.Plugin
-
-class PackedDigitRange(object):
- def __init__(self, digit_range):
- self.sparse = list()
- self.ranges = list()
- for item in digit_range.split(','):
- if '-' in item:
- self.ranges.append(tuple([int(x) for x in item.split('-')]))
- else:
- self.sparse.append(int(item))
-
- def includes(self, other):
- iother = int(other)
- if iother in self.sparse:
- return True
- for (start, end) in self.ranges:
- if iother in xrange(start, end+1):
- return True
- return False
-
-class PatternMap(object):
- range_finder = '\\[\\[[\d\-,]+\\]\\]'
- def __init__(self, pattern, rangestr, groups):
- self.pattern = pattern
- self.rangestr = rangestr
- self.groups = groups
- if pattern != None:
- self.re = re.compile(pattern)
- self.process = self.process_re
- elif rangestr != None:
- self.process = self.process_range
- self.re = re.compile('^' + re.subn(self.range_finder, '(\d+)', rangestr)[0])
- dmatcher = re.compile(re.subn(self.range_finder, '\\[\\[([\d\-,]+)\\]\\]', rangestr)[0])
- self.dranges = [PackedDigitRange(x) for x in dmatcher.match(rangestr).groups()]
- else:
- raise Exception
-
- def process_range(self, name):
- match = self.re.match(name)
- if not match:
- return None
- digits = match.groups()
- for i in range(len(digits)):
- if not self.dranges[i].includes(digits[i]):
- return None
- return self.groups
-
- def process_re(self, name):
- match = self.re.match(name)
- if not match:
- return None
- ret = list()
- sub = match.groups()
- for group in self.groups:
- newg = group
- for idx in range(len(sub)):
- newg = newg.replace('$%s' % (idx+1), sub[idx])
- ret.append(newg)
- return ret
-
-class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- self.patterns = []
-
- def Index(self):
- self.patterns = []
- try:
- parsed = lxml.etree.XML(self.data)
- except:
- Bcfg2.Server.Plugin.logger.error("Failed to read file %s" % self.name)
- return
- for entry in parsed.findall('GroupPattern'):
- try:
- pat = None
- rng = None
- if entry.find('NamePattern') is not None:
- pat = entry.find('NamePattern').text
- if entry.find('NameRange') is not None:
- rng = entry.find('NameRange').text
- groups = [g.text for g in entry.findall('Group')]
- self.patterns.append(PatternMap(pat, rng, groups))
- except:
- Bcfg2.Server.Plugin.logger.error(\
- "GroupPatterns: Failed to initialize pattern %s" % \
- (entry.get('pattern')))
-
- def process_patterns(self, hostname):
- ret = []
- for pattern in self.patterns:
- try:
- gn = pattern.process(hostname)
- if gn is not None:
- ret.extend(gn)
- except:
- Bcfg2.Server.Plugin.logger.error(\
- "GroupPatterns: Failed to process pattern %s for %s" % \
- (pattern.pattern, hostname), exc_info=1)
- return ret
-
-class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector):
- name = "GroupPatterns"
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = PatternFile(self.data + '/config.xml',
- core.fam)
-
- def get_additional_groups(self, metadata):
- return self.config.process_patterns(metadata.hostname)
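
The NameRange syntax handled by PatternMap embeds digit ranges in
"[[...]]" markers; PackedDigitRange then checks each captured digit
group against its sparse values and spans. A worked example with a
hypothetical range string:

    import re

    range_finder = '\\[\\[[\\d\\-,]+\\]\\]'
    rangestr = 'node[[1-32,40]]'

    # as in PatternMap: the range marker becomes a digit-capturing group
    name_re = re.compile('^' + re.subn(range_finder, '(\\d+)', rangestr)[0])
    print(bool(name_re.match('node17')))  # True; digits then range-checked
    print(bool(name_re.match('login1')))  # False
    # PackedDigitRange('1-32,40').includes('17') -> True
    # PackedDigitRange('1-32,40').includes('33') -> False
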
diff --git a/build/lib/Bcfg2/Server/Plugins/Guppy.py b/build/lib/Bcfg2/Server/Plugins/Guppy.py
deleted file mode 100644
index b217378d6..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Guppy.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-This plugin is used to trace memory leaks within the bcfg2-server
-process using Guppy. By default the remote debugger is started
-when this plugin is enabled. The debugger can be shut off in a running
-process using "bcfg2-admin xcmd Guppy.Disable" and reenabled using
-"bcfg2-admin xcmd Guppy.Enable".
-
-To attach the console run:
-
-python -c "from guppy import hpy;hpy().monitor()"
-
-For example:
-
-# python -c "from guppy import hpy;hpy().monitor()"
-<Monitor>
-*** Connection 1 opened ***
-<Monitor> lc
-CID PID ARGV
- 1 25063 ['/usr/sbin/bcfg2-server', '-D', '/var/run/bcfg2-server.pid']
-<Monitor> sc 1
-Remote connection 1. To return to Monitor, type <Ctrl-C> or .<RETURN>
-<Annex> int
-Remote interactive console. To return to Annex, type '-'.
->>> hp.heap()
-...
-
-
-"""
-import re
-import Bcfg2.Server.Plugin
-
-class Guppy(Bcfg2.Server.Plugin.Plugin):
- """Guppy is a debugging plugin to help trace memory leaks"""
- name = 'Guppy'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- experimental = True
- __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable','Disable']
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
-
- self.Enable()
-
- def Enable(self):
- """Enable remote debugging"""
- try:
- from guppy.heapy import Remote
- Remote.on()
- except:
- self.logger.error("Failed to create Heapy context")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def Disable(self):
- """Disable remote debugging"""
- try:
- from guppy.heapy import Remote
- Remote.off()
- except:
- self.logger.error("Failed to disable Heapy")
- raise Bcfg2.Server.Plugin.PluginInitError
-
diff --git a/build/lib/Bcfg2/Server/Plugins/Hg.py b/build/lib/Bcfg2/Server/Plugins/Hg.py
deleted file mode 100644
index 3f2864a1c..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Hg.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from mercurial import ui, hg
-from subprocess import Popen, PIPE
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Mercurial')
-
-class Hg(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
-    """Mercurial is a version plugin for dealing with the Bcfg2 repository."""
- name = 'Mercurial'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Version.__init__(self)
- self.core = core
- self.datastore = datastore
-
- # path to hg directory for Bcfg2 repo
- hg_dir = "%s/.hg" % datastore
-
- # Read changeset from bcfg2 repo
- if os.path.isdir(hg_dir):
- self.get_revision()
- else:
- logger.error("%s is not present." % hg_dir)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- logger.debug("Initialized hg plugin with hg directory = %s" % hg_dir)
-
- def get_revision(self):
- """Read hg revision information for the Bcfg2 repository."""
- try:
- repo_path = "%s/" % self.datastore
- repo = hg.repository(ui.ui(), repo_path)
- tip = repo.changelog.tip()
- revision = repo.changelog.rev(tip)
- except:
- logger.error("Failed to read hg repository; disabling mercurial support")
- raise Bcfg2.Server.Plugin.PluginInitError
- return revision
-
diff --git a/build/lib/Bcfg2/Server/Plugins/Hostbase.py b/build/lib/Bcfg2/Server/Plugins/Hostbase.py
deleted file mode 100644
index 65992596d..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Hostbase.py
+++ /dev/null
@@ -1,585 +0,0 @@
-'''This file provides the Hostbase plugin. It manages DNS/DHCP/NIS host information.'''
-__revision__ = '$Revision$'
-
-import os
-os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
-from lxml.etree import Element, SubElement
-import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
-from time import strftime
-from sets import Set
-from django.template import Context, loader
-from django.db import connection
-import re
-import cStringIO
-
-class Hostbase(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.Generator):
- """The Hostbase plugin handles host/network info."""
- name = 'Hostbase'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- filepath = '/my/adm/hostbase/files/bind'
-
- def __init__(self, core, datastore):
-
- self.ready = False
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- files = ['zone.tmpl', 'reversesoa.tmpl', 'named.tmpl', 'reverseappend.tmpl',
- 'dhcpd.tmpl', 'hosts.tmpl', 'hostsappend.tmpl']
- self.filedata = {}
- self.dnsservers = []
- self.dhcpservers = []
- self.templates = {'zone':loader.get_template('zone.tmpl'),
- 'reversesoa':loader.get_template('reversesoa.tmpl'),
- 'named':loader.get_template('named.tmpl'),
- 'namedviews':loader.get_template('namedviews.tmpl'),
- 'reverseapp':loader.get_template('reverseappend.tmpl'),
- 'dhcp':loader.get_template('dhcpd.tmpl'),
- 'hosts':loader.get_template('hosts.tmpl'),
- 'hostsapp':loader.get_template('hostsappend.tmpl'),
- }
- self.Entries['ConfigFile'] = {}
- self.__rmi__ = ['rebuildState']
- try:
- self.rebuildState(None)
- except:
- raise PluginInitError
-
- def FetchFile(self, entry, metadata):
- """Return prebuilt file data."""
- fname = entry.get('name').split('/')[-1]
- if not fname in self.filedata:
- raise PluginExecutionError
- perms = {'owner':'root', 'group':'root', 'perms':'644'}
- [entry.attrib.__setitem__(key, value) for (key, value) in perms.iteritems()]
- entry.text = self.filedata[fname]
-
- def BuildStructures(self, metadata):
- """Build hostbase bundle."""
- if metadata.hostname not in self.dnsservers or metadata.hostname not in self.dhcpservers:
- return []
- output = Element("Bundle", name='hostbase')
- if metadata.hostname in self.dnsservers:
- for configfile in self.Entries['ConfigFile']:
- if re.search('/etc/bind/', configfile):
- SubElement(output, "ConfigFile", name=configfile)
- if metadata.hostname in self.dhcpservers:
- SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf")
- return [output]
-
- def rebuildState(self, _):
- """Pre-cache all state information for hostbase config files
- callable as an XMLRPC function.
-
- """
- self.buildZones()
- self.buildDHCP()
- self.buildHosts()
- self.buildHostsLPD()
- self.buildPrinters()
- self.buildNetgroups()
- return True
-
- def buildZones(self):
- """Pre-build and stash zone files."""
- cursor = connection.cursor()
-
- cursor.execute("SELECT id, serial FROM hostbase_zone")
- zones = cursor.fetchall()
-
- for zone in zones:
- # update the serial number for all zone files
- todaydate = (strftime('%Y%m%d'))
- try:
- if todaydate == str(zone[1])[:8]:
- serial = zone[1] + 1
- else:
- serial = int(todaydate) * 100
- except (KeyError):
- serial = int(todaydate) * 100
- cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0]))
-
- cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'")
- zones = cursor.fetchall()
-
- iplist = []
- hosts = {}
-
- for zone in zones:
- zonefile = cStringIO.StringIO()
- externalzonefile = cStringIO.StringIO()
- cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
- INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- nameservers = cursor.fetchall()
- cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z
- INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- addresses = cursor.fetchall()
- cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z
- INNER JOIN hostbase_mx m ON z.mx_id = m.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- mxs = cursor.fetchall()
- context = Context({
- 'zone': zone,
- 'nameservers': nameservers,
- 'addresses': addresses,
- 'mxs': mxs
- })
- zonefile.write(self.templates['zone'].render(context))
- externalzonefile.write(self.templates['zone'].render(context))
-
- querystring = """SELECT h.hostname, p.ip_addr,
- n.name, c.cname, m.priority, m.mx, n.dns_view
- FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
- INNER JOIN hostbase_mx m ON m.id = x.mx_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE n.name LIKE '%%%%%s'
- AND h.status = 'active'
- ORDER BY h.hostname, n.name, p.ip_addr
- """ % zone[1]
- cursor.execute(querystring)
- zonehosts = cursor.fetchall()
- prevhost = (None, None, None, None)
- cnames = cStringIO.StringIO()
- cnamesexternal = cStringIO.StringIO()
- for host in zonehosts:
- if not host[2].split(".", 1)[1] == zone[1]:
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- cnames = cStringIO.StringIO()
- cnamesexternal = cStringIO.StringIO()
- continue
- if not prevhost[1] == host[1] or not prevhost[2] == host[2]:
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- cnames = cStringIO.StringIO()
- cnamesexternal = cStringIO.StringIO()
- zonefile.write("%-32s%-10s%-32s\n" %
- (host[2].split(".", 1)[0], 'A', host[1]))
- zonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- if host[6] == 'global':
- externalzonefile.write("%-32s%-10s%-32s\n" %
- (host[2].split(".", 1)[0], 'A', host[1]))
- externalzonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- elif not prevhost[5] == host[5]:
- zonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- if host[6] == 'global':
- externalzonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
-
- if host[3]:
- try:
- if host[3].split(".", 1)[1] == zone[1]:
- cnames.write("%-32s%-10s%-32s\n" %
- (host[3].split(".", 1)[0],
- 'CNAME',host[2].split(".", 1)[0]))
- if host[6] == 'global':
- cnamesexternal.write("%-32s%-10s%-32s\n" %
- (host[3].split(".", 1)[0],
- 'CNAME',host[2].split(".", 1)[0]))
- else:
- cnames.write("%-32s%-10s%-32s\n" %
- (host[3]+".",
- 'CNAME',
- host[2].split(".", 1)[0]))
- if host[6] == 'global':
- cnamesexternal.write("%-32s%-10s%-32s\n" %
- (host[3]+".",
- 'CNAME',
- host[2].split(".", 1)[0]))
-
- except:
- pass
- prevhost = host
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- zonefile.write("\n\n%s" % zone[9])
- externalzonefile.write("\n\n%s" % zone[9])
- self.filedata[zone[1]] = zonefile.getvalue()
- self.filedata[zone[1] + ".external"] = externalzonefile.getvalue()
- zonefile.close()
- externalzonefile.close()
- self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile
- self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile
-
- cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'")
- reversezones = cursor.fetchall()
-
- reversenames = []
- for reversezone in reversezones:
- cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
- INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
- WHERE z.zone_id = \'%s\'""" % reversezone[0])
- reverse_nameservers = cursor.fetchall()
-
- context = Context({
- 'inaddr': reversezone[1].rstrip('.rev'),
- 'zone': reversezone,
- 'nameservers': reverse_nameservers,
- })
-
- self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context)
- self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context)
- self.filedata[reversezone[1]] += reversezone[9]
- self.filedata[reversezone[1] + '.external'] += reversezone[9]
-
- subnet = reversezone[1].split(".")
- subnet.reverse()
- reversenames.append((reversezone[1].rstrip('.rev'),".".join(subnet[1:])))
-
- for filename in reversenames:
- cursor.execute("""
- SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON n.ip_id = p.id
- WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr
- """ % filename[1])
- reversehosts = cursor.fetchall()
- zonefile = cStringIO.StringIO()
- externalzonefile = cStringIO.StringIO()
- if len(filename[0].split(".")) == 2:
- originlist = []
- [originlist.append((".".join([ip[1].split(".")[2], filename[0]]),
- ".".join([filename[1], ip[1].split(".")[2]])))
- for ip in reversehosts
- if (".".join([ip[1].split(".")[2], filename[0]]),
- ".".join([filename[1], ip[1].split(".")[2]])) not in originlist]
- for origin in originlist:
- hosts = [(host[1].split("."), host[0])
- for host in reversehosts
- if host[1].rstrip('0123456789').rstrip('.') == origin[1]]
- hosts_external = [(host[1].split("."), host[0])
- for host in reversehosts
- if (host[1].rstrip('0123456789').rstrip('.') == origin[1]
- and host[2] == 'global')]
- context = Context({
- 'hosts': hosts,
- 'inaddr': origin[0],
- 'fileorigin': filename[0],
- })
- zonefile.write(self.templates['reverseapp'].render(context))
- context = Context({
- 'hosts': hosts_external,
- 'inaddr': origin[0],
- 'fileorigin': filename[0],
- })
- externalzonefile.write(self.templates['reverseapp'].render(context))
- else:
- originlist = [filename[0]]
- hosts = [(host[1].split("."), host[0])
- for host in reversehosts
- if (host[1].split("."), host[0]) not in hosts]
- hosts_external = [(host[1].split("."), host[0])
- for host in reversehosts
- if ((host[1].split("."), host[0]) not in hosts_external
- and host[2] == 'global')]
- context = Context({
- 'hosts': hosts,
- 'inaddr': filename[0],
- 'fileorigin': None,
- })
- zonefile.write(self.templates['reverseapp'].render(context))
- context = Context({
- 'hosts': hosts_external,
- 'inaddr': filename[0],
- 'fileorigin': None,
- })
- externalzonefile.write(self.templates['reverseapp'].render(context))
- self.filedata['%s.rev' % filename[0]] += zonefile.getvalue()
- self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue()
- zonefile.close()
- externalzonefile.close()
- self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile
- self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile
-
- ## here's where the named.conf file gets written
- context = Context({
- 'zones': zones,
- 'reverses': reversenames,
- })
- self.filedata['named.conf'] = self.templates['named'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile
- self.filedata['named.conf.views'] = self.templates['namedviews'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile
-
-
- def buildDHCP(self):
- """Pre-build dhcpd.conf and stash in the filedata table."""
-
- # fetches all the hosts with DHCP == True
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname, mac_addr, ip_addr
- FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip ip ON i.id = ip.interface_id
- WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> ''
- AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown'
- ORDER BY h.hostname, i.mac_addr
- """)
-
- dhcphosts = cursor.fetchall()
- count = 0
- hosts = []
- hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]]
- if len(dhcphosts) > 1:
- for x in range(1, len(dhcphosts)):
- # if an interface has two or more IP addresses,
- # append the extra addresses to the current entry
- if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]:
- hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]])
- # if a host has two or more interfaces,
- # write out the current entry and start the next one
- elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]:
- hosts.append(hostdata)
- count += 1
- hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]]
- # new host found; write out the current entry
- else:
- hosts.append(hostdata)
- count = 0
- hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]]
- # make sure the last entry gets written out
- if hostdata not in hosts:
- hosts.append(hostdata)
-
- context = Context({
- 'hosts': hosts,
- 'numips': len(hosts),
- })
-
- self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile
-
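- # A worked example of the grouping above, with hypothetical rows of
- # (hostname, mac_addr, ip_addr) as returned by the query:
- #
- #   [('a.example.com', 'aa:bb', '10.0.0.1'),
- #    ('a.example.com', 'aa:bb', '10.0.0.2'),
- #    ('a.example.com', 'cc:dd', '10.0.1.1'),
- #    ('b.example.com', 'ee:ff', '10.0.2.1')]
- #
- # collapses to one entry per interface, with multi-homed interfaces
- # merged and extra interfaces numbered:
- #
- #   [['a.example.com', 'aa:bb', '10.0.0.1, 10.0.0.2'],
- #    ['a.example.com-1', 'cc:dd', '10.0.1.1'],
- #    ['b.example.com', 'ee:ff', '10.0.2.1']]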
-
- def buildHosts(self):
- """Pre-build and stash /etc/hosts file."""
-
- append_data = []
-
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname FROM hostbase_host ORDER BY hostname
- """)
- hostbase = cursor.fetchall()
- domains = [host[0].split(".", 1)[1] for host in hostbase]
- domains_set = Set(domains)
- domain_data = [(domain, domains.count(domain)) for domain in domains_set]
- domain_data.sort()
-
- cursor.execute("""
- SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr
- """)
- ips = cursor.fetchall()
- three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
- for ip in ips]
- three_octets_set = Set(three_octets)
- three_octets_data = [(octet, three_octets.count(octet)) \
- for octet in three_octets_set]
- three_octets_data.sort()
-
- for three_octet in three_octets_data:
- querystring = """SELECT h.hostname, h.primary_user,
- p.ip_addr, n.name, c.cname
- FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0]
- cursor.execute(querystring)
- tosort = list(cursor.fetchall())
- tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1])))
- append_data.append((three_octet, tuple(tosort)))
-
- two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
- two_octets_set = Set(two_octets)
- two_octets_data = [(octet, two_octets.count(octet))
- for octet in two_octets_set]
- two_octets_data.sort()
-
- context = Context({
- 'domain_data': domain_data,
- 'three_octets_data': three_octets_data,
- 'two_octets_data': two_octets_data,
- 'three_octets': three_octets,
- 'num_ips': len(three_octets),
- })
-
- self.filedata['hosts'] = self.templates['hosts'].render(context)
-
- for subnet in append_data:
- ips = []
- simple = True
- namelist = [name.split('.', 1)[0] for name in [subnet[1][0][3]]]
- cnamelist = []
- if subnet[1][0][4]:
- cnamelist.append(subnet[1][0][4].split('.', 1)[0])
- simple = False
- appenddata = subnet[1][0]
- for ip in subnet[1][1:]:
- if appenddata[2] == ip[2]:
- namelist.append(ip[3].split('.', 1)[0])
- if ip[4]:
- cnamelist.append(ip[4].split('.', 1)[0])
- simple = False
- appenddata = ip
- else:
- if appenddata[0] == ip[0]:
- simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
- cnamelist, simple, appenddata[1]))
- appenddata = ip
- simple = True
- namelist = [ip[3].split('.', 1)[0]]
- cnamelist = []
- if ip[4]:
- cnamelist.append(ip[4].split('.', 1)[0])
- simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
- cnamelist, simple, appenddata[1]))
- context = Context({
- 'subnet': subnet[0],
- 'ips': ips,
- })
- self.filedata['hosts'] += self.templates['hostsapp'].render(context)
- self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
-
- def buildPrinters(self):
- """The /mcs/etc/printers.data file"""
- header = """# This file is automatically generated. DO NOT EDIT IT!
-#
-Name Room User Type Notes
-============== ========== ============================== ======================== ====================
-"""
-
- cursor = connection.cursor()
- # fetches all the printers from the database
- cursor.execute("""
- SELECT printq, location, primary_user, comments
- FROM hostbase_host
- WHERE whatami='printer' AND printq <> '' AND status = 'active'
- ORDER BY printq
- """)
- printers = cursor.fetchall()
-
- printersfile = header
- for printer in printers:
- # splits up the printq line and gets the
- # correct description out of the comments section
- temp = printer[3].split('\n')
- for printq in re.split(',[ ]*', printer[0]):
- if len(temp) > 1:
- printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
- (printq, printer[1], printer[2], temp[1], temp[0]))
- else:
- printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
- (printq, printer[1], printer[2], '', printer[3]))
- self.filedata['printers.data'] = printersfile
- self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
-
- def buildHostsLPD(self):
- """Creates the /mcs/etc/hosts.lpd file"""
-
- # this header needs to be changed to be more generic
- header = """+@machines
-+@all-machines
-achilles.ctd.anl.gov
-raven.ops.anl.gov
-seagull.hr.anl.gov
-parrot.ops.anl.gov
-condor.ops.anl.gov
-delphi.esh.anl.gov
-anlcv1.ctd.anl.gov
-anlvms.ctd.anl.gov
-olivia.ctd.anl.gov\n\n"""
-
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active'
- ORDER BY hostname""")
- redmachines = list(cursor.fetchall())
- cursor.execute("""
- SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active'
- """)
- redmachines.extend(list(cursor.fetchall()))
- cursor.execute("""
- SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active'
- ORDER BY hostname""")
- winmachines = list(cursor.fetchall())
- cursor.execute("""
- SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active'
- """)
- # list.__add__ returns a new list and the original call discarded
- # its result; extend() appends in place, as for redmachines above
- winmachines.extend(list(cursor.fetchall()))
- hostslpdfile = header
- for machine in redmachines:
- hostslpdfile += machine[0] + "\n"
- hostslpdfile += "\n"
- for machine in winmachines:
- hostslpdfile += machine[0] + "\n"
- self.filedata['hosts.lpd'] = hostslpdfile
- self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
-
-
- def buildNetgroups(self):
- """Makes the *-machine files"""
- header = """###################################################################
-# This file lists hosts in the '%s' machine netgroup, it is
-# automatically generated. DO NOT EDIT THIS FILE!
-#
-# Number of hosts in '%s' machine netgroup: %i
-#\n\n"""
-
- cursor = connection.cursor()
- # fetches all the hosts with valid netgroup entries
- cursor.execute("""
- SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active'
- ORDER BY h.netgroup, h.hostname
- """)
- nameslist = cursor.fetchall()
- # gets the first host and initializes the hash
- hostdata = nameslist[0]
- netgroups = {hostdata[2]:[hostdata[0]]}
- for row in nameslist:
- # if new netgroup, create it
- if row[2] not in netgroups:
- netgroups.update({row[2]:[]})
- # if it belongs in the netgroup and has multiple interfaces, put them in
- if hostdata[0] == row[0] and row[3]:
- netgroups[row[2]].append(row[1])
- hostdata = row
- # if it's a new host, write the old one to the hash
- elif hostdata[0] != row[0]:
- netgroups[row[2]].append(row[0])
- hostdata = row
-
- for netgroup in netgroups:
- fileoutput = cStringIO.StringIO()
- fileoutput.write(header % (netgroup, netgroup, len(netgroups[netgroup])))
- for each in netgroups[netgroup]:
- fileoutput.write(each + "\n")
- self.filedata['%s-machines' % netgroup] = fileoutput.getvalue()
- fileoutput.close()
- self.Entries['ConfigFile']['/my/adm/hostbase/makenets/machines/%s-machines' % netgroup] = self.FetchFile
-
- cursor.execute("""
- UPDATE hostbase_host SET dirty=0
- """)
diff --git a/build/lib/Bcfg2/Server/Plugins/Metadata.py b/build/lib/Bcfg2/Server/Plugins/Metadata.py
deleted file mode 100644
index 81fd3e173..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Metadata.py
+++ /dev/null
@@ -1,809 +0,0 @@
-"""This file stores persistent metadata for the Bcfg2 Configuration Repository."""
-
-__revision__ = '$Revision$'
-
-import copy
-import fcntl
-import lxml.etree
-import os
-import os.path
-import socket
-import time
-import Bcfg2.Server.Plugin
-
-class MetadataConsistencyError(Exception):
- """This error gets raised when metadata is internally inconsistent."""
- pass
-
-class MetadataRuntimeError(Exception):
- """This error is raised when the metadata engine is called prior to reading enough data."""
- pass
-
-class ClientMetadata(object):
- """This object contains client metadata."""
- def __init__(self, client, profile, groups, bundles,
- aliases, addresses, categories, uuid, password, query):
- self.hostname = client
- self.profile = profile
- self.bundles = bundles
- self.aliases = aliases
- self.addresses = addresses
- self.groups = groups
- self.categories = categories
- self.uuid = uuid
- self.password = password
- self.connectors = []
- self.query = query
-
- def inGroup(self, group):
- """Test to see if client is a member of group."""
- return group in self.groups
-
- def group_in_category(self, category):
- for grp in self.query.all_groups_in_category(category):
- if grp in self.groups:
- return grp
- return ''
-
-class MetadataQuery(object):
- def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category):
- # resolver is set later
- self.by_name = by_name
- self.names_by_groups = by_groups
- self.names_by_profiles = by_profiles
- self.all_clients = get_clients
- self.all_groups = all_groups
- self.all_groups_in_category = all_groups_in_category
-
- def by_groups(self, groups):
- return [self.by_name(name) for name in self.names_by_groups(groups)]
-
- def by_profiles(self, profiles):
- return [self.by_name(name) for name in self.names_by_profiles(profiles)]
-
- def all(self):
- return [self.by_name(name) for name in self.all_clients()]
-
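-# A minimal usage sketch (hypothetical caller): plugins reach this object
-# as metadata.query on any ClientMetadata instance, e.g.
-#
-#   names = query.names_by_groups(['webserver'])   # -> list of hostnames
-#   for cmd in query.by_profiles(['desktop']):     # -> ClientMetadata objects
-#       print cmd.hostname, cmd.groups
-#
-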
-class Metadata(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Metadata,
- Bcfg2.Server.Plugin.Statistics):
- """This class contains data for bcfg2 server metadata."""
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- name = "Metadata"
-
- def __init__(self, core, datastore, watch_clients=True):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Metadata.__init__(self)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- if watch_clients:
- try:
- core.fam.AddMonitor("%s/%s" % (self.data, "groups.xml"), self)
- core.fam.AddMonitor("%s/%s" % (self.data, "clients.xml"), self)
- except:
- print("Unable to add file monitor for groups.xml or clients.xml")
- raise Bcfg2.Server.Plugin.PluginInitError
- self.states = {}
- if watch_clients:
- self.states = {"groups.xml":False, "clients.xml":False}
- self.addresses = {}
- self.auth = dict()
- self.clients = {}
- self.aliases = {}
- self.groups = {}
- self.cgroups = {}
- self.public = []
- self.private = []
- self.profiles = []
- self.categories = {}
- self.bad_clients = {}
- self.uuid = {}
- self.secure = []
- self.floating = []
- self.passwords = {}
- self.session_cache = {}
- self.clientdata = None
- self.clientdata_original = None
- self.default = None
- self.pdirty = False
- self.extra = {'groups.xml':[], 'clients.xml':[]}
- self.password = core.password
- self.query = MetadataQuery(core.build_metadata,
- lambda:self.clients.keys(),
- self.get_client_names_by_groups,
- self.get_client_names_by_profiles,
- self.get_all_group_names,
- self.get_all_groups_in_category)
-
- @classmethod
- def init_repo(cls, repo, groups, os_selection, clients):
- path = '%s/%s' % (repo, cls.name)
- os.makedirs(path)
- open("%s/Metadata/groups.xml" %
- repo, "w").write(groups % os_selection)
- open("%s/Metadata/clients.xml" %
- repo, "w").write(clients % socket.getfqdn())
-
- def get_groups(self):
- '''return groups xml tree'''
- groups_tree = lxml.etree.parse(self.data + "/groups.xml")
- root = groups_tree.getroot()
- return root
-
- def search_group(self, group_name, tree):
- """Find a group."""
- for node in tree.findall("//Group"):
- if node.get("name") == group_name:
- return node
- for child in node:
- if child.tag == "Alias" and child.attrib["name"] == group_name:
- return node
- return None
-
- def add_group(self, group_name, attribs):
- """Add group to groups.xml."""
- tree = lxml.etree.parse(self.data + "/groups.xml")
- root = tree.getroot()
- element = lxml.etree.Element("Group", name=group_name)
- for key, val in attribs.iteritems():
- element.set(key, val)
- node = self.search_group(group_name, tree)
- if node != None:
- self.logger.error("Group \"%s\" already exists" % (group_name))
- raise MetadataConsistencyError
- root.append(element)
- group_tree = open(self.data + "/groups.xml","w")
- fd = group_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(group_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- group_tree.close()
-
- def update_group(self, group_name, attribs):
- """Update a groups attributes."""
- tree = lxml.etree.parse(self.data + "/groups.xml")
- root = tree.getroot()
- node = self.search_group(group_name, tree)
- if node == None:
- self.logger.error("Group \"%s\" not found" % (group_name))
- raise MetadataConsistencyError
- node.attrib.update(attribs)
- group_tree = open(self.data + "/groups.xml","w")
- fd = group_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(group_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- group_tree.close()
-
- def remove_group(self, group_name):
- """Remove a group."""
- tree = lxml.etree.parse(self.data + "/groups.xml")
- root = tree.getroot()
- node = self.search_group(group_name, tree)
- if node == None:
- self.logger.error("Client \"%s\" not found" % (group_name))
- raise MetadataConsistencyError
- root.remove(node)
- group_tree = open(self.data + "/groups.xml","w")
- fd = group_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(group_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- group_tree.close()
-
- def add_bundle(self, bundle_name):
- """Add bundle to groups.xml."""
- tree = lxml.etree.parse(self.data + "/groups.xml")
- root = tree.getroot()
- element = lxml.etree.Element("Bundle", name=bundle_name)
- node = self.search_group(bundle_name, tree)
- if node != None:
- self.logger.error("Bundle \"%s\" already exists" % (bundle_name))
- raise MetadataConsistencyError
- root.append(element)
- group_tree = open(self.data + "/groups.xml","w")
- fd = group_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(group_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- group_tree.close()
-
- def remove_bundle(self, bundle_name):
- """Remove a bundle."""
- tree = lxml.etree.parse(self.data + "/groups.xml")
- root = tree.getroot()
- node = self.search_group(bundle_name, tree)
- if node == None:
- self.logger.error("Bundle \"%s\" not found" % (bundle_name))
- raise MetadataConsistencyError
- root.remove(node)
- group_tree = open(self.data + "/groups.xml","w")
- fd = group_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(group_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- group_tree.close()
-
- def search_client(self, client_name, tree):
- """Find a client."""
- for node in tree.findall("//Client"):
- if node.get("name") == client_name:
- return node
- for child in node:
- if child.tag == "Alias" and child.attrib["name"] == client_name:
- return node
- return None
-
- def add_client(self, client_name, attribs):
- """Add client to clients.xml."""
- tree = lxml.etree.parse(self.data + "/clients.xml")
- root = tree.getroot()
- element = lxml.etree.Element("Client", name=client_name)
- for key, val in attribs.iteritems():
- element.set(key, val)
- node = self.search_client(client_name, tree)
- if node != None:
- self.logger.error("Client \"%s\" already exists" % (client_name))
- raise MetadataConsistencyError
- root.append(element)
- client_tree = open(self.data + "/clients.xml","w")
- fd = client_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(client_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- client_tree.close()
-
- def update_client(self, client_name, attribs):
- """Update a clients attributes."""
- tree = lxml.etree.parse(self.data + "/clients.xml")
- root = tree.getroot()
- node = self.search_client(client_name, tree)
- if node == None:
- self.logger.error("Client \"%s\" not found" % (client_name))
- raise MetadataConsistencyError
- node.attrib.update(attribs)
- client_tree = open(self.data + "/clients.xml","w")
- fd = client_tree.fileno()
- while True:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- continue
- else:
- break
- tree.write(client_tree)
- fcntl.lockf(fd, fcntl.LOCK_UN)
- client_tree.close()
-
- def HandleEvent(self, event):
- """Handle update events for data files."""
- filename = event.filename.split('/')[-1]
- if filename in ['groups.xml', 'clients.xml']:
- dest = filename
- elif filename in reduce(lambda x, y:x+y, self.extra.values()):
- if event.code2str() == 'exists':
- return
- dest = [key for key, value in self.extra.iteritems() if filename in value][0]
- else:
- return
- if event.code2str() == 'endExist':
- return
- try:
- xdata = lxml.etree.parse("%s/%s" % (self.data, dest))
- except lxml.etree.XMLSyntaxError:
- self.logger.error('Failed to parse %s' % (dest))
- return
- included = [ent.get('href') for ent in \
- xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
- xdata_original = copy.deepcopy(xdata)
- if included:
- for name in included:
- if name not in self.extra[dest]:
- self.core.fam.AddMonitor("%s/%s" % (self.data, name), self)
- self.extra[dest].append(name)
- try:
- xdata.xinclude()
- except lxml.etree.XIncludeError:
- self.logger.error("Failed to process XInclude for file %s" % dest)
-
- if dest == 'clients.xml':
- self.clients = {}
- self.aliases = {}
- self.raliases = {}
- self.bad_clients = {}
- self.secure = []
- self.floating = []
- self.addresses = {}
- self.raddresses = {}
- self.clientdata_original = xdata_original
- self.clientdata = xdata
- for client in xdata.findall('.//Client'):
- clname = client.get('name').lower()
- if 'address' in client.attrib:
- caddr = client.get('address')
- if caddr in self.addresses:
- self.addresses[caddr].append(clname)
- else:
- self.addresses[caddr] = [clname]
- if clname not in self.raddresses:
- self.raddresses[clname] = set()
- self.raddresses[clname].add(caddr)
- if 'auth' in client.attrib:
- self.auth[client.get('name')] = client.get('auth',
- 'cert+password')
- if 'uuid' in client.attrib:
- self.uuid[client.get('uuid')] = clname
- if client.get('secure', 'false') == 'true':
- self.secure.append(clname)
- if client.get('location', 'fixed') == 'floating':
- self.floating.append(clname)
- if 'password' in client.attrib:
- self.passwords[clname] = client.get('password')
- for alias in [alias for alias in client.findall('Alias')\
- if 'address' in alias.attrib]:
- if alias.get('address') in self.addresses:
- self.addresses[alias.get('address')].append(clname)
- else:
- self.addresses[alias.get('address')] = [clname]
- if clname not in self.raddresses:
- self.raddresses[clname] = set()
- self.raddresses[clname].add(alias.get('address'))
- self.clients.update({clname: client.get('profile')})
- [self.aliases.update({alias.get('name'): clname}) \
- for alias in client.findall('Alias')]
- self.raliases[clname] = set()
- [self.raliases[clname].add(alias.get('name')) for alias \
- in client.findall('Alias')]
- elif dest == 'groups.xml':
- self.public = []
- self.private = []
- self.profiles = []
- self.groups = {}
- grouptmp = {}
- self.categories = {}
- for group in xdata.xpath('//Groups/Group') \
- + xdata.xpath('Group'):
- grouptmp[group.get('name')] = tuple([[item.get('name') for item in group.findall(spec)]
- for spec in ['./Bundle', './Group']])
- grouptmp[group.get('name')][1].append(group.get('name'))
- if group.get('default', 'false') == 'true':
- self.default = group.get('name')
- if group.get('profile', 'false') == 'true':
- self.profiles.append(group.get('name'))
- if group.get('public', 'false') == 'true':
- self.public.append(group.get('name'))
- elif group.get('public', 'true') == 'false':
- self.private.append(group.get('name'))
- if 'category' in group.attrib:
- self.categories[group.get('name')] = group.get('category')
- for group in grouptmp:
- # self.groups[group] => (bundles, groups, categories)
- self.groups[group] = (set(), set(), {})
- tocheck = [group]
- group_cat = self.groups[group][2]
- while tocheck:
- now = tocheck.pop()
- self.groups[group][1].add(now)
- if now in grouptmp:
- (bundles, groups) = grouptmp[now]
- for ggg in [ggg for ggg in groups if ggg not in self.groups[group][1]]:
- if ggg not in self.categories or \
- self.categories[ggg] not in self.groups[group][2]:
- self.groups[group][1].add(ggg)
- tocheck.append(ggg)
- if ggg in self.categories:
- group_cat[self.categories[ggg]] = ggg
- elif ggg in self.categories:
- self.logger.info("Group %s: %s cat-suppressed %s" % \
- (group,
- group_cat[self.categories[ggg]],
- ggg))
- [self.groups[group][0].add(bund) for bund in bundles]
- self.states[dest] = True
- if False not in self.states.values():
- # check that all client groups are real and complete
- real = self.groups.keys()
- for client in self.clients.keys():
- if self.clients[client] not in self.profiles:
- self.logger.error("Client %s set as nonexistent or incomplete group %s" \
- % (client, self.clients[client]))
- self.logger.error("Removing client mapping for %s" % (client))
- self.bad_clients[client] = self.clients[client]
- del self.clients[client]
- for bclient in self.bad_clients.keys():
- if self.bad_clients[bclient] in self.profiles:
- self.logger.info("Restored profile mapping for client %s" % bclient)
- self.clients[bclient] = self.bad_clients[bclient]
- del self.bad_clients[bclient]
-
- def set_profile(self, client, profile, addresspair):
- """Set group parameter for provided client."""
- self.logger.info("Asserting client %s profile to %s" % (client, profile))
- if False in self.states.values():
- raise MetadataRuntimeError
- if profile not in self.public:
- self.logger.error("Failed to set client %s to private group %s" % (client, profile))
- raise MetadataConsistencyError
- if client in self.clients:
- self.logger.info("Changing %s group from %s to %s" % (client, self.clients[client], profile))
- cli = self.clientdata_original.xpath('.//Client[@name="%s"]' % (client))
- cli[0].set('profile', profile)
- else:
- self.logger.info("Creating new client: %s, profile %s" % \
- (client, profile))
- if addresspair in self.session_cache:
- # we are working with a uuid'd client
- lxml.etree.SubElement(self.clientdata_original.getroot(),
- 'Client',
- name=self.session_cache[addresspair][1],
- uuid=client, profile=profile,
- address=addresspair[0])
- else:
- lxml.etree.SubElement(self.clientdata_original.getroot(),
- 'Client', name=client,
- profile=profile)
- self.clients[client] = profile
- self.write_back_clients()
-
- def write_back_clients(self):
- """Write changes to client.xml back to disk."""
- try:
- datafile = open("%s/%s" % (self.data, 'clients.xml.new'), 'w')
- except IOError:
- self.logger.error("Failed to write clients.xml.new")
- raise MetadataRuntimeError
- # prep data
- dataroot = self.clientdata_original.getroot()
- if hasattr(dataroot, 'iter'):
- items = dataroot.iter()
- else:
- items = dataroot.getchildren()
- for item in items:
- # no items have text data of any sort
- item.tail = None
- item.text = None
- newcontents = lxml.etree.tostring(dataroot, pretty_print=True)
-
- fd = datafile.fileno()
- while self.locked(fd) == True:
- pass
- try:
- datafile.write(newcontents)
- except:
- fcntl.lockf(fd, fcntl.LOCK_UN)
- self.logger.error("Metadata: Failed to write new clients data to clients.xml.new", exc_info=1)
- os.unlink("%s/%s" % (self.data, "clients.xml.new"))
- raise MetadataRuntimeError
- datafile.close()
-
- # check if clients.xml is a symlink
- clientsxml = "%s/%s" % (self.data, 'clients.xml')
- if os.path.islink(clientsxml):
- clientsxml = os.readlink(clientsxml)
-
- try:
- os.rename("%s/%s" % (self.data, 'clients.xml.new'), clientsxml)
- except:
- self.logger.error("Metadata: Failed to rename clients.xml.new")
- raise MetadataRuntimeError
-
- def locked(self, fd):
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError:
- return True
- return False
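-
-    # locked() is the non-blocking probe behind the pattern used above;
-    # a minimal sketch of the intended usage (fd is any open descriptor):
-    #
-    #   while self.locked(fd):
-    #       pass                        # spin until lockf() succeeds
-    #   ...write the file...
-    #   fcntl.lockf(fd, fcntl.LOCK_UN)  # release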
-
- def resolve_client(self, addresspair):
- """Lookup address locally or in DNS to get a hostname."""
- if addresspair in self.session_cache:
- (stamp, uuid) = self.session_cache[addresspair]
- if time.time() - stamp < 90:
- return self.session_cache[addresspair][1]
- address = addresspair[0]
- if address in self.addresses:
- if len(self.addresses[address]) != 1:
- self.logger.error("Address %s has multiple reverse assignments; a uuid must be used" % (address))
- raise MetadataConsistencyError
- return self.addresses[address][0]
- try:
- cname = socket.gethostbyaddr(address)[0].lower()
- if cname in self.aliases:
- return self.aliases[cname]
- return cname
- except socket.herror:
- warning = "address resolution error for %s" % (address)
- self.logger.warning(warning)
- raise MetadataConsistencyError
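-
-    # Resolution order, illustrated with hypothetical data: a cached uuid
-    # session (less than 90s old) wins; then a unique clients.xml address
-    # mapping (self.addresses['10.0.0.5'] == ['node1'] -> 'node1');
-    # finally reverse DNS, mapped through self.aliases when needed.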
-
- def get_initial_metadata(self, client):
- """Return the metadata for a given client."""
- if False in self.states.values():
- raise MetadataRuntimeError
- client = client.lower()
- if client in self.aliases:
- client = self.aliases[client]
- if client in self.clients:
- profile = self.clients[client]
- (bundles, groups, categories) = self.groups[profile]
- else:
- if self.default == None:
- self.logger.error("Cannot set group for client %s; no default group set" % (client))
- raise MetadataConsistencyError
- self.set_profile(client, self.default, (None, None))
- profile = self.default
- [bundles, groups, categories] = self.groups[self.default]
- aliases = self.raliases.get(client, set())
- addresses = self.raddresses.get(client, set())
- newgroups = set(groups)
- newbundles = set(bundles)
- newcategories = {}
- newcategories.update(categories)
- if client in self.passwords:
- password = self.passwords[client]
- else:
- password = None
- uuids = [item for item, value in self.uuid.iteritems() if value == client]
- if uuids:
- uuid = uuids[0]
- else:
- uuid = None
- for group in self.cgroups.get(client, []):
- if group in self.groups:
- nbundles, ngroups, ncategories = self.groups[group]
- else:
- nbundles, ngroups, ncategories = ([], [group], {})
- [newbundles.add(b) for b in nbundles if b not in newbundles]
- [newgroups.add(g) for g in ngroups if g not in newgroups]
- newcategories.update(ncategories)
- return ClientMetadata(client, profile, newgroups, newbundles, aliases,
- addresses, newcategories, uuid, password, self.query)
-
- def get_all_group_names(self):
- all_groups = set()
- [all_groups.update(g[1]) for g in self.groups.values()]
- return all_groups
-
- def get_all_groups_in_category(self, category):
- all_groups = set()
- [all_groups.add(g) for g in self.categories \
- if self.categories[g] == category]
- return all_groups
-
- def get_client_names_by_profiles(self, profiles):
- return [client for client, profile in self.clients.iteritems() \
- if profile in profiles]
-
- def get_client_names_by_groups(self, groups):
- gprofiles = [profile for profile in self.profiles if \
- self.groups[profile][1].issuperset(groups)]
- return self.get_client_names_by_profiles(gprofiles)
-
- def merge_additional_groups(self, imd, groups):
- for group in groups:
- if group in self.categories and \
- self.categories[group] in imd.categories:
- continue
- nb, ng, _ = self.groups.get(group, (list(), [group], dict()))
- for b in nb:
- if b not in imd.bundles:
- imd.bundles.add(b)
- for g in ng:
- if g not in imd.groups:
- if g in self.categories and \
- self.categories[g] in imd.categories:
- continue
- if g in self.private:
- self.logger.error("Refusing to add dynamic membership in private group %s for client %s" % (g, imd.hostname))
- continue
- imd.groups.add(g)
-
- def merge_additional_data(self, imd, source, data):
- if not hasattr(imd, source):
- setattr(imd, source, data)
- imd.connectors.append(source)
-
- def validate_client_address(self, client, addresspair):
- """Check address against client."""
- address = addresspair[0]
- if client in self.floating:
- self.debug_log("Client %s is floating" % client)
- return True
- if address in self.addresses:
- if client in self.addresses[address]:
- self.debug_log("Client %s matches address %s" % (client, address))
- return True
- else:
- self.logger.error("Got request for non-float client %s from %s" \
- % (client, address))
- return False
- resolved = self.resolve_client(addresspair)
- if resolved.lower() == client.lower():
- return True
- else:
- self.logger.error("Got request for %s from incorrect address %s" \
- % (client, address))
- self.logger.error("Resolved to %s" % resolved)
- return False
-
- def AuthenticateConnection(self, cert, user, password, address):
- """This function checks auth creds."""
- if cert:
- id_method = 'cert'
- certinfo = dict([x[0] for x in cert['subject']])
- # look at cert.cN
- client = certinfo['commonName']
- self.debug_log("Got cN %s; using as client name" % client)
- auth_type = self.auth.get(client, 'cert+password')
- elif user == 'root':
- id_method = 'address'
- try:
- client = self.resolve_client(address)
- except MetadataConsistencyError:
- self.logger.error("Client %s failed to resolve; metadata problem" % (address[0]))
- return False
- else:
- id_method = 'uuid'
- # user maps to client
- if user not in self.uuid:
- client = user
- self.uuid[user] = user
- else:
- client = self.uuid[user]
-
- # we have the client name
- self.debug_log("Authenticating client %s" % client)
-
- # next we validate the address
- if id_method == 'uuid':
- addr_is_valid = True
- else:
- addr_is_valid = self.validate_client_address(client, address)
-
- if not addr_is_valid:
- return False
-
- if id_method == 'cert' and auth_type != 'cert+password':
- # we are done if cert+password not required
- return True
-
- if client not in self.passwords:
- if client in self.secure:
- self.logger.error("Client %s in secure mode but has no password" % (address[0]))
- return False
- if password != self.password:
- self.logger.error("Client %s used incorrect global password" % (address[0]))
- return False
- if client not in self.secure:
- if client in self.passwords:
- plist = [self.password, self.passwords[client]]
- else:
- plist = [self.password]
- if password not in plist:
- self.logger.error("Client %s failed to use either allowed password" % \
- (address[0]))
- return False
- else:
- # client in secure mode and has a client password
- if password != self.passwords[client]:
- self.logger.error("Client %s failed to use client password in secure mode" % \
- (address[0]))
- return False
- # populate the session cache
- if user != 'root':
- self.session_cache[address] = (time.time(), client)
- return True
-
- def process_statistics(self, meta, _):
- """Hook into statistics interface to toggle clients in bootstrap mode."""
- client = meta.hostname
- if client in self.auth and self.auth[client] == 'bootstrap':
- self.logger.info("Asserting client %s auth mode to cert" % client)
- cli = self.clientdata_original.xpath('.//Client[@name="%s"]' \
- % (client))
- cli[0].set('auth', 'cert')
- self.write_back_clients()
-
- def viz(self, hosts, bundles, key, colors):
- """Admin mode viz support."""
- groups_tree = lxml.etree.parse(self.data + "/groups.xml")
- try:
- groups_tree.xinclude()
- except lxml.etree.XIncludeError:
- self.logger.error("Failed to process XInclude for groups.xml")
- groups = groups_tree.getroot()
- categories = {'default':'grey83'}
- instances = {}
- viz_str = ""
- egroups = groups.findall("Group") + groups.findall('.//Groups/Group')
- for group in egroups:
- if not group.get('category') in categories:
- categories[group.get('category')] = colors.pop()
- group.set('color', categories[group.get('category')])
- if None in categories:
- del categories[None]
- if hosts:
- clients = self.clients
- for client, profile in clients.iteritems():
- if profile in instances:
- instances[profile].append(client)
- else:
- instances[profile] = [client]
- for profile, clist in instances.iteritems():
- clist.sort()
- viz_str += '''\t"%s-instances" [ label="%s", shape="record" ];\n''' \
- % (profile, '|'.join(clist))
- viz_str += '''\t"%s-instances" -> "group-%s";\n''' \
- % (profile, profile)
- if bundles:
- bundles = []
- [bundles.append(bund.get('name')) \
- for bund in groups.findall('.//Bundle') \
- if bund.get('name') not in bundles]
- bundles.sort()
- for bundle in bundles:
- viz_str += '''\t"bundle-%s" [ label="%s", shape="septagon"];\n''' \
- % (bundle, bundle)
- gseen = []
- for group in egroups:
- if group.get('profile', 'false') == 'true':
- style = "filled, bold"
- else:
- style = "filled"
- gseen.append(group.get('name'))
- viz_str += '\t"group-%s" [label="%s", style="%s", fillcolor=%s];\n' % \
- (group.get('name'), group.get('name'), style, group.get('color'))
- if bundles:
- for bundle in group.findall('Bundle'):
- viz_str += '\t"group-%s" -> "bundle-%s";\n' % \
- (group.get('name'), bundle.get('name'))
- gfmt = '\t"group-%s" [label="%s", style="filled", fillcolor="grey83"];\n'
- for group in egroups:
- for parent in group.findall('Group'):
- if parent.get('name') not in gseen:
- viz_str += gfmt % (parent.get('name'), parent.get('name'))
- gseen.append(parent.get("name"))
- viz_str += '\t"group-%s" -> "group-%s" ;\n' % \
- (group.get('name'), parent.get('name'))
- if key:
- for category in categories:
- viz_str += '''\t"''' + category + '''" [label="''' + category + \
- '''", shape="record", style="filled", fillcolor=''' + \
- categories[category] + '''];\n'''
- return viz_str
diff --git a/build/lib/Bcfg2/Server/Plugins/NagiosGen.py b/build/lib/Bcfg2/Server/Plugins/NagiosGen.py
deleted file mode 100644
index cd6f843fb..000000000
--- a/build/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ /dev/null
@@ -1,114 +0,0 @@
-'''This module implements a Nagios configuration generator'''
-
-import glob
-import logging
-import os
-import re
-import socket
-
-import Bcfg2.Server.Plugin
-
-LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen')
-
-host_config_fmt = \
-'''
-define host{
- host_name %s
- alias %s
- address %s
-'''
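-
-# Rendered by createhostconfig() below, the template expands to a block
-# like this (hypothetical host, no extra Properties directives):
-#
-#   define host{
-#           host_name       node1.example.com
-#           alias           node1.example.com
-#           address         10.0.0.5
-#           use             default
-#   }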
-
-class NagiosGen(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator):
- """NagiosGen is a Bcfg2 plugin that dynamically generates
- Nagios configuration file based on Bcfg2 data.
- """
- name = 'NagiosGen'
- __version__ = '0.6'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- self.Entries = {'Path':
- {'/etc/nagiosgen.status' : self.createhostconfig,
- '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
-
- self.client_attrib = {'encoding':'ascii',
- 'owner':'root',
- 'group':'root',
- 'type':'file',
- 'perms':'0400'}
- self.server_attrib = {'encoding':'ascii',
- 'owner':'nagios',
- 'group':'nagios',
- 'type':'file',
- 'perms':'0440'}
-
- def createhostconfig(self, entry, metadata):
- """Build host specific configuration file."""
- host_address = socket.gethostbyname(metadata.hostname)
- host_groups = [grp for grp in metadata.groups if \
- os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
- host_config = host_config_fmt % \
- (metadata.hostname, metadata.hostname, host_address)
-
- if host_groups:
- host_config += ' hostgroups %s\n' % (",".join(host_groups))
-
- xtra = None
- if hasattr(metadata, 'Properties') and \
- 'NagiosGen.xml' in metadata.Properties:
- for q in (metadata.hostname, 'default'):
- xtra = metadata.Properties['NagiosGen.xml'].data.find(q)
- if xtra is not None:
- break
-
- if xtra is not None:
- directives = list(xtra)
- for item in directives:
- host_config += ' %-32s %s\n' % (item.tag, item.text)
-
- else:
- host_config += ' use default\n'
-
- host_config += '}\n'
- entry.text = host_config
- [entry.attrib.__setitem__(key, value) for \
- (key, value) in self.client_attrib.iteritems()]
- try:
- fileh = open("%s/%s-host.cfg" % \
- (self.data, metadata.hostname), 'w')
- fileh.write(host_config)
- fileh.close()
- except IOError, ioerr:  # file writes raise IOError, not OSError
- LOGGER.error("Failed to write %s/%s-host.cfg" % \
- (self.data, metadata.hostname))
- LOGGER.error(ioerr)
-
- def createserverconfig(self, entry, _):
- """Build monolithic server configuration file."""
- host_configs = glob.glob('%s/*-host.cfg' % self.data)
- group_configs = glob.glob('%s/*-group.cfg' % self.data)
- host_data = ""
- group_data = ""
- for host in host_configs:
- hostfile = open(host, 'r')
- host_data += hostfile.read()
- hostfile.close()
- for group in group_configs:
- group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group)
- if host_data.find(group_name) != -1:
- groupfile = open(group, 'r')
- group_data += groupfile.read()
- groupfile.close()
- entry.text = group_data + host_data
- [entry.attrib.__setitem__(key, value) for \
- (key, value) in self.server_attrib.iteritems()]
- try:
- fileh = open("%s/nagiosgen.cfg" % (self.data), 'w')
- fileh.write(group_data + host_data)
- fileh.close()
- except IOError, ioerr:  # file writes raise IOError, not OSError
- LOGGER.error("Failed to write %s/nagiosgen.cfg" % (self.data))
- LOGGER.error(ioerr)
diff --git a/build/lib/Bcfg2/Server/Plugins/Ohai.py b/build/lib/Bcfg2/Server/Plugins/Ohai.py
deleted file mode 100644
index 0f7c7187f..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Ohai.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import lxml.etree
-import os
-
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Ohai')
-
-import Bcfg2.Server.Plugin
-
-try:
- import json
-except:
- # FIXME: can be removed when server prereq is >= python 2.6
- # necessary for clients without the in-tree json module
- try:
- import simplejson as json
- except:
- logger.error("Unable to load any json modules. Make sure "
- "python-simplejson is installed.")
- raise ImportError
-
-
-class OhaiCache(object):
-
- def __init__(self, dirname):
- self.dirname = dirname
- self.cache = dict()
-
- def __setitem__(self, item, value):
- if value == None:
- # simply return if the client returned nothing
- return
- self.cache[item] = json.loads(value)
- file("%s/%s.json" % (self.dirname, item), 'w').write(value)
-
- def __getitem__(self, item):
- if item not in self.cache:
- try:
- data = open("%s/%s.json" % (self.dirname, item)).read()
- except:
- raise KeyError, item
- self.cache[item] = json.loads(data)
- return self.cache[item]
-
- def __iter__(self):
- data = self.cache.keys()
- data.extend([x[:-5] for x in os.listdir(self.dirname)])
- return data.__iter__()
-
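-# A minimal usage sketch (hypothetical host and data):
-#
-#   cache = OhaiCache('/var/lib/bcfg2/Ohai')
-#   cache['node1.example.com'] = '{"platform": "ubuntu"}'  # raw JSON string
-#   cache['node1.example.com']['platform']  # -> 'ubuntu', parsed and cached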
-
-class Ohai(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Probing,
- Bcfg2.Server.Plugin.Connector):
- """The Ohai plugin is used to detect information about the client operating system."""
- name = 'Ohai'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Probing.__init__(self)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai',
- interpreter='/bin/sh')
- self.probe.text = 'ohai'
- try:
- os.stat(self.data)
- except:
- os.makedirs(self.data)
- self.cache = OhaiCache(self.data)
-
- def GetProbes(self, meta, force=False):
- return [self.probe]
-
- def ReceiveData(self, meta, datalist):
- self.cache[meta.hostname] = datalist[0].text
-
- def get_additional_data(self, meta):
- if meta.hostname in self.cache:
- return self.cache[meta.hostname]
- return dict()
diff --git a/build/lib/Bcfg2/Server/Plugins/Packages.py b/build/lib/Bcfg2/Server/Plugins/Packages.py
deleted file mode 100644
index 194330723..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Packages.py
+++ /dev/null
@@ -1,869 +0,0 @@
-import cPickle
-import copy
-import gzip
-import tarfile
-import glob
-import logging
-import lxml.etree
-import os
-import re
-import sys
-import urllib2
-
-# FIXME: Remove when server python dep is 2.5 or greater
-if sys.version_info >= (2, 5):
- from hashlib import md5
-else:
- from md5 import md5
-
-import Bcfg2.Logger
-import Bcfg2.Server.Plugin
-
-# build sources.list?
-# caching for yum
-
-class NoData(Exception):
- pass
-
-class SomeData(Exception):
- pass
-
-logger = logging.getLogger('Packages')
-
-def source_from_xml(xsource):
- ret = dict([('rawurl', False), ('url', False)])
- for key, tag in [('groups', 'Group'), ('components', 'Component'),
- ('arches', 'Arch'), ('blacklist', 'Blacklist'),
- ('whitelist', 'Whitelist')]:
- ret[key] = [item.text for item in xsource.findall(tag)]
- # version and component need to both contain data for sources to work
- try:
- ret['version'] = xsource.find('Version').text
- except:
- ret['version'] = 'placeholder'
- if ret['components'] == []:
- ret['components'] = ['placeholder']
- try:
- if xsource.find('Recommended').text in ['True', 'true']:
- ret['recommended'] = True
- else:
- ret['recommended'] = False
- except:
- ret['recommended'] = False
- if xsource.find('RawURL') is not None:
- ret['rawurl'] = xsource.find('RawURL').text
- if not ret['rawurl'].endswith('/'):
- ret['rawurl'] += '/'
- else:
- ret['url'] = xsource.find('URL').text
- if not ret['url'].endswith('/'):
- ret['url'] += '/'
- return ret
-
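-# For example, a source element like this (hypothetical values):
-#
-#   <Source>
-#     <URL>http://mirror.example.com/ubuntu/</URL>
-#     <Version>lucid</Version>
-#     <Component>main</Component>
-#     <Arch>i386</Arch>
-#   </Source>
-#
-# yields {'url': 'http://mirror.example.com/ubuntu/', 'version': 'lucid',
-#         'components': ['main'], 'arches': ['i386'], 'groups': [],
-#         'blacklist': [], 'whitelist': [], 'rawurl': False,
-#         'recommended': False}
-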
-def _fetch_url(url):
- if '@' in url:
- mobj = re.match('(\w+://)([^:]+):([^@]+)@(.*)$', url)
- if not mobj:
- raise ValueError
- user = mobj.group(2)
- passwd = mobj.group(3)
- url = mobj.group(1) + mobj.group(4)
- auth = urllib2.HTTPBasicAuthHandler(urllib2.HTTPPasswordMgrWithDefaultRealm())
- auth.add_password(None, url, user, passwd)
- urllib2.install_opener(urllib2.build_opener(auth))
- return urllib2.urlopen(url).read()
-
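-# e.g. (hypothetical mirror) _fetch_url('http://user:secret@mirror.example.com/x')
-# strips the credentials from the URL, installs an HTTPBasicAuthHandler
-# for the remaining URL, and returns the response body as a string.
-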
-class Source(object):
- basegroups = []
-
- def __init__(self, basepath, url, version, arches, components, groups, rawurl,
- blacklist, whitelist, recommended):
- self.basepath = basepath
- self.version = version
- self.components = components
- self.url = url
- self.rawurl = rawurl
- self.groups = groups
- self.arches = arches
- self.deps = dict()
- self.provides = dict()
- self.blacklist = set(blacklist)
- self.whitelist = set(whitelist)
- self.cachefile = '%s/cache-%s' % (self.basepath, md5(cPickle.dumps( \
- [self.version, self.components, self.url, \
- self.rawurl, self.groups, self.arches])).hexdigest())
- self.recommended = recommended
- self.url_map = []
-
- def load_state(self):
- pass
-
- def setup_data(self, force_update=False):
- should_read = True
- should_download = False
- if os.path.exists(self.cachefile):
- try:
- self.load_state()
- should_read = False
- except:
- logger.error("Cachefile %s load failed; falling back to file read"\
- % (self.cachefile))
- if should_read:
- try:
- self.read_files()
- except:
- logger.error("Packages: File read failed; falling back to file download")
- should_download = True
-
- if should_download or force_update:
- try:
- self.update()
- self.read_files()
- except:
- logger.error("Failed to update source", exc_info=1)
-
- def get_urls(self):
- return []
- urls = property(get_urls)
-
- def get_files(self):
- return [self.escape_url(url) for url in self.urls]
- files = property(get_files)
-
- def get_vpkgs(self, meta):
- agroups = ['global'] + [a for a in self.arches if a in meta.groups]
- vdict = dict()
- for agrp in agroups:
- for key, value in self.provides[agrp].iteritems():
- if key not in vdict:
- vdict[key] = set(value)
- else:
- vdict[key].update(value)
- return vdict
-
- def escape_url(self, url):
- return "%s/%s" % (self.basepath, url.replace('/', '@'))
-
- def file_init(self):
- pass
-
- def read_files(self):
- pass
-
- def update(self):
- for url in self.urls:
- logger.info("Packages: Updating %s" % url)
- fname = self.escape_url(url)
- try:
- data = _fetch_url(url)
- except ValueError:
- logger.error("Packages: Bad url string %s" % url)
- continue
- except urllib2.HTTPError, h:
- logger.error("Packages: Failed to fetch url %s. code=%s" \
- % (url, h.code))
- continue
- file(fname, 'w').write(data)
-
- def applies(self, metadata):
- return len([g for g in self.basegroups if g in metadata.groups]) != 0 and \
- len([g for g in metadata.groups if g in self.groups]) \
- == len(self.groups)
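-    # e.g. (hypothetical) a source with basegroups ['apt', 'debian'] and
-    # groups ['ubuntu-lucid'] applies to a client whose metadata groups
-    # include 'debian' and 'ubuntu-lucid', but not to one missing either.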
-
- def get_arches(self, metadata):
- return ['global'] + [a for a in self.arches if a in metadata.groups]
-
- def get_deps(self, metadata, pkgname):
- for arch in self.get_arches(metadata):
- if pkgname in self.deps[arch]:
- return self.deps[arch][pkgname]
- raise NoData
-
- def get_provides(self, metadata, required):
- for arch in self.get_arches(metadata):
- if required in self.provides[arch]:
- return self.provides[arch][required]
- raise NoData
-
- def is_package(self, metadata, _):
- return False
-
- def get_url_info(self):
- return {'groups': copy.copy(self.groups), \
- 'urls': [copy.deepcopy(url) for url in self.url_map]}
-
-class YUMSource(Source):
- xp = '{http://linux.duke.edu/metadata/common}'
- rp = '{http://linux.duke.edu/metadata/rpm}'
- rpo = '{http://linux.duke.edu/metadata/repo}'
- fl = '{http://linux.duke.edu/metadata/filelists}'
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
- ptype = 'yum'
-
- def __init__(self, basepath, url, version, arches, components, groups,
- rawurl, blacklist, whitelist, recommended):
- Source.__init__(self, basepath, url, version, arches, components,
- groups, rawurl, blacklist, whitelist, recommended)
- if not self.rawurl:
- self.baseurl = self.url + '%(version)s/%(component)s/%(arch)s/'
- else:
- self.baseurl = self.rawurl
- self.packages = dict()
- self.deps = dict([('global', dict())])
- self.provides = dict([('global', dict())])
- self.filemap = dict([(x, dict()) for x in ['global'] + self.arches])
- self.needed_paths = set()
- self.file_to_arch = dict()
-
- def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.packages, self.deps, self.provides,
- self.filemap, self.url_map), cache, 2)
- cache.close()
-
- def load_state(self):
- data = file(self.cachefile)
- (self.packages, self.deps, self.provides, \
- self.filemap, self.url_map) = cPickle.load(data)
-
- def get_urls(self):
- surls = list()
- self.url_map = []
- for arch in self.arches:
- usettings = [{'version': self.version, 'component':comp,
- 'arch':arch} for comp in self.components]
- for setting in usettings:
- setting['groups'] = self.groups
- setting['url'] = self.baseurl % setting
- self.url_map.append(copy.deepcopy(setting))
- surls.append((arch, [setting['url'] for setting in usettings]))
- urls = []
- for (sarch, surl_list) in surls:
- for surl in surl_list:
- if not surl.endswith('/'):
- surl += '/'
- rmdurl = surl + 'repodata/repomd.xml'
- try:
- repomd = _fetch_url(rmdurl)
- xdata = lxml.etree.XML(repomd)
- except ValueError:
- logger.error("Packages: Bad url string %s" % rmdurl)
- continue
- except urllib2.HTTPError, h:
- logger.error("Packages: Failed to fetch url %s. code=%s" \
- % (rmdurl, h.code))
- continue
- except:
- logger.error("Failed to process url %s" % rmdurl)
- continue
- for elt in xdata.findall(self.rpo + 'data'):
- if elt.get('type') not in ['filelists', 'primary']:
- continue
- floc = elt.find(self.rpo + 'location')
- fullurl = surl + floc.get('href')
- urls.append(fullurl)
- self.file_to_arch[self.escape_url(fullurl)] = sarch
- return urls
- urls = property(get_urls)
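-    # e.g. (hypothetical repo) for surl 'http://mirror.example.com/fedora/',
-    # repodata/repomd.xml is fetched and the 'primary' and 'filelists'
-    # <location href="..."/> entries are appended, such as
-    # http://mirror.example.com/fedora/repodata/abc123-primary.xml.gz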
-
- def read_files(self):
- for fname in [f for f in self.files if f.endswith('primary.xml.gz')]:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_primary(fdata, farch)
- for fname in [f for f in self.files if f.endswith('filelists.xml.gz')]:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_filelist(fdata, farch)
- # merge data
- sdata = self.packages.values()
- self.packages['global'] = copy.deepcopy(sdata.pop())
- while sdata:
-     # intersection() returns a new set, so the result must be stored
-     self.packages['global'] = self.packages['global'].intersection(sdata.pop())
-
- for key in self.packages:
- if key == 'global':
- continue
- # arch-specific packages are those not present in every arch
- self.packages[key] = self.packages[key].difference(self.packages['global'])
- self.save_state()
-
- def parse_filelist(self, data, arch):
- if arch not in self.filemap:
- self.filemap[arch] = dict()
- for pkg in data.findall(self.fl + 'package'):
- for fentry in [fe for fe in pkg.findall(self.fl + 'file') \
- if fe.text in self.needed_paths]:
- if fentry.text in self.filemap[arch]:
- self.filemap[arch][fentry.text].add(pkg.get('name'))
- else:
- self.filemap[arch][fentry.text] = set([pkg.get('name')])
-
- def parse_primary(self, data, arch):
- if arch not in self.packages:
- self.packages[arch] = set()
- if arch not in self.deps:
- self.deps[arch] = dict()
- if arch not in self.provides:
- self.provides[arch] = dict()
- for pkg in data.getchildren():
- if not pkg.tag.endswith('package'):
- continue
- pkgname = pkg.find(self.xp + 'name').text
- self.packages[arch].add(pkgname)
-
- pdata = pkg.find(self.xp + 'format')
- pre = pdata.find(self.rp + 'requires')
- self.deps[arch][pkgname] = set()
- for entry in pre.getchildren():
- self.deps[arch][pkgname].add(entry.get('name'))
- if entry.get('name').startswith('/'):
- self.needed_paths.add(entry.get('name'))
- pro = pdata.find(self.rp + 'provides')
- if pro != None:
- for entry in pro.getchildren():
- prov = entry.get('name')
- if prov not in self.provides[arch]:
- self.provides[arch][prov] = list()
- self.provides[arch][prov].append(pkgname)
-
- def is_package(self, metadata, item):
- arch = [a for a in self.arches if a in metadata.groups]
- if not arch:
- return False
- return (item in self.packages['global'] or item in self.packages[arch[0]]) and \
- item not in self.blacklist and \
- ((len(self.whitelist) == 0) or item in self.whitelist)
-
- def get_vpkgs(self, metadata):
- rv = Source.get_vpkgs(self, metadata)
- for arch, fmdata in self.filemap.iteritems():
- if arch not in metadata.groups and arch != 'global':
- continue
- for filename, pkgs in fmdata.iteritems():
- rv[filename] = pkgs
- return rv
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('rpmlib')])
- unknown.difference_update(filtered)
-
-class APTSource(Source):
- basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
- ptype = 'deb'
-
- def __init__(self, basepath, url, version, arches, components, groups,
- rawurl, blacklist, whitelist, recommended):
- Source.__init__(self, basepath, url, version, arches, components, groups,
- rawurl, blacklist, whitelist, recommended)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url, 'version': self.version, \
- 'components': self.components, 'arches': self.arches, 'groups': self.groups}]
-
- def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- return ["%sdists/%s/%s/binary-%s/Packages.gz" % \
- (self.url, self.version, part, arch) for part in self.components \
- for arch in self.arches]
- else:
- return ["%sPackages.gz" % (self.rawurl)]
- urls = property(get_urls)
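-    # e.g. (hypothetical mirror) url='http://mirror.example.com/debian/',
-    # version='lenny', components=['main'], arches=['amd64'] yields
-    # http://mirror.example.com/debian/dists/lenny/main/binary-amd64/Packages.gz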
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
- for fname in self.files:
- if not self.rawurl:
- barch = [x for x in fname.split('@') if x.startswith('binary-')][0][7:]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- reader = gzip.GzipFile(fname)
- except:
- print("Failed to read file %s" % fname)
- raise
- for line in reader.readlines():
- words = line.strip().split(':', 1)
- if words[0] == 'Package':
- pkgname = words[1].strip().rstrip()
- self.pkgnames.add(pkgname)
- bdeps[barch][pkgname] = []
- elif words[0] in depfnames:
- vindex = 0
- for dep in words[1].split(','):
- if '|' in dep:
- cdeps = [re.sub('\s+', '', re.sub('\(.*\)', '', cdep)) for cdep in dep.split('|')]
- dyn_dname = "choice-%s-%s-%s" % (pkgname, barch, vindex)
- vindex += 1
- bdeps[barch][pkgname].append(dyn_dname)
- bprov[barch][dyn_dname] = set(cdeps)
- else:
- raw_dep = re.sub('\(.*\)', '', dep)
- raw_dep = raw_dep.rstrip().strip()
- bdeps[barch][pkgname].append(raw_dep)
- elif words[0] == 'Provides':
- for pkg in words[1].split(','):
- dname = pkg.rstrip().strip()
- if dname not in bprov[barch]:
- bprov[barch][dname] = set()
- bprov[barch][dname].add(pkgname)
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in bprov.values():
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return pkg in self.pkgnames and \
- pkg not in self.blacklist and \
- (len(self.whitelist) == 0 or pkg in self.whitelist)
-
-class PACSource(Source):
- basegroups = ['arch', 'parabola']
- ptype = 'pacman'
-
- def __init__(self, basepath, url, version, arches, components, groups,
- rawurl, blacklist, whitelist, recommended):
- Source.__init__(self, basepath, url, version, arches, components, groups,
- rawurl, blacklist, whitelist, recommended)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url, 'version': self.version, \
- 'components': self.components, 'arches': self.arches, 'groups': self.groups}]
-
- def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- return ["%s/%s/os/%s/%s.db.tar.gz" % \
- (self.url, part, arch, part) for part in self.components \
- for arch in self.arches]
- else:
- raise Exception("PACSource : RAWUrl not supported (yet)")
- urls = property(get_urls)
-
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
-
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
-
- for fname in self.files:
- if not self.rawurl:
- barch = [x for x in fname.split('@') if x in self.arches][0]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
-
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- print("Trying to read %s" % fname)
- tar = tarfile.open(fname, "r")
- reader = gzip.GzipFile(fname)
- except:
- print("Failed to read file %s" % fname)
- raise
-
- for tarinfo in tar:
- if tarinfo.isdir():
- pkgname = tarinfo.name.rsplit("-", 2)[0]
- self.pkgnames.add(pkgname)
- print("Added package %s" % pkgname)
- tar.close()
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in bprov.values():
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return pkg in self.pkgnames and \
- pkg not in self.blacklist and \
- (len(self.whitelist) == 0 or pkg in self.whitelist)
-
-class Packages(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.StructureValidator,
- Bcfg2.Server.Plugin.Generator,
- Bcfg2.Server.Plugin.Connector):
- name = 'Packages'
- conflicts = ['Pkgmgr']
- experimental = True
- __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.StructureValidator.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.cachepath = self.data + '/cache'
- self.sentinels = set()
- self.sources = []
- self.disableResolver = False
- self.disableMetaData = False
- self.virt_pkgs = dict()
-
- if not os.path.exists(self.cachepath):
- # create cache directory if needed
- os.makedirs(self.cachepath)
- self._load_config()
-
- def get_relevant_groups(self, meta):
- mgrps = list(set([g for g in meta.groups for s in self.get_matching_sources(meta) \
- if g in s.basegroups or g in s.groups or g in s.arches]))
- mgrps.sort()
- return tuple(mgrps)
-
- def build_vpkgs_entry(self, meta):
- # build single entry for all matching sources
- mgrps = self.get_relevant_groups(meta)
- vpkgs = dict()
- for source in self.get_matching_sources(meta):
- s_vpkgs = source.get_vpkgs(meta)
- for name, prov_set in s_vpkgs.iteritems():
- if name not in vpkgs:
- vpkgs[name] = set(prov_set)
- else:
- vpkgs[name].update(prov_set)
- return vpkgs
-
- def get_matching_sources(self, meta):
- return [s for s in self.sources if s.applies(meta)]
-
- def HandlesEntry(self, entry, metadata):
- if [x for x in metadata.groups if x in self.sentinels] \
- and entry.tag == 'Package':
- return True
- return False
-
- def HandleEntry(self, entry, metadata):
- entry.set('version', 'auto')
- for source in self.sources:
- if [x for x in metadata.groups if x in source.basegroups]:
- entry.set('type', source.ptype)
-
- def complete(self, meta, input_requirements, debug=False):
- '''Build the transitive closure of all package dependencies
-
- Arguments:
- meta - client metadata instance
- input_requirements - set of requested package names
- debug - print out debug information for the decision making process
- returns => (set(packages), set(unsatisfied requirements), package type)
- '''
- sources = self.get_matching_sources(meta)
- # reverse list so that priorities correspond to file order
- sources.reverse()
- if len(sources) == 0:
- self.logger.error("Packages: No matching sources for client %s; improper group memberships?" % (meta.hostname))
- return set(), set(), 'failed'
- ptype = set([s.ptype for s in sources])
- if len(ptype) < 1:
- return set(), set(), 'failed'
-
- # setup vpkg cache
- pgrps = self.get_relevant_groups(meta)
- if pgrps not in self.virt_pkgs:
- self.virt_pkgs[pgrps] = self.build_vpkgs_entry(meta)
- vpkg_cache = self.virt_pkgs[pgrps]
-
- # unclassified is set of unsatisfied requirements (may be pkg for vpkg)
- unclassified = set(input_requirements)
- vpkgs = set()
- both = set()
- pkgs = set(input_requirements)
-
- packages = set()
- examined = set()
- unknown = set()
-
- final_pass = False
- really_done = False
- # do while unclassified or vpkgs or both or pkgs
- while unclassified or pkgs or both or final_pass:
- #print len(unclassified), len(pkgs), len(both), len(vpkgs), final_pass
- if really_done:
- break
- if len(unclassified) + len(pkgs) + len(both) == 0:
- # one more pass then exit
- really_done = True
-
- while unclassified:
- current = unclassified.pop()
- examined.add(current)
- is_pkg = True in [source.is_package(meta, current) for source in sources]
- is_vpkg = current in vpkg_cache
-
- if is_pkg and is_vpkg:
- both.add(current)
- elif is_pkg and not is_vpkg:
- pkgs.add(current)
- elif is_vpkg and not is_pkg:
- vpkgs.add(current)
- elif not is_vpkg and not is_pkg:
- unknown.add(current)
-
- while pkgs:
- # direct packages; current can be added, and all deps should be resolved
- current = pkgs.pop()
- if debug:
- self.logger.debug("Packages: handling package requirement %s" % (current))
- deps = ()
- for source in sources:
- if source.is_package(meta, current):
- try:
- deps = source.get_deps(meta, current)
- break
- except:
- continue
- packages.add(current)
- newdeps = set(deps).difference(examined)
- if debug and newdeps:
- self.logger.debug("Packages: Package %s added requirements %s" % (current, newdeps))
- unclassified.update(newdeps)
-
- satisfied_vpkgs = set()
- for current in vpkgs:
- # virtual dependencies, satisfied if one of N in the config, or can be forced if only one provider
- if len(vpkg_cache[current]) == 1:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied by %s" % (current, vpkg_cache[current]))
- unclassified.update(vpkg_cache[current].difference(examined))
- satisfied_vpkgs.add(current)
- elif [item for item in vpkg_cache[current] if item in packages]:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied by %s" % (current, [item for item in vpkg_cache[current] if item in packages]))
- satisfied_vpkgs.add(current)
- vpkgs.difference_update(satisfied_vpkgs)
-
- satisfied_both = set()
- for current in both:
- # packages that are both have virtual providers as well as a package with that name
- # allow use of virt through explicit specification, then fall back to forcing current on last pass
- if [item for item in vpkg_cache[current] if item in packages]:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied by %s" % (current, [item for item in vpkg_cache[current] if item in packages]))
- satisfied_both.add(current)
- elif current in input_requirements or final_pass:
- pkgs.add(current)
- satisfied_both.add(current)
- both.difference_update(satisfied_both)
-
- if len(unclassified) + len(pkgs) == 0:
- final_pass = True
- else:
- final_pass = False
-
- for source in sources:
- source.filter_unknown(unknown)
-
- return packages, unknown, ptype.pop()
-
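As a toy illustration of the classification step above (all package names hypothetical), each outstanding requirement is binned into pkgs, vpkgs, both, or unknown before dependency expansion proceeds:

    real_pkgs = set(['openssh-server', 'editor'])            # concrete packages
    vpkg_cache = {'mail-transport-agent': set(['exim4', 'sendmail']),
                  'editor': set(['vim', 'nano'])}            # virtual providers

    unclassified = set(['openssh-server', 'mail-transport-agent',
                        'editor', 'no-such-pkg'])
    pkgs, vpkgs, both, unknown = set(), set(), set(), set()
    while unclassified:
        current = unclassified.pop()
        is_pkg = current in real_pkgs
        is_vpkg = current in vpkg_cache
        if is_pkg and is_vpkg:
            both.add(current)       # 'editor': real package and virtual name
        elif is_pkg:
            pkgs.add(current)       # 'openssh-server'
        elif is_vpkg:
            vpkgs.add(current)      # 'mail-transport-agent'
        else:
            unknown.add(current)    # 'no-such-pkg'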
- def validate_structures(self, meta, structures):
- '''Ensure client configurations include all needed prerequisites
-
- Arguments:
- meta - client metadata instance
- structures - a list of structure-stage entry combinations
- '''
- if self.disableResolver:
- # Config requests no resolver
- return
-
- initial = set([pkg.get('name') for struct in structures \
- for pkg in struct.findall('Package') +
- struct.findall('BoundPackage')])
- news = lxml.etree.Element('Independent')
- packages, unknown, ptype = self.complete(meta, initial,
- debug=self.debug_flag)
- if unknown:
- self.logger.info("Got unknown entries")
- self.logger.info(list(unknown))
- newpkgs = list(packages.difference(initial))
- newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(news, 'BoundPackage', name=pkg,
- type=ptype, version='auto', origin='Packages')
- structures.append(news)
-
- def make_non_redundant(self, meta, plname=None, plist=None):
- '''build a non-redundant version of a list of packages
-
- Arguments:
- meta - client metadata instance
- plname - name of file containing a list of packages
- plist - list of package names
- '''
- if plname is not None:
- pkgnames = set([x.strip() for x in open(plname).readlines()])
- elif plist is not None:
- pkgnames = set(plist)
- redundant = set()
- sources = self.get_matching_sources(meta)
- for source in sources:
- for pkgname in pkgnames:
- if source.is_package(meta, pkgname):
- try:
- deps = source.get_deps(meta, pkgname)
- except:
- continue
- for rpkg in deps:
- if rpkg in pkgnames:
- redundant.add(rpkg)
- return pkgnames.difference(redundant), redundant
-
- def Refresh(self):
- '''Packages.Refresh() => True|False\nReload configuration specification and download sources\n'''
- self._load_config(force_update=True)
- return True
-
- def Reload(self):
- '''Packages.Reload() => True|False\nReload configuration specification and sources\n'''
- self._load_config()
- return True
-
- def _load_config(self, force_update=False):
- '''
- Load the configuration data and setup sources
-
- Keyword args:
- force_update Force downloading repo data
- '''
- self.virt_pkgs = dict()
- try:
- xdata = lxml.etree.parse(self.data + '/config.xml')
- xdata.xinclude()
- xdata = xdata.getroot()
- except (lxml.etree.XIncludeError, \
- lxml.etree.XMLSyntaxError), xmlerr:
- self.logger.error("Package: Error processing xml: %s" % xmlerr)
- raise Bcfg2.Server.Plugin.PluginInitError
- except IOError:
- self.logger.error("Failed to read Packages configuration. Have" +
- " you created your config.xml file?")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- # Load Packages config
- config = xdata.xpath('//Sources/Config')
- if config:
- if config[0].get("resolver", "enabled").lower() == "disabled":
- self.logger.info("Packages: Resolver disabled")
- self.disableResolver = True
- if config[0].get("metadata", "enabled").lower() == "disabled":
- self.logger.info("Packages: Metadata disabled")
- self.disableResolver = True
- self.disableMetaData = True
-
- self.sentinels = set()
- self.sources = []
- for s in xdata.findall('.//APTSource'):
- self.sources.append(APTSource(self.cachepath, **source_from_xml(s)))
- for s in xdata.findall('.//YUMSource'):
- self.sources.append(YUMSource(self.cachepath, **source_from_xml(s)))
- for s in xdata.findall('.//PACSource'):
- self.sources.append(PACSource(self.cachepath, **source_from_xml(s)))
-
- cachefiles = []
- for source in self.sources:
- cachefiles.append(source.cachefile)
- if not self.disableMetaData:
- source.setup_data(force_update)
- self.sentinels.update(source.basegroups)
- for cfile in glob.glob("%s/cache-*" % self.cachepath):
- if cfile not in cachefiles:
- os.unlink(cfile)
-
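For orientation, a Packages config.xml accepted by this loader looks roughly as follows. The Config attributes and the Arch element are taken from the code in this file; the remaining element names mirror the source constructor arguments and should be treated as an illustrative assumption:

    import lxml.etree

    xdata = lxml.etree.XML("""
    <Sources>
      <Config resolver="enabled" metadata="disabled"/>
      <APTSource>
        <URL>http://ftp.debian.org/debian/</URL>
        <Version>lenny</Version>
        <Component>main</Component>
        <Arch>i386</Arch>
      </APTSource>
    </Sources>
    """)
    config = xdata.xpath('//Sources/Config')
    print(config[0].get("metadata", "enabled").lower() == "disabled")  # True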
- def get_additional_data(self, meta):
- sdata = []
- [sdata.extend(copy.deepcopy(src.url_map)) for src in self.get_matching_sources(meta)]
- return dict(sources=sdata)
diff --git a/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py
deleted file mode 100644
index b58a7c91d..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ /dev/null
@@ -1,155 +0,0 @@
-'''This module implements a package management scheme for all images'''
-__revision__ = '$Revision$'
-
-import logging
-import re
-import Bcfg2.Server.Plugin
-
-logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
-
-class FuzzyDict(dict):
- fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)')
- def __getitem__(self, key):
- if isinstance(key, str):
- mdata = self.fuzzy.match(key)
- if mdata:
- return dict.__getitem__(self, mdata.groupdict()['name'])
- else:
- print("Got non-string key %s" % key)
- return dict.__getitem__(self, key)
-
- def has_key(self, key):
- if isinstance(key, str):
- mdata = self.fuzzy.match(key)
- if mdata:
- return dict.has_key(self, mdata.groupdict()['name'])
- return dict.has_key(self, key)
-
- def get(self, key, default=None):
- try:
- return self.__getitem__(key)
- except KeyError:
- return default
-
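A quick usage sketch for the class above (hypothetical package names; assumes the corrected get): a key of the form "name:arch1,arch2" falls back to the entry stored under plain "name".

    d = FuzzyDict()
    d['glibc'] = {'version': '2.5'}
    print(d['glibc:i386,x86_64'])   # {'version': '2.5'} -- fuzzy match on 'glibc'
    print(d.get('nosuchpkg'))       # None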
-class PNode(Bcfg2.Server.Plugin.INode):
- """PNode has a list of packages available at a particular group intersection."""
- splitters = {'rpm': re.compile(r'^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' +
- r'(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
- 'encap': re.compile(r'^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+)\.encap.*$')}
- ignore = ['Package']
-
- def Match(self, metadata, data):
- """Return a dictionary of package mappings."""
- if self.predicate(metadata):
- for key in self.contents:
- try:
- data[key].update(self.contents[key])
- except:
- data[key] = FuzzyDict()
- data[key].update(self.contents[key])
- for child in self.children:
- child.Match(metadata, data)
-
- def __init__(self, data, pdict, parent=None):
- # copy local attributes to all child nodes if no local attribute exists
- if not pdict.has_key('Package'):
- pdict['Package'] = set()
- for child in data.getchildren():
- for attr in [key for key in data.attrib.keys() \
- if key != 'name' and not child.attrib.has_key(key)]:
- try:
- child.set(attr, data.get(attr))
- except:
- # don't fail on things like comments and other immutable elements
- pass
- Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent)
- if not self.contents.has_key('Package'):
- self.contents['Package'] = FuzzyDict()
- for pkg in data.findall('./Package'):
- if pkg.attrib.has_key('name') and pkg.get('name') not in pdict['Package']:
- pdict['Package'].add(pkg.get('name'))
- if pkg.get('name') is not None:
- self.contents['Package'][pkg.get('name')] = {}
- if pkg.getchildren():
- self.contents['Package'][pkg.get('name')]['__children__'] \
- = pkg.getchildren()
- if pkg.attrib.has_key('simplefile'):
- pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
- self.contents['Package'][pkg.get('name')].update(pkg.attrib)
- else:
- if pkg.attrib.has_key('file'):
- if pkg.attrib.has_key('multiarch'):
- archs = pkg.get('multiarch').split()
- srcs = pkg.get('srcs', pkg.get('multiarch')).split()
- url = ' '.join(["%s/%s" % (pkg.get('uri'), pkg.get('file') % {'src':srcs[idx], 'arch':archs[idx]})
- for idx in range(len(archs))])
- pkg.set('url', url)
- else:
- pkg.set('url', '%s/%s' % (pkg.get('uri'), pkg.get('file')))
- if self.splitters.has_key(pkg.get('type')) and pkg.get('file') != None:
- mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
- if not mdata:
- logger.error("Failed to match pkg %s" % pkg.get('file'))
- continue
- pkgname = mdata.group('name')
- self.contents['Package'][pkgname] = mdata.groupdict()
- self.contents['Package'][pkgname].update(pkg.attrib)
- if pkg.attrib.get('file'):
- self.contents['Package'][pkgname]['url'] = pkg.get('url')
- self.contents['Package'][pkgname]['type'] = pkg.get('type')
- if pkg.get('verify'):
- self.contents['Package'][pkgname]['verify'] = pkg.get('verify')
- if pkg.get('multiarch'):
- self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch')
- if pkgname not in pdict['Package']:
- pdict['Package'].add(pkgname)
- if pkg.getchildren():
- self.contents['Package'][pkgname]['__children__'] = pkg.getchildren()
- else:
- self.contents['Package'][pkg.get('name')].update(pkg.attrib)
-
-
-class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
- """PkgSrc files contain a PNode hierarchy that returns matching package entries."""
- __node__ = PNode
- __cacheobj__ = FuzzyDict
-
-class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
- """This is a generator that handles package assignments."""
- name = 'Pkgmgr'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = PkgSrc
- __element__ = 'Package'
-
- def HandleEvent(self, event):
- '''Handle events and update dispatch table'''
- Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
- for src in self.entries.values():
- for itype, children in src.items.iteritems():
- for child in children:
- try:
- self.Entries[itype][child] = self.BindEntry
- except KeyError:
- self.Entries[itype] = FuzzyDict([(child,
- self.BindEntry)])
-
- def BindEntry(self, entry, metadata):
- """Bind data for entry, and remove instances that are not requested."""
- pname = entry.get('name')
- Bcfg2.Server.Plugin.PrioDir.BindEntry(self, entry, metadata)
- if entry.findall('Instance'):
- mdata = FuzzyDict.fuzzy.match(pname)
- if mdata:
- arches = mdata.group('alist').split(',')
- [entry.remove(inst) for inst in \
- entry.findall('Instance') \
- if inst.get('arch') not in arches]
-
- def HandlesEntry(self, entry, metadata):
- return entry.tag == 'Package' and entry.get('name').split(':')[0] in self.Entries['Package'].keys()
-
- def HandleEntry(self, entry, metadata):
- self.BindEntry(entry, metadata)
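To see the instance filtering in BindEntry in action, here is a self-contained sketch with a hypothetical entry: a request named "glibc:i386,x86_64" keeps only the Instance elements whose arch appears in the fuzzy arch list.

    import re
    import lxml.etree

    fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)')
    entry = lxml.etree.XML('<Package name="glibc:i386,x86_64">'
                           '<Instance arch="i386"/><Instance arch="ia64"/>'
                           '</Package>')
    mdata = fuzzy.match(entry.get('name'))
    arches = mdata.group('alist').split(',')
    for inst in entry.findall('Instance'):
        if inst.get('arch') not in arches:
            entry.remove(inst)
    print(lxml.etree.tostring(entry))
    # <Package name="glibc:i386,x86_64"><Instance arch="i386"/></Package>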
diff --git a/build/lib/Bcfg2/Server/Plugins/Probes.py b/build/lib/Bcfg2/Server/Plugins/Probes.py
deleted file mode 100644
index c00185732..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Probes.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import lxml.etree
-import re
-
-import Bcfg2.Server.Plugin
-
-specific_probe_matcher = re.compile(r"(.*/)?(?P<basename>\S+)(\.(?P<mode>[GH](\d\d)?)_\S+)")
-probe_matcher = re.compile(r"(.*/)?(?P<basename>\S+)")
-
-class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
- ignore = re.compile(r"^(\.#.*|.*~|\..*\.(tmp|sw[px])|probed\.xml)$")
- def __init__(self, path, fam, encoding, plugin_name):
- fpattern = r'[0-9A-Za-z_\-]+'
- self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- Bcfg2.Server.Plugin.SpecificData,
- encoding)
- fam.AddMonitor(path, self)
- self.bangline = re.compile('^#!(?P<interpreter>.*)$')
-
- def HandleEvent(self, event):
- if event.filename != self.path:
- return self.handle_event(event)
-
- def get_probe_data(self, metadata):
- ret = []
- build = dict()
- candidates = self.get_matching(metadata)
- candidates.sort(lambda x, y: cmp(x.specific, y.specific))
- for entry in candidates:
- rem = specific_probe_matcher.match(entry.name)
- if not rem:
- rem = probe_matcher.match(entry.name)
- pname = rem.group('basename')
- if pname not in build:
- build[pname] = entry
-
- for (name, entry) in build.iteritems():
- probe = lxml.etree.Element('probe')
- probe.set('name', name.split('/')[-1])
- probe.set('source', self.plugin_name)
- probe.text = entry.data
- match = self.bangline.match(entry.data.split('\n')[0])
- if match:
- probe.set('interpreter', match.group('interpreter'))
- else:
- probe.set('interpreter', '/bin/sh')
- ret.append(probe)
- return ret
-
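The bangline handling in get_probe_data above reduces to the following standalone sketch (probe contents hypothetical): the first line of a probe decides the interpreter, defaulting to /bin/sh.

    import re

    bangline = re.compile('^#!(?P<interpreter>.*)$')

    def interpreter_for(probe_text):
        match = bangline.match(probe_text.split('\n')[0])
        if match:
            return match.group('interpreter')
        return '/bin/sh'

    print(interpreter_for('#!/usr/bin/env python\nprint "ok"'))  # /usr/bin/env python
    print(interpreter_for('echo hello'))                         # /bin/sh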
-class Probes(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Probing,
- Bcfg2.Server.Plugin.Connector):
- """A plugin to gather information from a client machine."""
- name = 'Probes'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.Probing.__init__(self)
-
- try:
- self.probes = ProbeSet(self.data, core.fam, core.encoding,
- self.name)
- except:
- raise Bcfg2.Server.Plugin.PluginInitError
-
- self.probedata = dict()
- self.cgroups = dict()
- self.load_data()
-
- def write_data(self):
- """Write probe data out for use with bcfg2-info."""
- top = lxml.etree.Element("Probed")
- for client, probed in self.probedata.iteritems():
- cx = lxml.etree.SubElement(top, 'Client', name=client)
- for probe in probed:
- lxml.etree.SubElement(cx, 'Probe', name=probe,
- value=self.probedata[client][probe])
- for group in self.cgroups[client]:
- lxml.etree.SubElement(cx, "Group", name=group)
- data = lxml.etree.tostring(top, encoding='UTF-8', xml_declaration=True,
- pretty_print=True)
- try:
- datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w')
- except IOError:
- self.logger.error("Failed to write probed.xml")
- return
- datafile.write(data)
-
- def load_data(self):
- try:
- data = lxml.etree.parse(self.data + '/probed.xml').getroot()
- except:
- self.logger.error("Failed to read file probed.xml")
- return
- self.probedata = {}
- self.cgroups = {}
- for client in data.getchildren():
- self.probedata[client.get('name')] = {}
- self.cgroups[client.get('name')]=[]
- for pdata in client:
- if (pdata.tag == 'Probe'):
- self.probedata[client.get('name')][pdata.get('name')] = pdata.get('value')
- elif (pdata.tag == 'Group'):
- self.cgroups[client.get('name')].append(pdata.get('name'))
-
- def GetProbes(self, meta, force=False):
- """Return a set of probes for execution on client."""
- return self.probes.get_probe_data(meta)
-
- def ReceiveData(self, client, datalist):
- self.cgroups[client.hostname] = []
- self.probedata[client.hostname] = {}
- for data in datalist:
- self.ReceiveDataItem(client, data)
- self.write_data()
-
- def ReceiveDataItem(self, client, data):
- """Receive probe results pertaining to client."""
- if client.hostname not in self.cgroups:
- self.cgroups[client.hostname] = []
- if data.text is None:
- self.logger.error("Got null response to probe %s from %s" % \
- (data.get('name'), client.hostname))
- try:
- self.probedata[client.hostname].update({data.get('name'): ''})
- except KeyError:
- self.probedata[client.hostname] = {data.get('name'): ''}
- return
- dlines = data.text.split('\n')
- self.logger.debug("%s:probe:%s:%s" % (client.hostname,
- data.get('name'), [line.strip() for line in dlines]))
- for line in dlines[:]:
- if line.split(':')[0] == 'group':
- newgroup = line.split(':')[1].strip()
- if newgroup not in self.cgroups[client.hostname]:
- self.cgroups[client.hostname].append(newgroup)
- dlines.remove(line)
- dtext = "\n".join(dlines)
- try:
- self.probedata[client.hostname].update({data.get('name'):dtext})
- except KeyError:
- self.probedata[client.hostname] = {data.get('name'):dtext}
-
- def get_additional_groups(self, meta):
- return self.cgroups.get(meta.hostname, list())
-
- def get_additional_data(self, meta):
- return self.probedata.get(meta.hostname, dict())
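The group-line convention in ReceiveDataItem above reduces to this sketch (hypothetical probe output): lines of the form "group:<name>" become dynamic groups, and whatever remains is stored verbatim as the probe value.

    output = "group:webserver\ngroup:ubuntu\nkernel=2.6.32"
    dlines = output.split('\n')
    groups = []
    for line in dlines[:]:
        if line.split(':')[0] == 'group':
            groups.append(line.split(':')[1].strip())
            dlines.remove(line)
    print(groups)              # ['webserver', 'ubuntu']
    print("\n".join(dlines))   # kernel=2.6.32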
diff --git a/build/lib/Bcfg2/Server/Plugins/Properties.py b/build/lib/Bcfg2/Server/Plugins/Properties.py
deleted file mode 100644
index 86330f6a0..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Properties.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import copy
-import lxml.etree
-
-import Bcfg2.Server.Plugin
-
-
-class PropertyFile(Bcfg2.Server.Plugin.XMLFileBacked):
- """Class for properties files."""
-
- def Index(self):
- """Build data into an xml object."""
- try:
- self.data = lxml.etree.XML(self.data)
- except lxml.etree.XMLSyntaxError:
- Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
-
-
-class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
- __child__ = PropertyFile
-
-
-class Properties(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector):
- """
- The properties plugin maps property
- files into client metadata instances.
- """
- name = 'Properties'
- version = '$Revision$'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.store = PropDirectoryBacked(self.data, core.fam)
-
- def get_additional_data(self, _):
- return copy.deepcopy(self.store.entries)
diff --git a/build/lib/Bcfg2/Server/Plugins/Rules.py b/build/lib/Bcfg2/Server/Plugins/Rules.py
deleted file mode 100644
index eb0547cdb..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Rules.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""This generator provides rule-based entry mappings."""
-__revision__ = '$Revision$'
-
-import Bcfg2.Server.Plugin
-
-
-class Rules(Bcfg2.Server.Plugin.PrioDir):
- """This is a generator that handles service assignments."""
- name = 'Rules'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
diff --git a/build/lib/Bcfg2/Server/Plugins/SGenshi.py b/build/lib/Bcfg2/Server/Plugins/SGenshi.py
deleted file mode 100644
index cead06e34..000000000
--- a/build/lib/Bcfg2/Server/Plugins/SGenshi.py
+++ /dev/null
@@ -1,76 +0,0 @@
-'''This module implements a templating generator based on Genshi'''
-__revision__ = '$Revision$'
-
-import genshi.input
-import genshi.template
-import lxml.etree
-import logging
-
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Plugins.TGenshi
-
-logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
-
-
-class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile):
-
- def get_xml_value(self, metadata):
- if not hasattr(self, 'template'):
- logger.error("No parsed template information for %s" % (self.name))
- raise Bcfg2.Server.Plugin.PluginExecutionError
- try:
- stream = self.template.generate(metadata=metadata,).filter( \
- Bcfg2.Server.Plugins.TGenshi.removecomment)
- data = stream.render('xml', strip_whitespace=False)
- return lxml.etree.XML(data)
- except LookupError, lerror:
- logger.error('Genshi lookup error: %s' % lerror)
- except genshi.template.TemplateError, terror:
- logger.error('Genshi template error: %s' % terror)
- except genshi.input.ParseError, perror:
- logger.error('Genshi parse error: %s' % perror)
- raise
-
-
-class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
-
- def __init__(self, path, fam, encoding):
- fpattern = r'\S+\.xml'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- SGenshiTemplateFile, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- '''passthrough event handler for old calling convention'''
- if event.filename != self.path:
- return self.handle_event(event)
-
- def BuildStructures(self, metadata):
- """Build SGenshi structures."""
- ret = []
- for entry in self.get_matching(metadata):
- try:
- ret.append(entry.get_xml_value(metadata))
- except:
- logger.error("SGenshi: Failed to template file %s" % entry.name)
- return ret
-
-
-class SGenshi(SGenshiEntrySet,
- Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure):
- """The SGenshi plugin provides templated structures."""
- name = 'SGenshi'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- try:
- SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding)
- except:
- logger.error("Failed to load %s repository; disabling %s" \
- % (self.name, self.name))
- raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Plugins/SSHbase.py b/build/lib/Bcfg2/Server/Plugins/SSHbase.py
deleted file mode 100644
index 6d68ecb0a..000000000
--- a/build/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ /dev/null
@@ -1,279 +0,0 @@
-'''This module manages ssh key files for bcfg2'''
-__revision__ = '$Revision$'
-
-import binascii
-import os
-import socket
-import shutil
-import tempfile
-from subprocess import Popen, PIPE
-import Bcfg2.Server.Plugin
-
-
-class SSHbase(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator,
- Bcfg2.Server.Plugin.DirectoryBacked,
- Bcfg2.Server.Plugin.PullTarget):
- """
- The sshbase generator manages ssh host keys (both v1 and v2)
- for hosts. It also manages the ssh_known_hosts file. It can
- integrate host keys from other management domains and similarly
- export its keys. The repository contains files in the following
- formats:
-
- ssh_host_key.H_(hostname) -> the v1 host private key for
- (hostname)
- ssh_host_key.pub.H_(hostname) -> the v1 host public key
- for (hostname)
- ssh_host_(dr)sa_key.H_(hostname) -> the v2 ssh host
- private key for (hostname)
- ssh_host_(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
- public key for (hostname)
- ssh_known_hosts -> the current known hosts file. this
- is regenerated each time a new key is generated.
-
- """
- name = 'SSHbase'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- pubkeys = ["ssh_host_dsa_key.pub.H_%s",
- "ssh_host_rsa_key.pub.H_%s", "ssh_host_key.pub.H_%s"]
- hostkeys = ["ssh_host_dsa_key.H_%s",
- "ssh_host_rsa_key.H_%s", "ssh_host_key.H_%s"]
- keypatterns = ['ssh_host_dsa_key', 'ssh_host_rsa_key', 'ssh_host_key',
- 'ssh_host_dsa_key.pub', 'ssh_host_rsa_key.pub',
- 'ssh_host_key.pub']
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- Bcfg2.Server.Plugin.PullTarget.__init__(self)
- try:
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
- self.core.fam)
- except OSError, ioerr:
- self.logger.error("Failed to load SSHbase repository from %s" \
- % (self.data))
- self.logger.error(ioerr)
- raise Bcfg2.Server.Plugin.PluginInitError
- self.Entries = {'Path':
- {'/etc/ssh/ssh_known_hosts': self.build_skn,
- '/etc/ssh/ssh_host_dsa_key': self.build_hk,
- '/etc/ssh/ssh_host_rsa_key': self.build_hk,
- '/etc/ssh/ssh_host_dsa_key.pub': self.build_hk,
- '/etc/ssh/ssh_host_rsa_key.pub': self.build_hk,
- '/etc/ssh/ssh_host_key': self.build_hk,
- '/etc/ssh/ssh_host_key.pub': self.build_hk}}
- self.ipcache = {}
- self.namecache = {}
- self.__skn = False
-
- def get_skn(self):
- """Build memory cache of the ssh known hosts file."""
- if not self.__skn:
- self.__skn = "\n".join([value.data for key, value in \
- self.entries.iteritems() if \
- key.endswith('.static')])
- names = dict()
- # if no metadata is registered yet, defer
- if len(self.core.metadata.query.all()) == 0:
- self.__skn = False
- return self.__skn
- for cmeta in self.core.metadata.query.all():
- names[cmeta.hostname] = set([cmeta.hostname])
- names[cmeta.hostname].update(cmeta.aliases)
- newnames = set()
- newips = set()
- for name in names[cmeta.hostname]:
- newnames.add(name.split('.')[0])
- try:
- newips.add(self.get_ipcache_entry(name)[0])
- except:
- continue
- names[cmeta.hostname].update(newnames)
- names[cmeta.hostname].update(cmeta.addresses)
- names[cmeta.hostname].update(newips)
- # TODO: Only perform reverse lookups on IPs if an option is set.
- if True:
- for ip in newips:
- try:
- names[cmeta.hostname].update(self.get_namecache_entry(ip))
- except:
- continue
- names[cmeta.hostname] = sorted(names[cmeta.hostname])
- # now we have our name cache
- pubkeys = [pubk for pubk in self.entries.keys() \
- if pubk.find('.pub.H_') != -1]
- pubkeys.sort()
- badnames = set()
- for pubkey in pubkeys:
- hostname = pubkey.split('H_')[1]
- if hostname not in names:
- if hostname not in badnames:
- badnames.add(hostname)
- self.logger.error("SSHbase: Unknown host %s; ignoring public keys" % hostname)
- continue
- self.__skn += "%s %s" % (','.join(names[hostname]),
- self.entries[pubkey].data)
- return self.__skn
-
- def set_skn(self, value):
- """Set backing data for skn."""
- self.__skn = value
- skn = property(get_skn, set_skn)
-
- def HandleEvent(self, event=None):
- """Local event handler that does skn regen on pubkey change."""
- Bcfg2.Server.Plugin.DirectoryBacked.HandleEvent(self, event)
- if event and '_key.pub.H_' in event.filename:
- self.skn = False
- if event and event.filename.endswith('.static'):
- self.skn = False
- if not self.__skn:
- if (len(self.entries.keys())) >= (len(os.listdir(self.data))-1):
- _ = self.skn
-
- def HandlesEntry(self, entry, _):
- """Handle key entries dynamically."""
- return entry.tag == 'Path' and \
- ([fpat for fpat in self.keypatterns
- if entry.get('name').endswith(fpat)]
- or entry.get('name').endswith('ssh_known_hosts'))
-
- def HandleEntry(self, entry, metadata):
- """Bind data."""
- if entry.get('name').endswith('ssh_known_hosts'):
- return self.build_skn(entry, metadata)
- else:
- return self.build_hk(entry, metadata)
-
- def get_ipcache_entry(self, client):
- """Build a cache of dns results."""
- if client in self.ipcache:
- if self.ipcache[client]:
- return self.ipcache[client]
- else:
- raise socket.gaierror
- else:
- # need to add entry
- try:
- ipaddr = socket.gethostbyname(client)
- self.ipcache[client] = (ipaddr, client)
- return (ipaddr, client)
- except socket.gaierror:
- cmd = "getent hosts %s" % client
- output = Popen(cmd, shell=True,
- stdout=PIPE).stdout.read().strip().split()
- if output:
- ipaddr = output[0]
- self.ipcache[client] = (ipaddr, client)
- return (ipaddr, client)
- self.ipcache[client] = False
- self.logger.error("Failed to find IP address for %s" % client)
- raise socket.gaierror
-
- def get_namecache_entry(self, cip):
- """Build a cache of name lookups from client IP addresses."""
- if cip in self.namecache:
- # lookup cached name from IP
- if self.namecache[cip]:
- return self.namecache[cip]
- else:
- raise socket.gaierror
- else:
- # add an entry that has not been cached
- try:
- rvlookup = socket.gethostbyaddr(cip)
- if rvlookup[0]:
- self.namecache[cip] = [rvlookup[0]]
- else:
- self.namecache[cip] = []
- self.namecache[cip].extend(rvlookup[1])
- return self.namecache[cip]
- except socket.gaierror:
- self.namecache[cip] = False
- self.logger.error("Failed to find any names associated with IP address %s" % cip)
- raise
-
- def build_skn(self, entry, metadata):
- """This function builds builds a host specific known_hosts file."""
- client = metadata.hostname
- entry.text = self.skn
- hostkeys = [keytmpl % client for keytmpl in self.pubkeys \
- if (keytmpl % client) in self.entries]
- hostkeys.sort()
- for hostkey in hostkeys:
- entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" % (
- self.entries[hostkey].data)
- permdata = {'owner':'root',
- 'group':'root',
- 'type':'file',
- 'perms':'0644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
- def build_hk(self, entry, metadata):
- """This binds host key data into entries."""
- client = metadata.hostname
- filename = "%s.H_%s" % (entry.get('name').split('/')[-1], client)
- if filename not in self.entries.keys():
- self.GenerateHostKeys(client)
- if not filename in self.entries:
- self.logger.error("%s still not registered" % filename)
- raise Bcfg2.Server.Plugin.PluginExecutionError
- keydata = self.entries[filename].data
- permdata = {'owner':'root',
- 'group':'root',
- 'type':'file',
- 'perms':'0600'}
- if entry.get('name')[-4:] == '.pub':
- permdata['perms'] = '0644'
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
- if "ssh_host_key.H_" == filename[:15]:
- entry.attrib['encoding'] = 'base64'
- entry.text = binascii.b2a_base64(keydata)
- else:
- entry.text = keydata
-
- def GenerateHostKeys(self, client):
- """Generate new host keys for client."""
- keylist = [keytmpl % client for keytmpl in self.hostkeys]
- for hostkey in keylist:
- if hostkey.startswith('ssh_host_rsa_key.H_'):
- keytype = 'rsa'
- elif hostkey.startswith('ssh_host_dsa_key.H_'):
- keytype = 'dsa'
- else:
- keytype = 'rsa1'
-
- if hostkey not in self.entries.keys():
- fileloc = "%s/%s" % (self.data, hostkey)
- publoc = self.data + '/' + ".".join([hostkey.split('.')[0],
- 'pub',
- "H_%s" % client])
- tempdir = tempfile.mkdtemp()
- temploc = "%s/%s" % (tempdir, hostkey)
- cmd = 'ssh-keygen -q -f %s -N "" -t %s -C root@%s < /dev/null'
- os.system(cmd % (temploc, keytype, client))
- shutil.copy(temploc, fileloc)
- shutil.copy("%s.pub" % temploc, publoc)
- self.AddEntry(hostkey)
- self.AddEntry(".".join([hostkey.split('.')[0]]+['pub', "H_%s" \
- % client]))
- try:
- os.unlink(temploc)
- os.unlink("%s.pub" % temploc)
- os.rmdir(tempdir)
- except OSError:
- self.logger.error("Failed to unlink temporary ssh keys")
-
- def AcceptChoices(self, _, metadata):
- return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]
-
- def AcceptPullData(self, specific, entry, log):
- """Per-plugin bcfg2-admin pull support."""
- # specific will always be host specific
- filename = "%s/%s.H_%s" % (self.data, entry['name'].split('/')[-1],
- specific.hostname)
- open(filename, 'w').write(entry['text'])
- if log:
- print("Wrote file %s" % filename)
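The ssh_known_hosts lines assembled in get_skn above follow a simple format, sketched here with a hypothetical host and key: every known name and address for a host is comma-joined in front of its public key data.

    names = ['node1', 'node1.example.com', '10.0.0.5']
    pubkey = 'ssh-rsa AAAAB3Nza(hypothetical) root@node1\n'
    print("%s %s" % (','.join(sorted(names)), pubkey))
    # 10.0.0.5,node1,node1.example.com ssh-rsa AAAAB3Nza(hypothetical) root@node1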
diff --git a/build/lib/Bcfg2/Server/Plugins/SSLCA.py b/build/lib/Bcfg2/Server/Plugins/SSLCA.py
deleted file mode 100644
index 0dc448e69..000000000
--- a/build/lib/Bcfg2/Server/Plugins/SSLCA.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import Bcfg2.Server.Plugin
-import Bcfg2.Options
-import lxml.etree
-import posixpath
-import tempfile
-import os
-from subprocess import Popen, PIPE, STDOUT
-from ConfigParser import ConfigParser
-
-class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
- """
- The SSLCA generator handles the creation and
- management of ssl certificates and their keys.
- """
- name = 'SSLCA'
- __version__ = '$Id:$'
- __author__ = 'g.hagger@gmail.com'
- __child__ = Bcfg2.Server.Plugin.FileBacked
- key_specs = {}
- cert_specs = {}
- CAs = {}
-
- def HandleEvent(self, event=None):
- """
- Updates which files this plugin handles based upon filesystem events.
- Allows configuration items to be added/removed without server restarts.
- """
- action = event.code2str()
- if event.filename[0] == '/':
- return
- epath = "".join([self.data, self.handles[event.requestID],
- event.filename])
- if posixpath.isdir(epath):
- ident = self.handles[event.requestID] + event.filename
- else:
- ident = self.handles[event.requestID][:-1]
-
- fname = "".join([ident, '/', event.filename])
-
- if event.filename.endswith('.xml'):
- if action in ['exists', 'created', 'changed']:
- if event.filename.endswith('key.xml'):
- key_spec = dict(lxml.etree.parse(epath).find('Key').items())
- self.key_specs[ident] = {
- 'bits': key_spec.get('bits', 2048),
- 'type': key_spec.get('type', 'rsa')
- }
- self.Entries['Path'][ident] = self.get_key
- elif event.filename.endswith('cert.xml'):
- cert_spec = dict(lxml.etree.parse(epath).find('Cert').items())
- ca = cert_spec.get('ca', 'default')
- self.cert_specs[ident] = {
- 'ca': ca,
- 'format': cert_spec.get('format', 'pem'),
- 'key': cert_spec.get('key'),
- 'days': cert_spec.get('days', 365),
- 'C': cert_spec.get('c'),
- 'L': cert_spec.get('l'),
- 'ST': cert_spec.get('st'),
- 'OU': cert_spec.get('ou'),
- 'O': cert_spec.get('o'),
- 'emailAddress': cert_spec.get('emailaddress')
- }
- cp = ConfigParser()
- cp.read(self.core.cfile)
- self.CAs[ca] = dict(cp.items('sslca_'+ca))
- self.Entries['Path'][ident] = self.get_cert
- if action == 'deleted':
- if ident in self.Entries['Path']:
- del self.Entries['Path'][ident]
- else:
- if action in ['exists', 'created']:
- if posixpath.isdir(epath):
- self.AddDirectoryMonitor(epath[len(self.data):])
- if fname not in self.entries and posixpath.isfile(epath):
- self.entries[fname] = self.__child__(epath)
- self.entries[fname].HandleEvent(event)
- if action == 'changed':
- self.entries[fname].HandleEvent(event)
- elif action == 'deleted':
- if fname in self.entries:
- del self.entries[fname]
- else:
- self.entries[fname].HandleEvent(event)
-
- def get_key(self, entry, metadata):
- """
- Either grabs a preexisting key hostfile or triggers generation of a
- new key if one doesn't exist.
- """
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner':'root',
- 'group':'root',
- 'type':'file',
- 'perms':'644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
- # check if we already have a hostfile, or need to generate a new key
- # TODO: verify key fits the specs
- path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1], '.H_', metadata.hostname])
- if filename not in self.entries.keys():
- key = self.build_key(filename, entry, metadata)
- open(self.data + filename, 'w').write(key)
- entry.text = key
- else:
- entry.text = self.entries[filename].data
-
- def build_key(self, filename, entry, metadata):
- """
- Generates a new key according to the specification.
- """
- key_type = self.key_specs[entry.get('name')]['type']
- bits = self.key_specs[entry.get('name')]['bits']
- if key_type == 'rsa':
- cmd = "openssl genrsa %s " % bits
- elif key_type == 'dsa':
- cmd = "openssl dsaparam -noout -genkey %s" % bits
- key = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
- return key
-
- def get_cert(self, entry, metadata):
- """
- Either grabs a preexisting cert hostfile or triggers generation of a
- new cert if one doesn't exist.
- """
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner':'root',
- 'group':'root',
- 'type':'file',
- 'perms':'644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
- path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1], '.H_', metadata.hostname])
-
- # first - ensure we have a key to work with
- key = self.cert_specs[entry.get('name')].get('key')
- key_filename = "".join([key, '/', key.rsplit('/', 1)[1], '.H_', metadata.hostname])
- if key_filename not in self.entries:
- e = lxml.etree.Element('Path')
- e.attrib['name'] = key
- self.core.Bind(e, metadata)
-
- # check if we have a valid hostfile
- if filename in self.entries.keys() and self.verify_cert(filename, entry):
- entry.text = self.entries[filename].data
- else:
- cert = self.build_cert(key_filename, entry, metadata)
- open(self.data + filename, 'w').write(cert)
- entry.text = cert
-
- def verify_cert(self, filename, entry):
- """
- check that a certificate validates against the ca cert,
- and that it has not expired.
- """
- chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
- cert = self.data + filename
- cmd = "openssl verify -CAfile %s %s" % (chaincert, cert)
- res = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
- if res == cert + ": OK\n":
- return True
- return False
-
- def build_cert(self, key_filename, entry, metadata):
- """
- creates a new certificate according to the specification
- """
- req_config = self.build_req_config(entry, metadata)
- req = self.build_request(key_filename, req_config, entry)
- ca = self.cert_specs[entry.get('name')]['ca']
- ca_config = self.CAs[ca]['config']
- days = self.cert_specs[entry.get('name')]['days']
- passphrase = self.CAs[ca].get('passphrase')
- if passphrase:
- cmd = "openssl ca -config %s -in %s -days %s -batch -passin pass:%s" % (ca_config, req, days, passphrase)
- else:
- cmd = "openssl ca -config %s -in %s -days %s -batch" % (ca_config, req, days)
- cert = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
- try:
- os.unlink(req_config)
- os.unlink(req)
- except OSError:
- self.logger.error("Failed to unlink temporary files")
- return cert
-
- def build_req_config(self, entry, metadata):
- """
- generates a temporary openssl configuration file that is
- used to generate the required certificate request
- """
- # create temp request config file
- conffile = open(tempfile.mkstemp()[1], 'w')
- cp = ConfigParser({})
- cp.optionxform = str
- defaults = {
- 'req': {
- 'default_md': 'sha1',
- 'distinguished_name': 'req_distinguished_name',
- 'req_extensions': 'v3_req',
- 'x509_extensions': 'v3_req',
- 'prompt': 'no'
- },
- 'req_distinguished_name': {},
- 'v3_req': {
- 'subjectAltName': '@alt_names'
- },
- 'alt_names': {}
- }
- for section in defaults.keys():
- cp.add_section(section)
- for key in defaults[section]:
- cp.set(section, key, defaults[section][key])
- x = 1
- altnames = list(metadata.aliases)
- altnames.append(metadata.hostname)
- for altname in altnames:
- cp.set('alt_names', 'DNS.'+str(x), altname)
- x += 1
- for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
- if self.cert_specs[entry.get('name')][item]:
- cp.set('req_distinguished_name', item, self.cert_specs[entry.get('name')][item])
- cp.set('req_distinguished_name', 'CN', metadata.hostname)
- cp.write(conffile)
- conffile.close()
- return conffile.name
-
- def build_request(self, key_filename, req_config, entry):
- """
- creates the certificate request
- """
- req = tempfile.mkstemp()[1]
- days = self.cert_specs[entry.get('name')]['days']
- key = self.data + key_filename
- cmd = "openssl req -new -config %s -days %s -key %s -text -out %s" % (req_config, days, key, req)
- res = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
- return req
-
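For reference, the request config that build_req_config writes for a hypothetical host node1.example.com with alias node1 would look roughly like this (section order may vary; assembled from the defaults above):

    [req]
    default_md = sha1
    distinguished_name = req_distinguished_name
    req_extensions = v3_req
    x509_extensions = v3_req
    prompt = no

    [req_distinguished_name]
    CN = node1.example.com

    [v3_req]
    subjectAltName = @alt_names

    [alt_names]
    DNS.1 = node1
    DNS.2 = node1.example.com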
diff --git a/build/lib/Bcfg2/Server/Plugins/Snapshots.py b/build/lib/Bcfg2/Server/Plugins/Snapshots.py
deleted file mode 100644
index a4489ae95..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Snapshots.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#import lxml.etree
-import logging
-import binascii
-import difflib
-#import sqlalchemy
-#import sqlalchemy.orm
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Snapshots
-import Bcfg2.Logger
-from Bcfg2.Server.Snapshots.model import Snapshot
-import Queue
-import time
-import threading
-
-logger = logging.getLogger('Snapshots')
-
-ftypes = ['ConfigFile', 'SymLink', 'Directory']
-datafields = {
- 'Package': ['version'],
- 'Path': ['type'],
- 'Service': ['status'],
- 'ConfigFile': ['owner', 'group', 'perms'],
- 'Directory': ['owner', 'group', 'perms'],
- 'SymLink': ['to'],
- }
-
-def build_snap_ent(entry):
- basefields = []
- if entry.tag in ['Package', 'Service']:
- basefields += ['type']
- desired = dict([(key, unicode(entry.get(key))) for key in basefields])
- state = dict([(key, unicode(entry.get(key))) for key in basefields])
- desired.update([(key, unicode(entry.get(key))) for key in \
- datafields[entry.tag]])
- if entry.tag == 'ConfigFile' or \
- ((entry.tag == 'Path') and (entry.get('type') == 'file')):
- if entry.text is None:
- desired['contents'] = None
- else:
- if entry.get('encoding', 'ascii') == 'ascii':
- desired['contents'] = unicode(entry.text)
- else:
- desired['contents'] = unicode(binascii.a2b_base64(entry.text))
-
- if 'current_bfile' in entry.attrib:
- state['contents'] = unicode(binascii.a2b_base64( \
- entry.get('current_bfile')))
- elif 'current_bdiff' in entry.attrib:
- diff = binascii.a2b_base64(entry.get('current_bdiff'))
- state['contents'] = unicode( \
- '\n'.join(difflib.restore(diff.split('\n'), 1)))
-
- state.update([(key, unicode(entry.get('current_' + key, entry.get(key)))) \
- for key in datafields[entry.tag]])
- if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false':
- state = None
- return [desired, state]
-
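The current_bdiff handling in build_snap_ent above decodes a base64-encoded ndiff uploaded by the client; a minimal round-trip sketch (hypothetical file contents):

    import binascii
    import difflib

    desired = ['one', 'two']
    current = ['one', 'TWO']
    delta = "\n".join(difflib.ndiff(current, desired))
    current_bdiff = binascii.b2a_base64(delta)

    decoded = binascii.a2b_base64(current_bdiff)
    restored = "\n".join(difflib.restore(decoded.split('\n'), 1))
    print(restored)   # one\nTWO -- sequence 1 of the delta, i.e. the client state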
-
-class Snapshots(Bcfg2.Server.Plugin.Statistics,
- Bcfg2.Server.Plugin.Plugin):
- name = 'Snapshots'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
- self.work_queue = Queue.Queue()
- self.loader = threading.Thread(target=self.load_snapshot)
- self.loader.start()
-
- def load_snapshot(self):
- while self.running:
- try:
- (metadata, data) = self.work_queue.get(block=True, timeout=5)
- except:
- continue
- self.statistics_from_old_stats(metadata, data)
-
- def process_statistics(self, metadata, data):
- return self.work_queue.put((metadata, data))
-
- def statistics_from_old_stats(self, metadata, xdata):
- # entries are name -> (modified, correct, start, desired, end)
- # not sure we can get all of this from old format stats
- t1 = time.time()
- entries = dict([('Package', dict()),
- ('Service', dict()), ('Path', dict())])
- extra = dict([('Package', dict()), ('Service', dict()),
- ('Path', dict())])
- bad = []
- state = xdata.find('.//Statistics')
- correct = state.get('state') == 'clean'
- revision = unicode(state.get('revision', '-1'))
- for entry in state.find('.//Bad'):
- data = [False, False, unicode(entry.get('name'))] \
- + build_snap_ent(entry)
- if entry.tag in ftypes:
- etag = 'Path'
- else:
- etag = entry.tag
- entries[etag][entry.get('name')] = data
- for entry in state.find('.//Modified'):
- if entry.tag in ftypes:
- etag = 'Path'
- else:
- etag = entry.tag
- data = [True, False, unicode(entry.get('name'))] + \
- build_snap_ent(entry)
- entries[etag][entry.get('name')] = data
- for entry in state.find('.//Extra'):
- if entry.tag in datafields:
- data = build_snap_ent(entry)[1]
- ename = unicode(entry.get('name'))
- data['name'] = ename
- extra[entry.tag][ename] = data
- else:
- logger.info("Unhandled extra entry %s %s" % (entry.tag, entry.get('name')))
- t2 = time.time()
- snap = Snapshot.from_data(self.session, correct, revision,
- metadata, entries, extra)
- self.session.add(snap)
- self.session.commit()
- t3 = time.time()
- logger.info("Snapshot storage took %fs" % (t3-t2))
- return True
diff --git a/build/lib/Bcfg2/Server/Plugins/Statistics.py b/build/lib/Bcfg2/Server/Plugins/Statistics.py
deleted file mode 100644
index c7fa0e534..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Statistics.py
+++ /dev/null
@@ -1,161 +0,0 @@
-'''This file manages the statistics collected by the BCFG2 Server'''
-__revision__ = '$Revision$'
-
-import binascii
-import copy
-import difflib
-import logging
-from lxml.etree import XML, SubElement, Element, XMLSyntaxError
-import lxml.etree
-import os
-import Queue
-from time import asctime, localtime, time, strptime, mktime
-import threading
-
-import Bcfg2.Server.Plugin
-
-
-class StatisticsStore(object):
- """Manages the memory and file copy of statistics collected about client runs."""
- __min_write_delay__ = 0
-
- def __init__(self, filename):
- self.filename = filename
- self.element = Element('Dummy')
- self.dirty = 0
- self.lastwrite = 0
- self.logger = logging.getLogger('Bcfg2.Server.Statistics')
- self.ReadFromFile()
-
- def WriteBack(self, force=0):
- """Write statistics changes back to persistent store."""
- if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \
- or force:
- try:
- fout = open(self.filename + '.new', 'w')
- except IOError, ioerr:
- self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
- else:
- fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True))
- fout.close()
- os.rename(self.filename + '.new', self.filename)
- self.dirty = 0
- self.lastwrite = time()
-
- def ReadFromFile(self):
- """Reads current state regarding statistics."""
- try:
- fin = open(self.filename, 'r')
- data = fin.read()
- fin.close()
- self.element = XML(data)
- self.dirty = 0
- except (IOError, XMLSyntaxError):
- self.logger.error("Creating new statistics file %s"%(self.filename))
- self.element = Element('ConfigStatistics')
- self.WriteBack()
- self.dirty = 0
-
- def updateStats(self, xml, client):
- """Updates the statistics of a current node with new data."""
-
- # Current policy:
- # - Keep anything less than 24 hours old
- # - Keep latest clean run for clean nodes
- # - Keep latest clean and dirty run for dirty nodes
- newstat = xml.find('Statistics')
-
- if newstat.get('state') == 'clean':
- node_dirty = 0
- else:
- node_dirty = 1
-
- # Find correct node entry in stats data
- # The following list comprehension should be guaranteed to return at
- # most one result
- nodes = [elem for elem in self.element.findall('Node') \
- if elem.get('name') == client]
- nummatch = len(nodes)
- if nummatch == 0:
- # Create an entry for this node
- node = SubElement(self.element, 'Node', name=client)
- elif nummatch == 1 and not node_dirty:
- # Delete old instance
- node = nodes[0]
- [node.remove(elem) for elem in node.findall('Statistics') \
- if self.isOlderThan24h(elem.get('time'))]
- elif nummatch == 1 and node_dirty:
- # Delete old dirty statistics entry
- node = nodes[0]
- [node.remove(elem) for elem in node.findall('Statistics') \
- if (elem.get('state') == 'dirty' \
- and self.isOlderThan24h(elem.get('time')))]
- else:
- # Shouldn't be reached
- self.logger.error("Duplicate node entry for %s" % client)
- return
-
- # Set current time for stats
- newstat.set('time', asctime(localtime()))
-
- # Add statistic
- node.append(copy.deepcopy(newstat))
-
- # Set dirty
- self.dirty = 1
- self.WriteBack(force=1)
-
- def isOlderThan24h(self, testTime):
- """Helper function to determine if <time> string is older than 24 hours."""
- now = time()
- utime = mktime(strptime(testTime))
- secondsPerDay = 60*60*24
-
- return (now-utime) > secondsPerDay
-
-
-class Statistics(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
- Bcfg2.Server.Plugin.PullSource):
- name = 'Statistics'
- __version__ = '$Id$'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.PullSource.__init__(self)
- fpath = "%s/etc/statistics.xml" % datastore
- self.data_file = StatisticsStore(fpath)
-
- def handle_statistic(self, metadata, data):
- self.data_file.updateStats(data, metadata.hostname)
-
- def FindCurrent(self, client):
- rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0]
- maxtime = max([strptime(stat.get('time')) for stat \
- in rt.findall('Statistics')])
- return [stat for stat in rt.findall('Statistics') \
- if strptime(stat.get('time')) == maxtime][0]
-
- def GetExtra(self, client):
- return [(entry.tag, entry.get('name')) for entry \
- in self.FindCurrent(client).xpath('.//Extra/*')]
-
- def GetCurrentEntry(self, client, e_type, e_name):
- curr = self.FindCurrent(client)
- entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name))
- if not entry:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- cfentry = entry[-1]
-
- owner = cfentry.get('current_owner', cfentry.get('owner'))
- group = cfentry.get('current_group', cfentry.get('group'))
- perms = cfentry.get('current_perms', cfentry.get('perms'))
- if 'current_bfile' in cfentry.attrib:
- contents = binascii.a2b_base64(cfentry.get('current_bfile'))
- elif 'current_bdiff' in cfentry.attrib:
- diff = binascii.a2b_base64(cfentry.get('current_bdiff'))
- contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
- else:
- contents = None
-
- return (owner, group, perms, contents)
diff --git a/build/lib/Bcfg2/Server/Plugins/Svcmgr.py b/build/lib/Bcfg2/Server/Plugins/Svcmgr.py
deleted file mode 100644
index 6d25c1a6d..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Svcmgr.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""This generator provides service mappings."""
-__revision__ = '$Revision$'
-
-import Bcfg2.Server.Plugin
-
-
-class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
- """This is a generator that handles service assignments."""
- name = 'Svcmgr'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
diff --git a/build/lib/Bcfg2/Server/Plugins/Svn.py b/build/lib/Bcfg2/Server/Plugins/Svn.py
deleted file mode 100644
index cb4ab649b..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Svn.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-from subprocess import Popen, PIPE
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Svn')
-
-
-class Svn(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Svn is a version plugin for dealing with Bcfg2 repos."""
- name = 'Svn'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- self.core = core
- self.datastore = datastore
-
- # path to svn directory for bcfg2 repo
- svn_dir = "%s/.svn" % datastore
-
- # Read revision from bcfg2 repo
- if os.path.isdir(svn_dir):
- self.get_revision()
- else:
- logger.error("%s is not a directory" % svn_dir)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir)
-
- def get_revision(self):
- """Read svn revision information for the Bcfg2 repository."""
- try:
- data = Popen(("env LC_ALL=C svn info %s" %
- (self.datastore)), shell=True,
- stdout=PIPE).communicate()[0].split('\n')
- return [line.split(': ')[1] for line in data \
- if line[:9] == 'Revision:'][-1]
- except IndexError:
- logger.error("Failed to read svn info; disabling svn support")
- logger.error('''Ran command "svn info %s"''' % (self.datastore))
- logger.error("Got output: %s" % data)
- raise Bcfg2.Server.Plugin.PluginInitError
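The Revision parsing in get_revision reduces to this sketch (hypothetical svn info output; the plugin keeps the value of the last Revision: line):

    data = """Path: .
    URL: https://svn.example.com/bcfg2-repo
    Revision: 4168
    Node Kind: directory
    """.split('\n')

    revision = [line.split(': ')[1] for line in data
                if line.strip()[:9] == 'Revision:'][-1]
    print(revision)   # 4168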
diff --git a/build/lib/Bcfg2/Server/Plugins/TCheetah.py b/build/lib/Bcfg2/Server/Plugins/TCheetah.py
deleted file mode 100644
index d40f4baf3..000000000
--- a/build/lib/Bcfg2/Server/Plugins/TCheetah.py
+++ /dev/null
@@ -1,78 +0,0 @@
-'''This module implements a templating generator based on Cheetah'''
-__revision__ = '$Revision$'
-
-import binascii
-import logging
-import sys
-import traceback
-import Bcfg2.Server.Plugin
-
-logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
-
-try:
- import Cheetah.Template
- import Cheetah.Parser
-except ImportError:
- logger.error("TCheetah: Failed to import Cheetah. Is it installed?")
- raise
-
-
-class TemplateFile:
- """Template file creates Cheetah template structures for the loaded file."""
-
- def __init__(self, name, specific, encoding):
- self.name = name
- self.specific = specific
- self.encoding = encoding
- self.template = None
- self.searchlist = dict()
-
- def handle_event(self, event):
- """Handle all fs events for this template."""
- if event.code2str() == 'deleted':
- return
- try:
- s = {'useStackFrames': False}
- self.template = Cheetah.Template.Template(open(self.name).read(),
- compilerSettings=s,
- searchList=self.searchlist)
- except Cheetah.Parser.ParseError, perror:
- logger.error("Cheetah parse error for file %s" % (self.name))
- logger.error(perror.report())
-
- def bind_entry(self, entry, metadata):
- """Build literal file information."""
- self.template.metadata = metadata
- self.searchlist['metadata'] = metadata
- self.template.path = entry.get('realname', entry.get('name'))
- self.searchlist['path'] = entry.get('realname', entry.get('name'))
- self.template.source_path = self.name
- self.searchlist['source_path'] = self.name
-
- if entry.tag == 'Path':
- entry.set('type', 'file')
- try:
- if type(self.template) == unicode:
- entry.text = self.template
- else:
- if entry.get('encoding') == 'base64':
- # take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(self.template)
- else:
- entry.text = unicode(str(self.template), self.encoding)
- except:
- (a, b, c) = sys.exc_info()
- msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
- logger.error(msg)
- logger.error("TCheetah template error for %s" % self.searchlist['path'])
- del a, b, c
- raise Bcfg2.Server.Plugin.PluginExecutionError
-
-
-class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
- """The TCheetah generator implements a templating mechanism for configuration files."""
- name = 'TCheetah'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- filename_pattern = 'template'
- es_child_cls = TemplateFile
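bind_entry exposes metadata, path, and source_path to templates through Cheetah's searchList, and rendering happens when the Template object is coerced to a string. A minimal stand-alone sketch of that pattern (the template text and values are invented; searchList takes a list of namespaces):

    from Cheetah.Template import Template

    namespace = {'path': '/etc/motd', 'metadata': {'hostname': 'node1'}}
    source = "Welcome to $metadata.hostname -- managed file $path\n"

    # compilerSettings mirrors the plugin's useStackFrames tweak;
    # str() triggers rendering against the searchList namespaces.
    tmpl = Template(source, compilerSettings={'useStackFrames': False},
                    searchList=[namespace])
    print(str(tmpl))   # Welcome to node1 -- managed file /etc/motd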
diff --git a/build/lib/Bcfg2/Server/Plugins/TGenshi.py b/build/lib/Bcfg2/Server/Plugins/TGenshi.py
deleted file mode 100644
index 29e6d7307..000000000
--- a/build/lib/Bcfg2/Server/Plugins/TGenshi.py
+++ /dev/null
@@ -1,126 +0,0 @@
-"""This module implements a templating generator based on Genshi."""
-__revision__ = '$Revision$'
-
-import binascii
-import logging
-import Bcfg2.Server.Plugin
-
-logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
-
-# try to import genshi stuff
-try:
- import genshi.core
- import genshi.input
- from genshi.template import TemplateLoader, \
- TextTemplate, MarkupTemplate, TemplateError
-except ImportError:
- logger.error("TGenshi: Failed to import Genshi. Is it installed?")
- raise Bcfg2.Server.Plugin.PluginInitError
-try:
- from genshi.template import NewTextTemplate
- have_ntt = True
-except ImportError:
- have_ntt = False
-
-def removecomment(stream):
- """A genshi filter that removes comments from the stream."""
- for kind, data, pos in stream:
- if kind is genshi.core.COMMENT:
- continue
- yield kind, data, pos
-
-
-class TemplateFile:
- """Template file creates Genshi template structures for the loaded file."""
-
- def __init__(self, name, specific, encoding):
- self.name = name
- self.specific = specific
- self.encoding = encoding
- if self.specific.all:
- matchname = self.name
- elif self.specific.group:
- matchname = self.name[:self.name.find('.G')]
- else:
- matchname = self.name[:self.name.find('.H')]
- if matchname.endswith('.txt'):
- self.template_cls = TextTemplate
-        elif matchname.endswith('.newtxt'):
-            if not have_ntt:
-                logger.error("Genshi NewTextTemplates not supported by this version of Genshi; falling back to TextTemplate")
-                self.template_cls = TextTemplate
-            else:
-                self.template_cls = NewTextTemplate
- else:
- self.template_cls = MarkupTemplate
- self.HandleEvent = self.handle_event
-
- def handle_event(self, event=None):
- """Handle all fs events for this template."""
- if event and event.code2str() == 'deleted':
- return
- try:
- loader = TemplateLoader()
- try:
- self.template = loader.load(self.name, cls=self.template_cls,
- encoding=self.encoding)
- except LookupError, lerror:
- logger.error('Genshi lookup error: %s' % lerror)
- except TemplateError, terror:
- logger.error('Genshi template error: %s' % terror)
- except genshi.input.ParseError, perror:
- logger.error('Genshi parse error: %s' % perror)
-
- def bind_entry(self, entry, metadata):
- """Build literal file information."""
- fname = entry.get('realname', entry.get('name'))
- if entry.tag == 'Path':
- entry.set('type', 'file')
- try:
-            stream = self.template.generate(name=fname,
-                                            metadata=metadata,
-                                            path=self.name).filter(removecomment)
- if have_ntt:
- ttypes = [TextTemplate, NewTextTemplate]
- else:
- ttypes = [TextTemplate]
- if True in [isinstance(self.template, t) for t in ttypes]:
- try:
- textdata = stream.render('text', strip_whitespace=False)
- except TypeError:
- textdata = stream.render('text')
- if type(textdata) == unicode:
- entry.text = textdata
- else:
- if entry.get('encoding') == 'base64':
- # take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(textdata)
- else:
- entry.text = unicode(textdata, self.encoding)
- else:
- try:
- xmldata = stream.render('xml', strip_whitespace=False)
- except TypeError:
- xmldata = stream.render('xml')
- if type(xmldata) == unicode:
- entry.text = xmldata
- else:
- entry.text = unicode(xmldata, self.encoding)
- except TemplateError, terror:
- logger.error('Genshi template error: %s' % terror)
- raise Bcfg2.Server.Plugin.PluginExecutionError
- except AttributeError, err:
- logger.error('Genshi template loading error: %s' % err)
- raise Bcfg2.Server.Plugin.PluginExecutionError
-
-
-class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
- """
- The TGenshi generator implements a templating
- mechanism for configuration files.
-
- """
- name = 'TGenshi'
- __version__ = '$Id$'
- __author__ = 'jeff@ocjtech.us'
- filename_pattern = 'template\.(txt|newtxt|xml)'
- es_child_cls = TemplateFile
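The filename suffix picks the template dialect: .txt uses the legacy TextTemplate, .newtxt the ${...}-style NewTextTemplate, and anything else is treated as XML markup. A minimal NewTextTemplate sketch (template text and values invented):

    from genshi.template import NewTextTemplate

    tmpl = NewTextTemplate("hostname: ${metadata['hostname']}\n")
    stream = tmpl.generate(metadata={'hostname': 'node1'})
    print(stream.render('text'))   # hostname: node1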
diff --git a/build/lib/Bcfg2/Server/Plugins/Trigger.py b/build/lib/Bcfg2/Server/Plugins/Trigger.py
deleted file mode 100644
index f6dd47e12..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Trigger.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import os
-import Bcfg2.Server.Plugin
-
-
-def async_run(prog, args):
- pid = os.fork()
- if pid:
- os.waitpid(pid, 0)
- else:
- dpid = os.fork()
- if not dpid:
- os.system(" ".join([prog] + args))
- os._exit(0)
-
-
-class Trigger(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Statistics):
- """Trigger is a plugin that calls external scripts (on the server)."""
- name = 'Trigger'
-    __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- try:
- os.stat(self.data)
-        except OSError:
- self.logger.error("Trigger: spool directory %s does not exist; unloading" % self.data)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def process_statistics(self, metadata, _):
- args = [metadata.hostname, '-p', metadata.profile, '-g',
- ':'.join([g for g in metadata.groups])]
- for notifier in os.listdir(self.data):
- n = self.data + '/' + notifier
- async_run(n, args)
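async_run is the classic double fork: the parent reaps the short-lived first child immediately, and the grandchild that actually runs the notifier is reparented to init, so long-running scripts never leave zombies behind. A stand-alone, POSIX-only sketch of the same pattern, substituting subprocess.call for os.system so arguments are not re-parsed by the shell:

    import os
    import subprocess

    def async_run(prog, args):
        """Run prog asynchronously without leaving a zombie."""
        pid = os.fork()
        if pid:
            os.waitpid(pid, 0)        # reap the first child right away
        else:
            if os.fork() == 0:        # grandchild: now owned by init
                subprocess.call([prog] + args)
            os._exit(0)               # first child and grandchild both exit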
diff --git a/build/lib/Bcfg2/Server/Plugins/Web.py b/build/lib/Bcfg2/Server/Plugins/Web.py
deleted file mode 100644
index e1646e429..000000000
--- a/build/lib/Bcfg2/Server/Plugins/Web.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import BaseHTTPServer
-import SimpleHTTPServer
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Web')
-
-class Web(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Web is a simple webserver to display the content of the Bcfg2 repos."""
- name = 'Web'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Version.__init__(self)
- self.core = core
- self.datastore = datastore
-
-        # Change directory to the Bcfg2 repo
-        if not os.path.exists(datastore):
-            logger.error("%s doesn't exist" % datastore)
-            raise Bcfg2.Server.Plugin.PluginInitError
-        else:
-            os.chdir(datastore)
-            # log before start_web(): serve_forever() never returns
-            logger.debug("Serving %s at port 6788" % datastore)
-            self.start_web()
-
-
- def start_web(self, port=6788):
- """Starts the webserver for directory listing of the Bcfg2 repo."""
- try:
- server_class = BaseHTTPServer.HTTPServer
- handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler
- server_address = ('', port)
- server = server_class(server_address, handler_class)
- server.serve_forever()
- except:
- logger.error("Failed to start webserver")
- raise Bcfg2.Server.Plugin.PluginInitError
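start_web is just the standard-library HTTP server pointed at the repository directory. The Python 3 equivalent of the whole plugin body fits in a few lines (the repository path is a placeholder; 6788 is the default port above):

    import os
    from http.server import HTTPServer, SimpleHTTPRequestHandler

    os.chdir("/var/lib/bcfg2")    # serve the Bcfg2 repository
    server = HTTPServer(("", 6788), SimpleHTTPRequestHandler)
    server.serve_forever()        # directory listing at http://host:6788/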
diff --git a/build/lib/Bcfg2/Server/Plugins/__Web.py b/build/lib/Bcfg2/Server/Plugins/__Web.py
deleted file mode 100644
index e1646e429..000000000
--- a/build/lib/Bcfg2/Server/Plugins/__Web.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import BaseHTTPServer
-import SimpleHTTPServer
-import Bcfg2.Server.Plugin
-
-# for debugging output only
-import logging
-logger = logging.getLogger('Bcfg2.Plugins.Web')
-
-class Web(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Version):
- """Web is a simple webserver to display the content of the Bcfg2 repos."""
- name = 'Web'
- __version__ = '$Id$'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- experimental = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Version.__init__(self)
- self.core = core
- self.datastore = datastore
-
-        # Change directory to the Bcfg2 repo
-        if not os.path.exists(datastore):
-            logger.error("%s doesn't exist" % datastore)
-            raise Bcfg2.Server.Plugin.PluginInitError
-        else:
-            os.chdir(datastore)
-            # log before start_web(): serve_forever() never returns
-            logger.debug("Serving %s at port 6788" % datastore)
-            self.start_web()
-
-
- def start_web(self, port=6788):
- """Starts the webserver for directory listing of the Bcfg2 repo."""
- try:
- server_class = BaseHTTPServer.HTTPServer
- handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler
- server_address = ('', port)
- server = server_class(server_address, handler_class)
- server.serve_forever()
- except:
- logger.error("Failed to start webserver")
- raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Plugins/__init__.py b/build/lib/Bcfg2/Server/Plugins/__init__.py
deleted file mode 100644
index c69c37452..000000000
--- a/build/lib/Bcfg2/Server/Plugins/__init__.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Imports for Bcfg2.Server.Plugins."""
-__revision__ = '$Revision$'
-
-__all__ = [
- 'Account',
- 'Base',
- 'Bundler',
- 'Bzr',
- 'Cfg',
- 'Cvs',
- 'Darcs',
- 'Decisions',
- 'Fossil',
- 'Git',
- 'GroupPatterns',
- 'Hg',
- 'Hostbase',
- 'Metadata',
- 'NagiosGen',
- 'Ohai',
- 'Packages',
- 'Properties',
- 'Probes',
- 'Pkgmgr',
- 'Rules',
- 'SSHbase',
- 'Snapshots',
- 'Statistics',
- 'Svcmgr',
- 'Svn',
- 'TCheetah',
- 'Trigger',
- 'SGenshi',
- 'TGenshi',
- ]
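The names in __all__ double as the plugin identifiers used in bcfg2.conf: by convention each one is both a module under Bcfg2.Server.Plugins and the plugin class inside that module. A hedged sketch of how such a name can be resolved to its class (the real server core's loader differs in details such as error handling):

    import importlib

    def load_plugin(name):
        """Resolve a plugin name like 'Cfg' to its plugin class."""
        module = importlib.import_module("Bcfg2.Server.Plugins." + name)
        return getattr(module, name)   # class is named after its module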