Diffstat (limited to 'src/lib/Bcfg2/Server/Plugins')
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Account.py | 101
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/BB.py | 83
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Base.py | 43
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bundler.py | 99
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bzr.py | 35
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg.py | 283
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cvs.py | 46
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/DBStats.py | 113
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Darcs.py | 48
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Decisions.py | 66
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Defaults.py | 53
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Deps.py | 134
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Editor.py | 80
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/FileProbes.py | 230
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Fossil.py | 51
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Git.py | 44
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupPatterns.py | 124
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Guppy.py | 62
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Hg.py | 45
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Hostbase.py | 593
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ldap.py | 245
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py | 866
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 151
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ohai.py | 92
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Apt.py | 139
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Collection.py | 343
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pac.py | 120
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py | 15
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py | 97
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Source.py | 278
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py | 688
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py | 263
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Pkgmgr.py | 169
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py | 285
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Properties.py | 76
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Rules.py | 55
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SGenshi.py | 97
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py | 413
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSLCA.py | 274
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Snapshots.py | 142
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Statistics.py | 161
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svcmgr.py | 10
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svn.py | 46
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svn2.py | 125
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TCheetah.py | 80
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TGenshi.py | 135
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TemplateHelper.py | 83
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Trigger.py | 43
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/__init__.py | 34
49 files changed, 7858 insertions, 0 deletions
diff --git a/src/lib/Bcfg2/Server/Plugins/Account.py b/src/lib/Bcfg2/Server/Plugins/Account.py
new file mode 100644
index 000000000..f2703dccb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Account.py
@@ -0,0 +1,101 @@
+"""This handles authentication setup."""
+
+import Bcfg2.Server.Plugin
+
+
+class Account(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Generator):
+ """This module generates account config files,
+ based on an internal data repo:
+ static.(passwd|group|limits.conf) -> static entries
+ dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch)
+ useraccess -> users to be granted login access on some hosts
+ superusers -> users to be granted root privs on all hosts
+ rootlike -> users to be granted root privs on some hosts
+
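+    useraccess and rootlike entries are whitespace-separated tokens of
+    the form "user:host" (format inferred from the split(':') parsing
+    below); superusers is a plain whitespace-separated list of users.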
+ """
+ name = 'Account'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ self.Entries = {'ConfigFile': {'/etc/passwd': self.from_yp_cb,
+ '/etc/group': self.from_yp_cb,
+ '/etc/security/limits.conf': self.gen_limits_cb,
+ '/root/.ssh/authorized_keys': self.gen_root_keys_cb,
+ '/etc/sudoers': self.gen_sudoers}}
+ try:
+ self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data,
+ self.core.fam)
+        except:
+            self.logger.error("Failed to load repo %s" % self.data)
+            raise Bcfg2.Server.Plugin.PluginInitError
+
+ def from_yp_cb(self, entry, metadata):
+ """Build password file from cached yp data."""
+ fname = entry.attrib['name'].split('/')[-1]
+ entry.text = self.repository.entries["static.%s" % (fname)].data
+ entry.text += self.repository.entries["dyn.%s" % (fname)].data
+ perms = {'owner': 'root',
+ 'group': 'root',
+ 'perms': '0644'}
+ [entry.attrib.__setitem__(key, value) for (key, value) in \
+ list(perms.items())]
+
+ def gen_limits_cb(self, entry, metadata):
+ """Build limits entries based on current ACLs."""
+ entry.text = self.repository.entries["static.limits.conf"].data
+ superusers = self.repository.entries["superusers"].data.split()
+ useraccess = [line.split(':') for line in \
+ self.repository.entries["useraccess"].data.split()]
+ users = [user for (user, host) in \
+ useraccess if host == metadata.hostname.split('.')[0]]
+ perms = {'owner': 'root',
+ 'group': 'root',
+ 'perms': '0600'}
+ [entry.attrib.__setitem__(key, value) for (key, value) in \
+ list(perms.items())]
+ entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users])
+ if "*" not in users:
+ entry.text += "* hard maxlogins 0\n"
+
+ def gen_root_keys_cb(self, entry, metadata):
+ """Build root authorized keys file based on current ACLs."""
+ superusers = self.repository.entries['superusers'].data.split()
+ try:
+ rootlike = [line.split(':', 1) for line in \
+ self.repository.entries['rootlike'].data.split()]
+ superusers += [user for (user, host) in rootlike \
+ if host == metadata.hostname.split('.')[0]]
+ except:
+ pass
+ rdata = self.repository.entries
+ entry.text = "".join([rdata["%s.key" % user].data for user \
+ in superusers if \
+ ("%s.key" % user) in rdata])
+ perms = {'owner': 'root',
+ 'group': 'root',
+ 'perms': '0600'}
+ [entry.attrib.__setitem__(key, value) for (key, value) \
+ in list(perms.items())]
+
+ def gen_sudoers(self, entry, metadata):
+        """Build sudoers file based on current ACLs."""
+ superusers = self.repository.entries['superusers'].data.split()
+ try:
+ rootlike = [line.split(':', 1) for line in \
+ self.repository.entries['rootlike'].data.split()]
+ superusers += [user for (user, host) in rootlike \
+ if host == metadata.hostname.split('.')[0]]
+ except:
+ pass
+ entry.text = self.repository.entries['static.sudoers'].data
+ entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \
+ for uname in superusers])
+ perms = {'owner': 'root',
+ 'group': 'root',
+ 'perms': '0440'}
+ [entry.attrib.__setitem__(key, value) for (key, value) \
+ in list(perms.items())]
diff --git a/src/lib/Bcfg2/Server/Plugins/BB.py b/src/lib/Bcfg2/Server/Plugins/BB.py
new file mode 100644
index 000000000..c015ec47c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/BB.py
@@ -0,0 +1,83 @@
+import lxml.etree
+import Bcfg2.Server.Plugin
+import glob
+import os
+import socket
+
+# manage boot symlinks
+# add statistics check to do build->boot mods
+
+# map profiles: if the first array is not empty, we replace the -p with
+# a determined profile.
+logger = Bcfg2.Server.Plugin.logger
+
+class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
+ """Class for bb files."""
+ def Index(self):
+ """Build data into an xml object."""
+
+ try:
+ self.data = lxml.etree.XML(self.data)
+ except lxml.etree.XMLSyntaxError:
+ Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
+ return
+ self.tftppath = self.data.get('tftp', '/tftpboot')
+ self.macs = {}
+ self.users = {}
+ self.actions = {}
+ self.bootlinks = []
+
+ for node in self.data.findall('Node'):
+ iface = node.find('Interface')
+ if iface != None:
+                mac = "01-%s" % iface.get('mac').replace(':', '-').lower()
+ self.actions[node.get('name')] = node.get('action')
+ self.bootlinks.append((mac, node.get('action')))
+                try:
+                    ip = socket.gethostbyname(node.get('name'))
+                except socket.error:
+                    logger.error("failed host resolution for %s" % node.get('name'))
+                    ip = None
+
+                self.macs[node.get('name')] = (iface.get('mac'), ip)
+ else:
+ logger.error("%s" % lxml.etree.tostring(node))
+ self.users[node.get('name')] = node.get('user',"").split(':')
+
+ def enforce_bootlinks(self):
+ for mac, target in self.bootlinks:
+ path = self.tftppath + '/' + mac
+ if not os.path.islink(path):
+ logger.error("Boot file %s not a link" % path)
+ if target != os.readlink(path):
+ try:
+ os.unlink(path)
+ os.symlink(target, path)
+ except:
+ logger.error("Failed to modify link %s" % path)
+
+class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
+ __child__ = BBfile
+
+
+class BB(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ """The BB plugin maps users to machines and metadata to machines."""
+ name = 'BB'
+ deprecated = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.store = BBDirectoryBacked(self.data, core.fam)
+
+ def get_additional_data(self, metadata):
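+        # Illustrative return shape (derived from the code below):
+        #   {'users': {'alice': ['ssh-rsa AAAA... alice@host\n', ...]},
+        #    'macs': {'node1': ('aa:bb:cc:dd:ee:ff', '10.0.0.1')}}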
+
+ users = {}
+ for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
+ pubkeys = []
+ for fname in glob.glob('/home/%s/.ssh/*.pub'%user):
+ pubkeys.append(open(fname).read())
+
+ users[user] = pubkeys
+
+ return dict([('users', users),
+ ('macs', self.store.entries['bb.xml'].macs)])
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
new file mode 100644
index 000000000..389ca7a95
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Base.py
@@ -0,0 +1,43 @@
+"""This module sets up a base list of configuration entries."""
+
+import copy
+import lxml.etree
+import sys
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+ from functools import reduce
+
+import Bcfg2.Server.Plugin
+
+
+class Base(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure,
+ Bcfg2.Server.Plugin.XMLDirectoryBacked):
+    """This Structure is good for the pile of independent configs
+    needed for most actual systems. Base creates independent clauses
+    based on client metadata.
+    """
+ name = 'Base'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = Bcfg2.Server.Plugin.StructFile
+ deprecated = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ try:
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self,
+ self.data,
+ self.core.fam)
+ except OSError:
+ self.logger.error("Failed to load Base repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def BuildStructures(self, metadata):
+ """Build structures for client described by metadata."""
+ ret = lxml.etree.Element("Independent", version='2.0')
+ fragments = reduce(lambda x, y: x + y,
+ [base.Match(metadata) for base
+ in list(self.entries.values())], [])
+ [ret.append(copy.copy(frag)) for frag in fragments]
+ return [ret]
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
new file mode 100644
index 000000000..ccb99481e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -0,0 +1,99 @@
+"""This provides bundle clauses with translation functionality."""
+
+import copy
+import lxml.etree
+import os
+import os.path
+import re
+import sys
+
+import Bcfg2.Server.Plugin
+
+try:
+ import genshi.template
+ import genshi.template.base
+ import Bcfg2.Server.Plugins.SGenshi
+ have_genshi = True
+except:
+ have_genshi = False
+
+
+class BundleFile(Bcfg2.Server.Plugin.StructFile):
+
+ def get_xml_value(self, metadata):
+ bundlename = os.path.splitext(os.path.basename(self.name))[0]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ [bundle.append(copy.copy(item)) for item in self.Match(metadata)]
+ return bundle
+
+
+class Bundler(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure,
+ Bcfg2.Server.Plugin.XMLDirectoryBacked):
+ """The bundler creates dependent clauses based on the
+ bundle/translation scheme from Bcfg1.
+ """
+ name = 'Bundler'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ patterns = re.compile('^(?P<name>.*)\.(xml|genshi)$')
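+    # Bundle files are named e.g. "ntp.xml" or "ntp.genshi"; the
+    # pattern's "name" group ("ntp") is what BuildStructures matches
+    # against metadata.bundles below.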
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ self.encoding = core.encoding
+ self.__child__ = self.template_dispatch
+ try:
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self,
+ self.data,
+ self.core.fam)
+ except OSError:
+ self.logger.error("Failed to load Bundle repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def template_dispatch(self, name):
+ bundle = lxml.etree.parse(name)
+ nsmap = bundle.getroot().nsmap
+ if name.endswith('.xml'):
+ if have_genshi and \
+ (nsmap == {'py': 'http://genshi.edgewall.org/'}):
+ # allow for genshi bundles with .xml extensions
+ spec = Bcfg2.Server.Plugin.Specificity()
+ return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
+ spec,
+ self.encoding)
+ else:
+ return BundleFile(name)
+ elif name.endswith('.genshi'):
+ if have_genshi:
+ spec = Bcfg2.Server.Plugin.Specificity()
+ return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
+ spec,
+ self.encoding)
+
+ def BuildStructures(self, metadata):
+ """Build all structures for client (metadata)."""
+ bundleset = []
+
+ bundle_entries = {}
+ for key, item in self.entries.items():
+ bundle_entries.setdefault(self.patterns.match(os.path.basename(key)).group('name'),
+ []).append(item)
+
+ for bundlename in metadata.bundles:
+ try:
+ entries = bundle_entries[bundlename]
+ except KeyError:
+ self.logger.error("Bundler: Bundle %s does not exist" %
+ bundlename)
+ continue
+ try:
+ bundleset.append(entries[0].get_xml_value(metadata))
+ except genshi.template.base.TemplateError:
+ t = sys.exc_info()[1]
+ self.logger.error("Bundler: Failed to template genshi bundle %s"
+ % bundlename)
+ self.logger.error(t)
+ except:
+ self.logger.error("Bundler: Unexpected bundler error for %s" %
+ bundlename, exc_info=1)
+ return bundleset
diff --git a/src/lib/Bcfg2/Server/Plugins/Bzr.py b/src/lib/Bcfg2/Server/Plugins/Bzr.py
new file mode 100644
index 000000000..a71021cb5
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Bzr.py
@@ -0,0 +1,35 @@
+import Bcfg2.Server.Plugin
+from bzrlib.workingtree import WorkingTree
+from bzrlib import errors
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Bzr')
+
+class Bzr(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Bzr is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Bzr'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # Read revision from bcfg2 repo
+ revision = self.get_revision()
+
+ logger.debug("Initialized Bazaar plugin with directory = %(dir)s at revision = %(rev)s" % {'dir': datastore, 'rev': revision})
+
+ def get_revision(self):
+ """Read Bazaar revision information for the Bcfg2 repository."""
+ try:
+ working_tree = WorkingTree.open(self.datastore)
+ revision = str(working_tree.branch.revno())
+ if working_tree.has_changes(working_tree.basis_tree()) or working_tree.unknowns():
+ revision += "+"
+ except errors.NotBranchError:
+ logger.error("Failed to read Bazaar branch; disabling Bazaar support")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg.py b/src/lib/Bcfg2/Server/Plugins/Cfg.py
new file mode 100644
index 000000000..8ec31bbae
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg.py
@@ -0,0 +1,283 @@
+"""This module implements a config file repository."""
+
+import binascii
+import logging
+import lxml
+import operator
+import os
+import os.path
+import re
+import stat
+import sys
+import tempfile
+from subprocess import Popen, PIPE
+from Bcfg2.Bcfg2Py3k import u_str
+
+import Bcfg2.Server.Plugin
+
+try:
+ import genshi.core
+ import genshi.input
+ from genshi.template import TemplateLoader, NewTextTemplate
+ have_genshi = True
+except:
+ have_genshi = False
+
+try:
+ import Cheetah.Template
+ import Cheetah.Parser
+ have_cheetah = True
+except:
+ have_cheetah = False
+
+# setup logging
+logger = logging.getLogger('Bcfg2.Plugins.Cfg')
+
+
+# snipped from TGenshi
+def removecomment(stream):
+ """A genshi filter that removes comments from the stream."""
+ for kind, data, pos in stream:
+ if kind is genshi.core.COMMENT:
+ continue
+ yield kind, data, pos
+
+
+def process_delta(data, delta):
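+    """Apply a delta (cat or diff) to base file data.
+
+    A 'cat' delta is line-oriented: a leading '+' appends the rest of
+    the line, a leading '-' removes a matching line, e.g.
+        +PermitRootLogin no
+        -PermitRootLogin yes
+    A 'diff' delta is a unified diff applied with patch(1).
+    """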
+ if not delta.specific.delta:
+ return data
+ if delta.specific.delta == 'cat':
+ datalines = data.strip().split('\n')
+ for line in delta.data.split('\n'):
+ if not line:
+ continue
+ if line[0] == '+':
+ datalines.append(line[1:])
+ elif line[0] == '-':
+ if line[1:] in datalines:
+ datalines.remove(line[1:])
+ return "\n".join(datalines) + "\n"
+ elif delta.specific.delta == 'diff':
+ basehandle, basename = tempfile.mkstemp()
+ basefile = open(basename, 'w')
+ basefile.write(data)
+ basefile.close()
+ os.close(basehandle)
+
+ cmd = ["patch", "-u", "-f", basefile.name]
+ patch = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ stderr = patch.communicate(input=delta.data)[1]
+ ret = patch.wait()
+ output = open(basefile.name, 'r').read()
+ os.unlink(basefile.name)
+    if ret != 0:  # Popen.wait() returns the decoded exit status directly
+ logger.error("Error applying diff %s: %s" % (delta.name, stderr))
+ raise Bcfg2.Server.Plugin.PluginExecutionError('delta', delta)
+ return output
+
+
+class CfgMatcher:
+
+ def __init__(self, fname):
+ name = re.escape(fname)
+ self.basefile_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+?)|.G(?P<prio>\d+)_(?P<group>\S+?))((?P<genshi>\\.genshi)|(?P<cheetah>\\.cheetah))?$' % name)
+ self.delta_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|\\.G(?P<prio>\d+)_(?P<group>\S+))\\.(?P<delta>(cat|diff))$' % name)
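+        # Illustrative filenames for a base file "motd" (derived from
+        # the regexes above): "motd", "motd.G10_web", "motd.H_node1",
+        # and "motd.genshi" match basefile_reg; "motd.H_node1.cat" and
+        # "motd.G10_web.diff" match delta_reg.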
+ self.cat_count = fname.count(".cat")
+ self.diff_count = fname.count(".diff")
+
+ def match(self, fname):
+ if fname.count(".cat") > self.cat_count \
+ or fname.count('.diff') > self.diff_count:
+ return self.delta_reg.match(fname)
+ return self.basefile_reg.match(fname)
+
+
+class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
+
+ def __init__(self, basename, path, entry_type, encoding):
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path,
+ entry_type, encoding)
+        self.specific = CfgMatcher(path.split('/')[-1])
+
+ def debug_log(self, message, flag=None):
+ if (flag is None and self.debug_flag) or flag:
+ logger.error(message)
+
+ def sort_by_specific(self, one, other):
+ return cmp(one.specific, other.specific)
+
+ def get_pertinent_entries(self, entry, metadata):
+ """return a list of all entries pertinent
+ to a client => [base, delta1, delta2]
+ """
+ matching = [ent for ent in list(self.entries.values()) if \
+ ent.specific.matches(metadata)]
+ matching.sort(key=operator.attrgetter('specific'))
+ # base entries which apply to a client
+ # (e.g. foo, foo.G##_groupname, foo.H_hostname)
+ base_files = [matching.index(m) for m in matching
+ if not m.specific.delta]
+ if not base_files:
+ logger.error("No base file found for %s" % entry.get('name'))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ base = min(base_files)
+ used = matching[:base + 1]
+ used.reverse()
+ return used
+
+ def bind_entry(self, entry, metadata):
+ self.bind_info_to_entry(entry, metadata)
+ used = self.get_pertinent_entries(entry, metadata)
+ basefile = used.pop(0)
+ if entry.get('perms').lower() == 'inherit':
+ # use on-disk permissions
+ fname = "%s/%s" % (self.path, entry.get('name'))
+ entry.set('perms',
+ str(oct(stat.S_IMODE(os.stat(fname).st_mode))))
+ if entry.tag == 'Path':
+ entry.set('type', 'file')
+ if basefile.name.endswith(".genshi"):
+ if not have_genshi:
+ logger.error("Cfg: Genshi is not available")
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ template_cls = NewTextTemplate
+ loader = TemplateLoader()
+ template = loader.load(basefile.name, cls=template_cls,
+ encoding=self.encoding)
+ fname = entry.get('realname', entry.get('name'))
+ stream = template.generate(name=fname,
+ metadata=metadata,
+ path=basefile.name).filter(removecomment)
+ try:
+ data = stream.render('text', encoding=self.encoding,
+ strip_whitespace=False)
+ except TypeError:
+ data = stream.render('text', encoding=self.encoding)
+ if data == '':
+ entry.set('empty', 'true')
+ except Exception:
+ e = sys.exc_info()[1]
+ logger.error("Cfg: genshi exception: %s" % e)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ elif basefile.name.endswith(".cheetah"):
+ if not have_cheetah:
+ logger.error("Cfg: Cheetah is not available")
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ fname = entry.get('realname', entry.get('name'))
+ s = {'useStackFrames': False}
+ template = Cheetah.Template.Template(open(basefile.name).read(),
+ compilerSettings=s)
+ template.metadata = metadata
+ template.path = fname
+ template.source_path = basefile.name
+ data = template.respond()
+ if data == '':
+ entry.set('empty', 'true')
+ except Exception:
+ e = sys.exc_info()[1]
+ logger.error("Cfg: cheetah exception: %s" % e)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ else:
+ data = basefile.data
+ for delta in used:
+ data = process_delta(data, delta)
+ if entry.get('encoding') == 'base64':
+ entry.text = binascii.b2a_base64(data)
+ else:
+ try:
+ entry.text = u_str(data, self.encoding)
+ except UnicodeDecodeError:
+ e = sys.exc_info()[1]
+ logger.error("Failed to decode %s: %s" % (entry.get('name'), e))
+ logger.error("Please verify you are using the proper encoding.")
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ except ValueError:
+ e = sys.exc_info()[1]
+ logger.error("Error in specification for %s" % entry.get('name'))
+ logger.error("%s" % e)
+ logger.error("You need to specify base64 encoding for %s." %
+ entry.get('name'))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ if entry.text in ['', None]:
+ entry.set('empty', 'true')
+
+ def list_accept_choices(self, entry, metadata):
+ '''return a list of candidate pull locations'''
+ used = self.get_pertinent_entries(entry, metadata)
+ ret = []
+ if used:
+ ret.append(used[0].specific)
+ if not ret[0].hostname:
+ ret.append(Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname))
+ return ret
+
+ def build_filename(self, specific):
+ bfname = self.path + '/' + self.path.split('/')[-1]
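+        # e.g. an entry set rooted at .../Cfg/etc/motd yields
+        # ".../Cfg/etc/motd/motd", plus the specificity suffix added below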
+ if specific.all:
+ return bfname
+ elif specific.group:
+ return "%s.G%02d_%s" % (bfname, specific.prio, specific.group)
+ elif specific.hostname:
+ return "%s.H_%s" % (bfname, specific.hostname)
+
+ def write_update(self, specific, new_entry, log):
+ if 'text' in new_entry:
+ name = self.build_filename(specific)
+ if os.path.exists("%s.genshi" % name):
+ logger.error("Cfg: Unable to pull data for genshi types")
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ elif os.path.exists("%s.cheetah" % name):
+ logger.error("Cfg: Unable to pull data for cheetah types")
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ etext = new_entry['text'].encode(self.encoding)
+ except:
+ logger.error("Cfg: Cannot encode content of %s as %s" % (name, self.encoding))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ open(name, 'w').write(etext)
+ self.debug_log("Wrote file %s" % name, flag=log)
+ badattr = [attr for attr in ['owner', 'group', 'perms']
+ if attr in new_entry]
+ if badattr:
+ # check for info files and inform user of their removal
+ if os.path.exists(self.path + "/:info"):
+ logger.info("Removing :info file and replacing with "
+ "info.xml")
+ os.remove(self.path + "/:info")
+ if os.path.exists(self.path + "/info"):
+ logger.info("Removing info file and replacing with "
+ "info.xml")
+ os.remove(self.path + "/info")
+ metadata_updates = {}
+ metadata_updates.update(self.metadata)
+ for attr in badattr:
+ metadata_updates[attr] = new_entry.get(attr)
+ infoxml = lxml.etree.Element('FileInfo')
+ infotag = lxml.etree.SubElement(infoxml, 'Info')
+ [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
+ for attr in metadata_updates]
+ ofile = open(self.path + "/info.xml", "w")
+ ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
+ ofile.close()
+ self.debug_log("Wrote file %s" % (self.path + "/info.xml"),
+ flag=log)
+
+
+class Cfg(Bcfg2.Server.Plugin.GroupSpool,
+ Bcfg2.Server.Plugin.PullTarget):
+    """This generator implements the configuration file repository for Bcfg2."""
+ name = 'Cfg'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ es_cls = CfgEntrySet
+ es_child_cls = Bcfg2.Server.Plugin.SpecificData
+
+ def AcceptChoices(self, entry, metadata):
+ return self.entries[entry.get('name')].list_accept_choices(entry, metadata)
+
+ def AcceptPullData(self, specific, new_entry, log):
+ return self.entries[new_entry.get('name')].write_update(specific,
+ new_entry,
+ log)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cvs.py b/src/lib/Bcfg2/Server/Plugins/Cvs.py
new file mode 100644
index 000000000..6ce72acd2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cvs.py
@@ -0,0 +1,46 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Cvs')
+
+class Cvs(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """CVS is a version plugin for dealing with Bcfg2 repository."""
+ name = 'Cvs'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # path to cvs directory for Bcfg2 repo
+ cvs_dir = "%s/CVSROOT" % datastore
+
+ # Read revision from Bcfg2 repo
+ if os.path.isdir(cvs_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not a directory" % cvs_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized cvs plugin with cvs directory = %s" % cvs_dir)
+
+ def get_revision(self):
+ """Read cvs revision information for the Bcfg2 repository."""
+ try:
+ data = Popen("env LC_ALL=C cvs log",
+ shell=True,
+ cwd=self.datastore,
+ stdout=PIPE).stdout.readlines()
+ revision = data[3].strip('\n')
+        except IndexError:
+            logger.error("Failed to read cvs log; disabling cvs support")
+            logger.error('''Ran command "cvs log" from directory "%s"''' % (self.datastore))
+            logger.error("Got output: %s" % data)
+            raise Bcfg2.Server.Plugin.PluginInitError
+        return revision
+
diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py
new file mode 100644
index 000000000..95395f74e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -0,0 +1,113 @@
+import binascii
+import difflib
+import logging
+import lxml.etree
+import platform
+import sys
+import time
+
+try:
+ from django.core.exceptions import MultipleObjectsReturned
+except ImportError:
+ pass
+
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Reports.importscript
+from Bcfg2.Server.Reports.reports.models import Client
+import Bcfg2.Server.Reports.settings
+from Bcfg2.Server.Reports.updatefix import update_database
+# for debugging output only
+logger = logging.getLogger('Bcfg2.Plugins.DBStats')
+
+class DBStats(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ThreadedStatistics,
+ Bcfg2.Server.Plugin.PullSource):
+ name = 'DBStats'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.PullSource.__init__(self)
+ self.cpath = "%s/Metadata/clients.xml" % datastore
+ self.core = core
+ logger.debug("Searching for new models to add to the statistics database")
+ try:
+ update_database()
+ except Exception:
+ inst = sys.exc_info()[1]
+ logger.debug(str(inst))
+ logger.debug(str(type(inst)))
+
+ def handle_statistic(self, metadata, data):
+ newstats = data.find("Statistics")
+ newstats.set('time', time.asctime(time.localtime()))
+ # ick
+ data = lxml.etree.tostring(newstats)
+ ndx = lxml.etree.XML(data)
+ e = lxml.etree.Element('Node', name=metadata.hostname)
+ e.append(ndx)
+ container = lxml.etree.Element("ConfigStatistics")
+ container.append(e)
+
+ # FIXME need to build a metadata interface to expose a list of clients
+ start = time.time()
+ for i in [1, 2, 3]:
+ try:
+ Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clients_xml.xdata,
+ container,
+ self.core.encoding,
+ 0,
+ logger,
+ True,
+ platform.node())
+ logger.info("Imported data for %s in %s seconds" \
+ % (metadata.hostname, time.time() - start))
+ return
+ except MultipleObjectsReturned:
+ e = sys.exc_info()[1]
+ logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
+ (metadata.hostname, e))
+ logger.error("DBStats: Data is inconsistent")
+ break
+ except:
+ logger.error("DBStats: Failed to write to db (lock); retrying",
+ exc_info=1)
+        logger.error("DBStats: Retry limit reached for %s; aborting operation" \
+                     % metadata.hostname)
+
+ def GetExtra(self, client):
+ c_inst = Client.objects.filter(name=client)[0]
+ return [(a.entry.kind, a.entry.name) for a in
+ c_inst.current_interaction.extra()]
+
+ def GetCurrentEntry(self, client, e_type, e_name):
+ try:
+ c_inst = Client.objects.filter(name=client)[0]
+ except IndexError:
+ self.logger.error("Unknown client: %s" % client)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ result = c_inst.current_interaction.bad().filter(entry__kind=e_type,
+ entry__name=e_name)
+ if not result:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ entry = result[0]
+ ret = []
+ data = ('owner', 'group', 'perms')
+ for t in data:
+ if getattr(entry.reason, "current_%s" % t) == '':
+ ret.append(getattr(entry.reason, t))
+ else:
+ ret.append(getattr(entry.reason, "current_%s" % t))
+ if entry.reason.is_sensitive:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ elif entry.reason.current_diff != '':
+ if entry.reason.is_binary:
+ ret.append(binascii.a2b_base64(entry.reason.current_diff))
+ else:
+ ret.append('\n'.join(difflib.restore(\
+ entry.reason.current_diff.split('\n'), 1)))
+ elif entry.reason.is_binary:
+ # If len is zero the object was too large to store
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ else:
+ ret.append(None)
+ return ret
diff --git a/src/lib/Bcfg2/Server/Plugins/Darcs.py b/src/lib/Bcfg2/Server/Plugins/Darcs.py
new file mode 100644
index 000000000..9fb9ff4f1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Darcs.py
@@ -0,0 +1,48 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Darcs')
+
+class Darcs(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Darcs is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Darcs'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # path to darcs directory for bcfg2 repo
+ darcs_dir = "%s/_darcs" % datastore
+
+ # Read changeset from bcfg2 repo
+ if os.path.isdir(darcs_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not present." % darcs_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized Darcs plugin with darcs directory = %s" % darcs_dir)
+
+ def get_revision(self):
+ """Read Darcs changeset information for the Bcfg2 repository."""
+ try:
+ data = Popen("env LC_ALL=C darcs changes",
+ shell=True,
+ cwd=self.datastore,
+ stdout=PIPE).stdout.readlines()
+ revision = data[0].strip('\n')
+ except:
+ logger.error("Failed to read darcs repository; disabling Darcs support")
+ logger.error('''Ran command "darcs changes" from directory "%s"''' % (self.datastore))
+ logger.error("Got output: %s" % data)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py
new file mode 100644
index 000000000..b432474f2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -0,0 +1,66 @@
+import logging
+import lxml.etree
+import sys
+
+import Bcfg2.Server.Plugin
+logger = logging.getLogger('Bcfg2.Plugins.Decisions')
+
+class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
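+    """A single whitelist or blacklist decision file.
+
+    Only Decision elements are read (see get_decisions); a hypothetical
+    whitelist.xml:
+        <Decisions>
+          <Decision type="Service" name="sshd"/>
+        </Decisions>
+    """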
+ def handle_event(self, event):
+ Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ self.contents = lxml.etree.XML(self.data)
+
+ def get_decisions(self):
+ return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]
+
+class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, path, fam, encoding):
+ """Container for decision specification files.
+
+ Arguments:
+ - `path`: repository path
+ - `fam`: reference to the file monitor
+ - `encoding`: XML character encoding
+
+ """
+ pattern = '(white|black)list'
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
+ DecisionFile, encoding)
+ try:
+ fam.AddMonitor(path, self)
+ except OSError:
+ e = sys.exc_info()[1]
+ logger.error('Adding filemonitor for %s failed. '
+ 'Make sure directory exists' % path)
+ raise Bcfg2.Server.Plugin.PluginInitError(e)
+
+ def HandleEvent(self, event):
+ if event.filename != self.path:
+ return self.handle_event(event)
+
+ def GetDecisions(self, metadata, mode):
+ ret = []
+ candidates = [c for c in self.get_matching(metadata)
+ if c.name.split('/')[-1].startswith(mode)]
+ for c in candidates:
+ ret += c.get_decisions()
+ return ret
+
+class Decisions(DecisionSet,
+ Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Decision):
+ name = 'Decisions'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ """Decisions plugins
+
+ Arguments:
+ - `core`: Bcfg2.Core instance
+ - `datastore`: File repository location
+
+ """
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Decision.__init__(self)
+ DecisionSet.__init__(self, self.data, core.fam, core.encoding)
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Defaults.py b/src/lib/Bcfg2/Server/Plugins/Defaults.py
new file mode 100644
index 000000000..718192e2a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Defaults.py
@@ -0,0 +1,53 @@
+"""This generator provides rule-based entry mappings."""
+
+import re
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Plugins.Rules
+
+class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
+ Bcfg2.Server.Plugin.StructureValidator):
+ """Set default attributes on bound entries"""
+ name = 'Defaults'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ # Rules is a Generator that happens to implement all of the
+ # functionality we want, so we overload it, but Defaults should
+ # _not_ handle any entries; it does its stuff in the structure
+ # validation phase. so we overload Handle(s)Entry and HandleEvent
+ # to ensure that Defaults handles no entries, even though it's a
+ # Generator.
+
+ def HandlesEntry(self, entry, metadata):
+ return False
+
+ def HandleEntry(self, entry, metadata):
+        raise Bcfg2.Server.Plugin.PluginExecutionError
+
+ def HandleEvent(self, event):
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
+
+ def validate_structures(self, metadata, structures):
+ """ Apply defaults """
+ for struct in structures:
+ for entry in struct.iter():
+ if entry.tag.startswith("Bound"):
+ is_bound = True
+ entry.tag = entry.tag[5:]
+ else:
+ is_bound = False
+ try:
+ try:
+ self.BindEntry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ # either no matching defaults (which is okay),
+ # or multiple matching defaults (which is not
+ # okay, but is logged). either way, we don't
+ # care about the error.
+ pass
+ finally:
+ if is_bound:
+ entry.tag = "Bound" + entry.tag
+
+ def _regex_enabled(self):
+ """ Defaults depends on regex matching, so force it enabled """
+ return True
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
new file mode 100644
index 000000000..9b848baae
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -0,0 +1,134 @@
+"""This plugin provides automatic dependency handling."""
+
+import lxml.etree
+
+import Bcfg2.Server.Plugin
+
+
+class DNode(Bcfg2.Server.Plugin.INode):
+ """DNode provides supports for single predicate types for dependencies."""
+ raw = {'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"}
+ containers = ['Group']
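+    # raw['Group'] has %(name)s substituted and is then eval()ed in
+    # __init__, yielding a predicate that requires membership in the
+    # named group and also satisfies the parent node's predicate.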
+
+ def __init__(self, data, idict, parent=None):
+ self.data = data
+ self.contents = {}
+ if parent == None:
+ self.predicate = lambda x, d: True
+ else:
+ predicate = parent.predicate
+ if data.tag in list(self.raw.keys()):
+ self.predicate = eval(self.raw[data.tag] %
+ {'name': data.get('name')},
+ {'predicate': predicate})
+ else:
+ raise Exception
+ mytype = self.__class__
+ self.children = []
+ for item in data.getchildren():
+ if item.tag in self.containers:
+ self.children.append(mytype(item, idict, self))
+ else:
+ data = [(child.tag, child.get('name'))
+ for child in item.getchildren()]
+ try:
+ self.contents[item.tag][item.get('name')] = data
+ except KeyError:
+ self.contents[item.tag] = {item.get('name'): data}
+
+
+class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc):
+ __node__ = DNode
+
+
+class Deps(Bcfg2.Server.Plugin.PrioDir,
+ Bcfg2.Server.Plugin.StructureValidator):
+ name = 'Deps'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = DepXMLSrc
+
+ # Override the default sort_order (of 500) so that this plugin
+ # gets handled after others running at the default. In particular,
+ # we want to run after Packages, so we can see the final set of
+ # packages that will be installed on the client.
+ sort_order = 750
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+ self.cache = {}
+
+ def HandleEvent(self, event):
+ self.cache = {}
+ Bcfg2.Server.Plugin.PrioDir.HandleEvent(self, event)
+
+ def validate_structures(self, metadata, structures):
+ """Examine the passed structures and append any additional
+ prerequisite entries as defined by the files in Deps.
+ """
+ entries = []
+ for structure in structures:
+ for entry in structure.getchildren():
+ tag = entry.tag
+ if tag.startswith('Bound'):
+ tag = tag[5:]
+ if (tag, entry.get('name')) not in entries \
+ and not isinstance(entry, lxml.etree._Comment):
+ entries.append((tag, entry.get('name')))
+ entries.sort()
+ entries = tuple(entries)
+ gdata = list(metadata.groups)
+ gdata.sort()
+ gdata = tuple(gdata)
+
+ # Check to see if we have cached the prereqs already
+ if (entries, gdata) in self.cache:
+ prereqs = self.cache[(entries, gdata)]
+ else:
+ prereqs = self.calculate_prereqs(metadata, entries)
+ self.cache[(entries, gdata)] = prereqs
+
+ newstruct = lxml.etree.Element("Independent")
+ for tag, name in prereqs:
+ try:
+ lxml.etree.SubElement(newstruct, tag, name=name)
+ except:
+ self.logger.error("Failed to add dep entry for %s:%s" % (tag, name))
+ structures.append(newstruct)
+
+
+ def calculate_prereqs(self, metadata, entries):
+ """Calculate the prerequisites defined in Deps for the passed
+ set of entries.
+ """
+ prereqs = []
+ [src.Cache(metadata) for src in self.entries.values()]
+
+ toexamine = list(entries[:])
+ while toexamine:
+ entry = toexamine.pop()
+ matching = [src for src in list(self.entries.values())
+ if src.cache and entry[0] in src.cache[1]
+ and entry[1] in src.cache[1][entry[0]]]
+ if len(matching) > 1:
+ prio = [int(src.priority) for src in matching]
+ if prio.count(max(prio)) > 1:
+ self.logger.error("Found conflicting %s sources with same priority for %s, pkg %s" %
+ (entry[0].lower(), metadata.hostname, entry[1]))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ index = prio.index(max(prio))
+ matching = [matching[index]]
+ elif len(matching) == 1:
+ for prq in matching[0].cache[1][entry[0]][entry[1]]:
+ # XML comments seem to show up in the cache as a
+ # tuple with item 0 being callable. The logic
+ # below filters them out. Would be better to
+ # exclude them when we load the cache in the first
+ # place.
+ if prq not in prereqs and prq not in entries and not callable(prq[0]):
+ toexamine.append(prq)
+ prereqs.append(prq)
+ else:
+ continue
+
+ return prereqs
diff --git a/src/lib/Bcfg2/Server/Plugins/Editor.py b/src/lib/Bcfg2/Server/Plugins/Editor.py
new file mode 100644
index 000000000..c0d2cfbad
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Editor.py
@@ -0,0 +1,80 @@
+import Bcfg2.Server.Plugin
+import re
+import lxml.etree
+
+
+def linesub(pattern, repl, filestring):
+    """Substitutes instances of pattern with repl in filestring."""
+    if filestring is None:
+        filestring = ''
+    output = list()
+    fileread = filestring.split('\n')
+    for line in fileread:
+        # substitute on each line, not on the whole file at once
+        output.append(re.sub(pattern, repl, line))
+    return '\n'.join(output)
+
+
+class EditDirectives(Bcfg2.Server.Plugin.SpecificData):
+ """This object handles the editing directives."""
+ def ProcessDirectives(self, input):
+ """Processes a list of edit directives on input."""
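+        # Each line of the edits file is "pattern,replacement" (format
+        # inferred from the split(',') below), e.g.:
+        #   ^PermitRootLogin .*,PermitRootLogin no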
+ temp = input
+ for directive in self.data.split('\n'):
+ directive = directive.split(',')
+ temp = linesub(directive[0], directive[1], temp)
+ return temp
+
+
+class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, basename, path, entry_type, encoding):
+ self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" % path.split('/')[-1])
+ Bcfg2.Server.Plugin.EntrySet.__init__(self,
+ basename,
+ path,
+ entry_type,
+ encoding)
+ self.inputs = dict()
+
+ def bind_entry(self, entry, metadata):
+ client = metadata.hostname
+ filename = entry.get('name')
+ permdata = {'owner': 'root',
+ 'group': 'root',
+ 'perms': '0644'}
+ [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+ entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
+ if not entry.text:
+ entry.set('empty', 'true')
+ try:
+ f = open('%s/%s.H_%s' % (self.path, filename.split('/')[-1], client), 'w')
+ f.write(entry.text)
+ f.close()
+ except:
+ pass
+
+ def get_client_data(self, client):
+ return self.inputs[client]
+
+
+class Editor(Bcfg2.Server.Plugin.GroupSpool,
+ Bcfg2.Server.Plugin.Probing):
+ name = 'Editor'
+ __author__ = 'bcfg2-dev@mcs.anl.gov'
+ filename_pattern = 'edits'
+ es_child_cls = EditDirectives
+ es_cls = EditEntrySet
+
+ def GetProbes(self, _):
+ '''Return a set of probes for execution on client'''
+ probelist = list()
+ for name in list(self.entries.keys()):
+ probe = lxml.etree.Element('probe')
+ probe.set('name', name)
+ probe.set('source', "Editor")
+ probe.text = "cat %s" % name
+ probelist.append(probe)
+ return probelist
+
+ def ReceiveData(self, client, datalist):
+ for data in datalist:
+ self.entries[data.get('name')].inputs[client.hostname] = data.text
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
new file mode 100644
index 000000000..5beec7be0
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -0,0 +1,230 @@
+""" This module allows you to probe a client for a file, which is then
+added to the specification. On subsequent runs, the file will be
+replaced on the client if it is missing; if it has changed on the
+client, it can either be updated in the specification or replaced on
+the client """
+
+import os
+import sys
+import errno
+import binascii
+import lxml.etree
+import Bcfg2.Options
+import Bcfg2.Server.Plugin
+
+probecode = """#!/usr/bin/env python
+
+import os
+import pwd
+import grp
+import binascii
+import lxml.etree
+
+path = "%s"
+
+if not os.path.exists(path):
+ print "%%s does not exist" %% path
+ raise SystemExit(1)
+
+stat = os.stat(path)
+data = lxml.etree.Element("ProbedFileData",
+ name=path,
+ owner=pwd.getpwuid(stat[4])[0],
+ group=grp.getgrgid(stat[5])[0],
+ perms=oct(stat[0] & 07777))
+data.text = binascii.b2a_base64(open(path).read())
+print lxml.etree.tostring(data)
+"""
+
+class FileProbesConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
+ Bcfg2.Server.Plugin.StructFile):
+ """ Config file handler for FileProbes """
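+    # A hypothetical config.xml, using the attributes this plugin reads
+    # (name, update):
+    #   <FileProbes>
+    #     <FileProbe name="/etc/foo.conf" update="true"/>
+    #   </FileProbes>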
+ def __init__(self, filename, fam):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+
+
+class FileProbes(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Probing):
+ """ This module allows you to probe a client for a file, which is then
+ added to the specification. On subsequent runs, the file will be
+ replaced on the client if it is missing; if it has changed on the
+ client, it can either be updated in the specification or replaced on
+ the client """
+
+ name = 'FileProbes'
+ experimental = True
+ __author__ = 'chris.a.st.pierre@gmail.com'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Probing.__init__(self)
+ self.config = FileProbesConfig(os.path.join(self.data, 'config.xml'),
+ core.fam)
+ self.entries = dict()
+ self.probes = dict()
+
+ def GetProbes(self, metadata):
+ """Return a set of probes for execution on client."""
+ if metadata.hostname not in self.probes:
+ cfg = self.core.plugins['Cfg']
+ self.entries[metadata.hostname] = dict()
+ self.probes[metadata.hostname] = []
+ for entry in self.config.Match(metadata):
+ path = entry.get("name")
+ # do not probe for files that are already in Cfg and
+ # for which update is false; we can't possibly do
+ # anything with the data we get from such a probe
+ try:
+ if (entry.get('update', 'false').lower() == "false" and
+ cfg.entries[path].get_pertinent_entries(entry,
+ metadata)):
+ continue
+ except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
+ pass
+ self.entries[metadata.hostname][path] = entry
+ probe = lxml.etree.Element('probe', name=path,
+ source=self.name,
+ interpreter="/usr/bin/env python")
+ probe.text = probecode % path
+ self.probes[metadata.hostname].append(probe)
+ self.debug_log("Adding file probe for %s to %s" %
+ (path, metadata.hostname))
+ return self.probes[metadata.hostname]
+
+ def ReceiveData(self, metadata, datalist):
+ """Receive data from probe."""
+ self.debug_log("Receiving file probe data from %s" % metadata.hostname)
+
+ for data in datalist:
+ if data.text is None:
+ self.logger.error("Got null response to %s file probe from %s" %
+ (data.get('name'), metadata.hostname))
+ else:
+ try:
+ self.write_data(lxml.etree.XML(data.text), metadata)
+ except lxml.etree.XMLSyntaxError:
+ # if we didn't get XML back from the probe, assume
+ # it's an error message
+ self.logger.error(data.text)
+
+ def write_data(self, data, metadata):
+ """Write the probed file data to the bcfg2 specification."""
+ filename = data.get("name")
+ contents = binascii.a2b_base64(data.text)
+ entry = self.entries[metadata.hostname][filename]
+ cfg = self.core.plugins['Cfg']
+ specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname)
+ # we can't use os.path.join() for this because specific
+ # already has a leading /, which confuses os.path.join()
+ fileloc = "%s%s" % (cfg.data, os.path.join(filename, specific))
+
+ create = False
+ try:
+ cfg.entries[filename].bind_entry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ create = True
+
+ # get current entry data
+ if entry.text and entry.get("encoding") == "base64":
+ entrydata = binascii.a2b_base64(entry.text)
+ else:
+ entrydata = entry.text
+
+ if create:
+ self.logger.info("Writing new probed file %s" % fileloc)
+ self.write_file(fileloc, contents)
+ self.verify_file(filename, contents, metadata)
+ infoxml = os.path.join("%s%s" % (cfg.data, filename), "info.xml")
+ self.write_infoxml(infoxml, entry, data)
+ elif entrydata == contents:
+ self.debug_log("Existing %s contents match probed contents" %
+ filename)
+ return
+ elif (entry.get('update', 'false').lower() == "true"):
+ self.logger.info("Writing updated probed file %s" % fileloc)
+ self.write_file(fileloc, contents)
+ self.verify_file(filename, contents, metadata)
+ else:
+ self.logger.info("Skipping updated probed file %s" % fileloc)
+ return
+
+ def write_file(self, fileloc, contents):
+ try:
+ os.makedirs(os.path.dirname(fileloc))
+ except OSError:
+ err = sys.exc_info()[1]
+ if err.errno == errno.EEXIST:
+ pass
+ else:
+ self.logger.error("Could not create parent directories for %s: "
+ "%s" % (fileloc, err))
+ return
+
+ try:
+ open(fileloc, 'wb').write(contents)
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Could not write %s: %s" % (fileloc, err))
+ return
+
+ def verify_file(self, filename, contents, metadata):
+ # Service the FAM events queued up by the key generation so
+ # the data structure entries will be available for binding.
+ #
+ # NOTE: We wait for up to ten seconds. There is some potential
+ # for race condition, because if the file monitor doesn't get
+ # notified about the new key files in time, those entries
+ # won't be available for binding. In practice, this seems
+ # "good enough".
+ entry = self.entries[metadata.hostname][filename]
+ cfg = self.core.plugins['Cfg']
+ tries = 0
+ updated = False
+ while not updated:
+ if tries >= 10:
+ self.logger.error("%s still not registered" % filename)
+ return
+ self.core.fam.handle_events_in_interval(1)
+ try:
+ cfg.entries[filename].bind_entry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ tries += 1
+ continue
+
+ # get current entry data
+ if entry.get("encoding") == "base64":
+ entrydata = binascii.a2b_base64(entry.text)
+ else:
+ entrydata = entry.text
+ if entrydata == contents:
+ updated = True
+ tries += 1
+
+ def write_infoxml(self, infoxml, entry, data):
+ """ write an info.xml for the file """
+ if os.path.exists(infoxml):
+ return
+
+ self.logger.info("Writing info.xml at %s for %s" %
+ (infoxml, data.get("name")))
+ info = \
+ lxml.etree.Element("Info",
+ owner=data.get("owner",
+ Bcfg2.Options.MDATA_OWNER.value),
+ group=data.get("group",
+ Bcfg2.Options.MDATA_GROUP.value),
+ perms=data.get("perms",
+ Bcfg2.Options.MDATA_PERMS.value),
+ encoding=entry.get("encoding",
+ Bcfg2.Options.ENCODING.value))
+
+ root = lxml.etree.Element("FileInfo")
+ root.append(info)
+ try:
+ open(infoxml, "w").write(lxml.etree.tostring(root,
+ pretty_print=True))
+        except IOError:
+            err = sys.exc_info()[1]
+            self.logger.error("Could not write %s: %s" % (infoxml, err))
+            return
diff --git a/src/lib/Bcfg2/Server/Plugins/Fossil.py b/src/lib/Bcfg2/Server/Plugins/Fossil.py
new file mode 100644
index 000000000..1b1627688
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Fossil.py
@@ -0,0 +1,51 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Fossil')
+
+class Fossil(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Fossil is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Fossil'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # path to fossil file for bcfg2 repo
+ fossil_file = "%s/_FOSSIL_" % datastore
+
+ # Read revision from bcfg2 repo
+ if os.path.isfile(fossil_file):
+ revision = self.get_revision()
+ elif not os.path.isdir(datastore):
+ logger.error("%s is not a directory" % datastore)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ else:
+ logger.error("%s is not a file" % fossil_file)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+        logger.debug("Initialized Fossil plugin with %(ffile)s at revision %(frev)s" \
+                     % {'ffile': fossil_file, 'frev': revision})
+
+ def get_revision(self):
+ """Read fossil revision information for the Bcfg2 repository."""
+ try:
+ data = Popen("env LC_ALL=C fossil info",
+ shell=True,
+ cwd=self.datastore,
+ stdout=PIPE).stdout.readlines()
+ revline = [line.split(': ')[1].strip() for line in data if \
+ line.split(': ')[0].strip() == 'checkout'][-1]
+ revision = revline.split(' ')[0]
+ except IndexError:
+ logger.error("Failed to read fossil info; disabling fossil support")
+ logger.error('''Ran command "fossil info" from directory "%s"''' % (self.datastore))
+ logger.error("Got output: %s" % data)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/Git.py b/src/lib/Bcfg2/Server/Plugins/Git.py
new file mode 100644
index 000000000..8f8ea87f1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Git.py
@@ -0,0 +1,44 @@
+"""The Git plugin provides a revision interface for Bcfg2 repos using git."""
+
+import os
+from dulwich.repo import Repo
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Git')
+
+
+class Git(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Git is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Git'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # path to git directory for bcfg2 repo
+ git_dir = "%s/.git" % datastore
+
+ # Read revision from bcfg2 repo
+ if os.path.isdir(git_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not a directory" % git_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized git plugin with git directory %s" % git_dir)
+
+ def get_revision(self):
+ """Read git revision information for the Bcfg2 repository."""
+ try:
+ repo = Repo(self.datastore)
+ revision = repo.head()
+ except:
+ logger.error("Failed to read git repository; disabling git support")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
new file mode 100644
index 000000000..58b4d4afb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -0,0 +1,124 @@
+import re
+import logging
+import lxml.etree
+import Bcfg2.Server.Plugin
+
+class PackedDigitRange(object):
+ def __init__(self, digit_range):
+ self.sparse = list()
+ self.ranges = list()
+ for item in digit_range.split(','):
+ if '-' in item:
+ self.ranges.append(tuple([int(x) for x in item.split('-')]))
+ else:
+ self.sparse.append(int(item))
+
+ def includes(self, other):
+ iother = int(other)
+ if iother in self.sparse:
+ return True
+ for (start, end) in self.ranges:
+ if iother in range(start, end + 1):
+ return True
+ return False
+
+
+class PatternMap(object):
+ range_finder = r'\[\[[\d\-,]+\]\]'
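+    # A NameRange embeds digit ranges in [[...]]; e.g. "node[[1-10,15]]"
+    # matches node1 through node10 and node15 (illustration based on
+    # range_finder and PackedDigitRange above).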
+
+ def __init__(self, pattern, rangestr, groups):
+ self.pattern = pattern
+ self.rangestr = rangestr
+ self.groups = groups
+ if pattern != None:
+ self.re = re.compile(pattern)
+ self.process = self.process_re
+ elif rangestr != None:
+ if '\\' in rangestr:
+ raise Exception("Backslashes are not allowed in NameRanges")
+ self.process = self.process_range
+ self.re = re.compile('^' + re.sub(self.range_finder, '(\d+)',
+ rangestr))
+ dmatcher = re.compile(re.sub(self.range_finder,
+ r'\[\[([\d\-,]+)\]\]',
+ rangestr))
+ self.dranges = [PackedDigitRange(x)
+ for x in dmatcher.match(rangestr).groups()]
+ else:
+ raise Exception("No pattern or range given")
+
+ def process_range(self, name):
+ match = self.re.match(name)
+ if not match:
+ return None
+ digits = match.groups()
+ for i in range(len(digits)):
+ if not self.dranges[i].includes(digits[i]):
+ return None
+ return self.groups
+
+ def process_re(self, name):
+ match = self.re.match(name)
+ if not match:
+ return None
+ ret = list()
+ sub = match.groups()
+ for group in self.groups:
+ newg = group
+ for idx in range(len(sub)):
+ newg = newg.replace('$%s' % (idx + 1), sub[idx])
+ ret.append(newg)
+ return ret
+
+
+class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ __identifier__ = None
+
+ def __init__(self, filename, fam):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ self.patterns = []
+ self.logger = logging.getLogger(self.__class__.__name__)
+
+ def Index(self):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ self.patterns = []
+ for entry in self.xdata.xpath('//GroupPattern'):
+ try:
+ groups = [g.text for g in entry.findall('Group')]
+ for pat_ent in entry.findall('NamePattern'):
+ pat = pat_ent.text
+ self.patterns.append(PatternMap(pat, None, groups))
+ for range_ent in entry.findall('NameRange'):
+ rng = range_ent.text
+ self.patterns.append(PatternMap(None, rng, groups))
+ except:
+ self.logger.error("GroupPatterns: Failed to initialize pattern "
+ "%s" % entry.get('pattern'))
+
+ def process_patterns(self, hostname):
+ ret = []
+ for pattern in self.patterns:
+ try:
+ gn = pattern.process(hostname)
+ if gn is not None:
+ ret.extend(gn)
+ except:
+ self.logger.error("GroupPatterns: Failed to process pattern %s "
+ "for %s" % (pattern.pattern, hostname),
+ exc_info=1)
+ return ret
+
+
+class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ name = "GroupPatterns"
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.config = PatternFile(self.data + '/config.xml',
+ core.fam)
+
+ def get_additional_groups(self, metadata):
+ return self.config.process_patterns(metadata.hostname)
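+
+# Hedged usage sketch: the element names below come from the parsing code
+# in PatternFile.Index above; the <GroupPatterns> root name is an
+# assumption, since the xpath used there matches GroupPattern elements
+# anywhere in the document. A config.xml such as
+#
+#   <GroupPatterns>
+#     <GroupPattern>
+#       <NamePattern>web-(\d+)\.example\.com</NamePattern>
+#       <Group>webserver</Group>
+#       <Group>rack$1</Group>
+#     </GroupPattern>
+#     <GroupPattern>
+#       <NameRange>node[[1-32]]</NameRange>
+#       <Group>compute</Group>
+#     </GroupPattern>
+#   </GroupPatterns>
+#
+# would place client web-04.example.com in groups "webserver" and
+# "rack04" ($1 is replaced by the first capture), and node17 in "compute".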
diff --git a/src/lib/Bcfg2/Server/Plugins/Guppy.py b/src/lib/Bcfg2/Server/Plugins/Guppy.py
new file mode 100644
index 000000000..eea92f30f
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Guppy.py
@@ -0,0 +1,62 @@
+"""
+This plugin is used to trace memory leaks within the bcfg2-server
+process using Guppy. By default the remote debugger is started
+when this plugin is enabled. The debugger can be shut off in a running
+process using "bcfg2-admin xcmd Guppy.Disable" and re-enabled using
+"bcfg2-admin xcmd Guppy.Enable".
+
+To attach the console run:
+
+python -c "from guppy import hpy;hpy().monitor()"
+
+For example:
+
+# python -c "from guppy import hpy;hpy().monitor()"
+<Monitor>
+*** Connection 1 opened ***
+<Monitor> lc
+CID PID ARGV
+ 1 25063 ['/usr/sbin/bcfg2-server', '-D', '/var/run/bcfg2-server.pid']
+<Monitor> sc 1
+Remote connection 1. To return to Monitor, type <Ctrl-C> or .<RETURN>
+<Annex> int
+Remote interactive console. To return to Annex, type '-'.
+>>> hp.heap()
+...
+
+
+"""
+import re
+import Bcfg2.Server.Plugin
+
+class Guppy(Bcfg2.Server.Plugin.Plugin):
+ """Guppy is a debugging plugin to help trace memory leaks"""
+ name = 'Guppy'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable', 'Disable']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+
+ self.Enable()
+
+ def Enable(self):
+ """Enable remote debugging"""
+ try:
+ from guppy.heapy import Remote
+ Remote.on()
+ except:
+ self.logger.error("Failed to create Heapy context")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def Disable(self):
+ """Disable remote debugging"""
+ try:
+ from guppy.heapy import Remote
+ Remote.off()
+ except:
+ self.logger.error("Failed to disable Heapy")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Hg.py b/src/lib/Bcfg2/Server/Plugins/Hg.py
new file mode 100644
index 000000000..0c3537613
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Hg.py
@@ -0,0 +1,45 @@
+import os
+from mercurial import ui, hg
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Mercurial')
+
+class Hg(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Mercurial is a version plugin for dealing with Bcfg2 repository."""
+ name = 'Mercurial'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # path to hg directory for Bcfg2 repo
+ hg_dir = "%s/.hg" % datastore
+
+ # Read changeset from bcfg2 repo
+ if os.path.isdir(hg_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not present." % hg_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized hg plugin with hg directory = %s" % hg_dir)
+
+ def get_revision(self):
+ """Read hg revision information for the Bcfg2 repository."""
+ try:
+ repo_path = "%s/" % self.datastore
+ repo = hg.repository(ui.ui(), repo_path)
+ tip = repo.changelog.tip()
+ revision = repo.changelog.rev(tip)
+ except:
+ logger.error("Failed to read hg repository; disabling mercurial support")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
new file mode 100644
index 000000000..e9c1c1cff
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
@@ -0,0 +1,593 @@
+"""
+This file provides the Hostbase plugin.
+It manages dns/dhcp/nis host information
+"""
+
+import os
+os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
+from lxml.etree import Element, SubElement
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
+from time import strftime
+from sets import Set
+from django.template import Context, loader
+from django.db import connection
+import re
+# Compatibility imports
+from Bcfg2.Bcfg2Py3k import StringIO
+
+
+class Hostbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure,
+ Bcfg2.Server.Plugin.Generator):
+ """The Hostbase plugin handles host/network info."""
+ name = 'Hostbase'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ filepath = '/my/adm/hostbase/files/bind'
+
+ def __init__(self, core, datastore):
+
+ self.ready = False
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ files = ['zone.tmpl',
+ 'reversesoa.tmpl',
+ 'named.tmpl',
+ 'reverseappend.tmpl',
+ 'dhcpd.tmpl',
+ 'hosts.tmpl',
+ 'hostsappend.tmpl']
+ self.filedata = {}
+ self.dnsservers = []
+ self.dhcpservers = []
+ self.templates = {'zone': loader.get_template('zone.tmpl'),
+ 'reversesoa': loader.get_template('reversesoa.tmpl'),
+ 'named': loader.get_template('named.tmpl'),
+ 'namedviews': loader.get_template('namedviews.tmpl'),
+ 'reverseapp': loader.get_template('reverseappend.tmpl'),
+ 'dhcp': loader.get_template('dhcpd.tmpl'),
+ 'hosts': loader.get_template('hosts.tmpl'),
+ 'hostsapp': loader.get_template('hostsappend.tmpl'),
+ }
+ self.Entries['ConfigFile'] = {}
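+ # each pre-built file registers the bound FetchFile method in
+ # self.Entries['ConfigFile'], keyed by the file's path, so the
+ # Generator interface serves its contents from self.filedata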
+ self.__rmi__ = ['rebuildState']
+ try:
+ self.rebuildState(None)
+ except:
+ raise PluginInitError
+
+ def FetchFile(self, entry, metadata):
+ """Return prebuilt file data."""
+ fname = entry.get('name').split('/')[-1]
+ if not fname in self.filedata:
+ raise PluginExecutionError
+ perms = {'owner': 'root',
+ 'group': 'root',
+ 'perms': '644'}
+ for (key, value) in list(perms.items()):
+ entry.attrib[key] = value
+ entry.text = self.filedata[fname]
+
+ def BuildStructures(self, metadata):
+ """Build hostbase bundle."""
+ if (metadata.hostname not in self.dnsservers and
+ metadata.hostname not in self.dhcpservers):
+ return []
+ output = Element("Bundle", name='hostbase')
+ if metadata.hostname in self.dnsservers:
+ for configfile in self.Entries['ConfigFile']:
+ if re.search('/etc/bind/', configfile):
+ SubElement(output, "ConfigFile", name=configfile)
+ if metadata.hostname in self.dhcpservers:
+ SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf")
+ return [output]
+
+ def rebuildState(self, _):
+ """Pre-cache all state information for hostbase config files
+ callable as an XMLRPC function.
+
+ """
+ self.buildZones()
+ self.buildDHCP()
+ self.buildHosts()
+ self.buildHostsLPD()
+ self.buildPrinters()
+ self.buildNetgroups()
+ return True
+
+ def buildZones(self):
+ """Pre-build and stash zone files."""
+ cursor = connection.cursor()
+
+ cursor.execute("SELECT id, serial FROM hostbase_zone")
+ zones = cursor.fetchall()
+
+ for zone in zones:
+ # update the serial number for all zone files
+ todaydate = strftime('%Y%m%d')
+ try:
+ if todaydate == str(zone[1])[:8]:
+ serial = zone[1] + 1
+ else:
+ serial = int(todaydate) * 100
+ except KeyError:
+ serial = int(todaydate) * 100
+ cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0]))
+
+ cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'")
+ zones = cursor.fetchall()
+
+ iplist = []
+ hosts = {}
+
+ for zone in zones:
+ zonefile = StringIO()
+ externalzonefile = StringIO()
+ cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
+ INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
+ WHERE z.zone_id = \'%s\'""" % zone[0])
+ nameservers = cursor.fetchall()
+ cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z
+ INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id
+ WHERE z.zone_id = \'%s\'""" % zone[0])
+ addresses = cursor.fetchall()
+ cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z
+ INNER JOIN hostbase_mx m ON z.mx_id = m.id
+ WHERE z.zone_id = \'%s\'""" % zone[0])
+ mxs = cursor.fetchall()
+ context = Context({
+ 'zone': zone,
+ 'nameservers': nameservers,
+ 'addresses': addresses,
+ 'mxs': mxs
+ })
+ zonefile.write(self.templates['zone'].render(context))
+ externalzonefile.write(self.templates['zone'].render(context))
+
+ querystring = """SELECT h.hostname, p.ip_addr,
+ n.name, c.cname, m.priority, m.mx, n.dns_view
+ FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id)
+ INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
+ INNER JOIN hostbase_mx m ON m.id = x.mx_id)
+ LEFT JOIN hostbase_cname c ON n.id = c.name_id
+ WHERE n.name LIKE '%%%%%s'
+ AND h.status = 'active'
+ ORDER BY h.hostname, n.name, p.ip_addr
+ """ % zone[1]
+ cursor.execute(querystring)
+ zonehosts = cursor.fetchall()
+ prevhost = (None, None, None, None)
+ cnames = StringIO()
+ cnamesexternal = StringIO()
+ for host in zonehosts:
+ if not host[2].split(".", 1)[1] == zone[1]:
+ zonefile.write(cnames.getvalue())
+ externalzonefile.write(cnamesexternal.getvalue())
+ cnames = StringIO()
+ cnamesexternal = StringIO()
+ continue
+ if not prevhost[1] == host[1] or not prevhost[2] == host[2]:
+ zonefile.write(cnames.getvalue())
+ externalzonefile.write(cnamesexternal.getvalue())
+ cnames = StringIO()
+ cnamesexternal = StringIO()
+ zonefile.write("%-32s%-10s%-32s\n" %
+ (host[2].split(".", 1)[0], 'A', host[1]))
+ zonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+ if host[6] == 'global':
+ externalzonefile.write("%-32s%-10s%-32s\n" %
+ (host[2].split(".", 1)[0], 'A', host[1]))
+ externalzonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+ elif not prevhost[5] == host[5]:
+ zonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+ if host[6] == 'global':
+ externalzonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+
+ if host[3]:
+ try:
+ if host[3].split(".", 1)[1] == zone[1]:
+ cnames.write("%-32s%-10s%-32s\n" %
+ (host[3].split(".", 1)[0],
+ 'CNAME', host[2].split(".", 1)[0]))
+ if host[6] == 'global':
+ cnamesexternal.write("%-32s%-10s%-32s\n" %
+ (host[3].split(".", 1)[0],
+ 'CNAME', host[2].split(".", 1)[0]))
+ else:
+ cnames.write("%-32s%-10s%-32s\n" %
+ (host[3] + ".",
+ 'CNAME',
+ host[2].split(".", 1)[0]))
+ if host[6] == 'global':
+ cnamesexternal.write("%-32s%-10s%-32s\n" %
+ (host[3] + ".",
+ 'CNAME',
+ host[2].split(".", 1)[0]))
+
+ except:
+ pass
+ prevhost = host
+ zonefile.write(cnames.getvalue())
+ externalzonefile.write(cnamesexternal.getvalue())
+ zonefile.write("\n\n%s" % zone[9])
+ externalzonefile.write("\n\n%s" % zone[9])
+ self.filedata[zone[1]] = zonefile.getvalue()
+ self.filedata[zone[1] + ".external"] = externalzonefile.getvalue()
+ zonefile.close()
+ externalzonefile.close()
+ self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile
+ self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile
+
+ cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'")
+ reversezones = cursor.fetchall()
+
+ reversenames = []
+ for reversezone in reversezones:
+ cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
+ INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
+ WHERE z.zone_id = \'%s\'""" % reversezone[0])
+ reverse_nameservers = cursor.fetchall()
+
+ context = Context({
+ 'inaddr': reversezone[1][:-len('.rev')],
+ 'zone': reversezone,
+ 'nameservers': reverse_nameservers,
+ })
+
+ self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context)
+ self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context)
+ self.filedata[reversezone[1]] += reversezone[9]
+ self.filedata[reversezone[1] + '.external'] += reversezone[9]
+
+ subnet = reversezone[1].split(".")
+ subnet.reverse()
+ reversenames.append((reversezone[1][:-len('.rev')], ".".join(subnet[1:])))
+
+ for filename in reversenames:
+ cursor.execute("""
+ SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h
+ INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON n.ip_id = p.id
+ WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr
+ """ % filename[1])
+ reversehosts = cursor.fetchall()
+ zonefile = StringIO()
+ externalzonefile = StringIO()
+ if len(filename[0].split(".")) == 2:
+ originlist = []
+ [originlist.append((".".join([ip[1].split(".")[2], filename[0]]),
+ ".".join([filename[1], ip[1].split(".")[2]])))
+ for ip in reversehosts
+ if (".".join([ip[1].split(".")[2], filename[0]]),
+ ".".join([filename[1], ip[1].split(".")[2]])) not in originlist]
+ for origin in originlist:
+ hosts = [(host[1].split("."), host[0])
+ for host in reversehosts
+ if host[1].rstrip('0123456789').rstrip('.') == origin[1]]
+ hosts_external = [(host[1].split("."), host[0])
+ for host in reversehosts
+ if (host[1].rstrip('0123456789').rstrip('.') == origin[1]
+ and host[2] == 'global')]
+ context = Context({
+ 'hosts': hosts,
+ 'inaddr': origin[0],
+ 'fileorigin': filename[0],
+ })
+ zonefile.write(self.templates['reverseapp'].render(context))
+ context = Context({
+ 'hosts': hosts_external,
+ 'inaddr': origin[0],
+ 'fileorigin': filename[0],
+ })
+ externalzonefile.write(self.templates['reverseapp'].render(context))
+ else:
+ originlist = [filename[0]]
+ hosts = []
+ [hosts.append((host[1].split("."), host[0]))
+ for host in reversehosts
+ if (host[1].split("."), host[0]) not in hosts]
+ hosts_external = []
+ [hosts_external.append((host[1].split("."), host[0]))
+ for host in reversehosts
+ if ((host[1].split("."), host[0]) not in hosts_external
+ and host[2] == 'global')]
+ context = Context({
+ 'hosts': hosts,
+ 'inaddr': filename[0],
+ 'fileorigin': None,
+ })
+ zonefile.write(self.templates['reverseapp'].render(context))
+ context = Context({
+ 'hosts': hosts_external,
+ 'inaddr': filename[0],
+ 'fileorigin': None,
+ })
+ externalzonefile.write(self.templates['reverseapp'].render(context))
+ self.filedata['%s.rev' % filename[0]] += zonefile.getvalue()
+ self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue()
+ zonefile.close()
+ externalzonefile.close()
+ self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile
+ self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile
+
+ ## here's where the named.conf file gets written
+ context = Context({
+ 'zones': zones,
+ 'reverses': reversenames,
+ })
+ self.filedata['named.conf'] = self.templates['named'].render(context)
+ self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile
+ self.filedata['named.conf.views'] = self.templates['namedviews'].render(context)
+ self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile
+
+ def buildDHCP(self):
+ """Pre-build dhcpd.conf and stash in the filedata table."""
+
+ # fetches all the hosts with DHCP == True
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT hostname, mac_addr, ip_addr
+ FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip ip ON i.id = ip.interface_id
+ WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> ''
+ AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown'
+ ORDER BY h.hostname, i.mac_addr
+ """)
+
+ dhcphosts = cursor.fetchall()
+ count = 0
+ hosts = []
+ hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]]
+ if len(dhcphosts) > 1:
+ for x in range(1, len(dhcphosts)):
+ # if an interface has 2 or more ip addresses
+ # adds the ip to the current interface
+ if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]:
+ hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]])
+ # if a host has 2 or more interfaces
+ # writes the current one and grabs the next
+ elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]:
+ hosts.append(hostdata)
+ count += 1
+ hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]]
+ # new host found, writes current data to the template
+ else:
+ hosts.append(hostdata)
+ count = 0
+ hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]]
+ # make sure the last of the data gets written out
+ if hostdata not in hosts:
+ hosts.append(hostdata)
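+ # Illustrative example (hypothetical data): rows
+ #   ("a.example.com", "aa:bb", "10.0.0.1"),
+ #   ("a.example.com", "aa:bb", "10.0.0.2"),
+ #   ("a.example.com", "cc:dd", "10.0.0.3")
+ # produce hosts == [["a.example.com", "aa:bb", "10.0.0.1, 10.0.0.2"],
+ #                   ["a.example.com-1", "cc:dd", "10.0.0.3"]]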
+
+ context = Context({
+ 'hosts': hosts,
+ 'numips': len(hosts),
+ })
+
+ self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context)
+ self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile
+
+ def buildHosts(self):
+ """Pre-build and stash /etc/hosts file."""
+
+ append_data = []
+
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT hostname FROM hostbase_host ORDER BY hostname
+ """)
+ hostbase = cursor.fetchall()
+ domains = [host[0].split(".", 1)[1] for host in hostbase]
+ domains_set = Set(domains)
+ domain_data = [(domain, domains.count(domain)) for domain in domains_set]
+ domain_data.sort()
+
+ cursor.execute("""
+ SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr
+ """)
+ ips = cursor.fetchall()
+ three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
+ for ip in ips]
+ three_octets_set = Set(three_octets)
+ three_octets_data = [(octet, three_octets.count(octet)) \
+ for octet in three_octets_set]
+ three_octets_data.sort()
+
+ for three_octet in three_octets_data:
+ querystring = """SELECT h.hostname, h.primary_user,
+ p.ip_addr, n.name, c.cname
+ FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id)
+ LEFT JOIN hostbase_cname c ON n.id = c.name_id
+ WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0]
+ cursor.execute(querystring)
+ tosort = list(cursor.fetchall())
+ tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1])))
+ append_data.append((three_octet, tuple(tosort)))
+
+ two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
+ two_octets_set = Set(two_octets)
+ two_octets_data = [(octet, two_octets.count(octet))
+ for octet in two_octets_set]
+ two_octets_data.sort()
+
+ context = Context({
+ 'domain_data': domain_data,
+ 'three_octets_data': three_octets_data,
+ 'two_octets_data': two_octets_data,
+ 'three_octets': three_octets,
+ 'num_ips': len(three_octets),
+ })
+
+ self.filedata['hosts'] = self.templates['hosts'].render(context)
+
+ for subnet in append_data:
+ ips = []
+ simple = True
+ namelist = [subnet[1][0][3].split('.', 1)[0]]
+ cnamelist = []
+ if subnet[1][0][4]:
+ cnamelist.append(subnet[1][0][4].split('.', 1)[0])
+ simple = False
+ appenddata = subnet[1][0]
+ for ip in subnet[1][1:]:
+ if appenddata[2] == ip[2]:
+ namelist.append(ip[3].split('.', 1)[0])
+ if ip[4]:
+ cnamelist.append(ip[4].split('.', 1)[0])
+ simple = False
+ appenddata = ip
+ else:
+ if appenddata[0] == ip[0]:
+ simple = False
+ ips.append((appenddata[2], appenddata[0], Set(namelist),
+ cnamelist, simple, appenddata[1]))
+ appenddata = ip
+ simple = True
+ namelist = [ip[3].split('.', 1)[0]]
+ cnamelist = []
+ if ip[4]:
+ cnamelist.append(ip[4].split('.', 1)[0])
+ simple = False
+ ips.append((appenddata[2], appenddata[0], Set(namelist),
+ cnamelist, simple, appenddata[1]))
+ context = Context({
+ 'subnet': subnet[0],
+ 'ips': ips,
+ })
+ self.filedata['hosts'] += self.templates['hostsapp'].render(context)
+ self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
+
+ def buildPrinters(self):
+ """The /mcs/etc/printers.data file"""
+ header = """# This file is automatically generated. DO NOT EDIT IT!
+#
+Name Room User Type Notes
+============== ========== ============================== ======================== ====================
+"""
+
+ cursor = connection.cursor()
+ # fetches all the printers from the database
+ cursor.execute("""
+ SELECT printq, location, primary_user, comments
+ FROM hostbase_host
+ WHERE whatami='printer' AND printq <> '' AND status = 'active'
+ ORDER BY printq
+ """)
+ printers = cursor.fetchall()
+
+ printersfile = header
+ for printer in printers:
+ # splits up the printq line and gets the
+ # correct description out of the comments section
+ temp = printer[3].split('\n')
+ for printq in re.split(',[ ]*', printer[0]):
+ if len(temp) > 1:
+ printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
+ (printq, printer[1], printer[2], temp[1], temp[0]))
+ else:
+ printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
+ (printq, printer[1], printer[2], '', printer[3]))
+ self.filedata['printers.data'] = printersfile
+ self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
+
+ def buildHostsLPD(self):
+ """Creates the /mcs/etc/hosts.lpd file"""
+
+ # this header needs to be changed to be more generic
+ header = """+@machines
++@all-machines
+achilles.ctd.anl.gov
+raven.ops.anl.gov
+seagull.hr.anl.gov
+parrot.ops.anl.gov
+condor.ops.anl.gov
+delphi.esh.anl.gov
+anlcv1.ctd.anl.gov
+anlvms.ctd.anl.gov
+olivia.ctd.anl.gov\n\n"""
+
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active'
+ ORDER BY hostname""")
+ redmachines = list(cursor.fetchall())
+ cursor.execute("""
+ SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
+ WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active'
+ """)
+ redmachines.extend(list(cursor.fetchall()))
+ cursor.execute("""
+ SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active'
+ ORDER BY hostname""")
+ winmachines = list(cursor.fetchall())
+ cursor.execute("""
+ SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
+ WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active'
+ """)
+ winmachines.extend(list(cursor.fetchall()))
+ hostslpdfile = header
+ for machine in redmachines:
+ hostslpdfile += machine[0] + "\n"
+ hostslpdfile += "\n"
+ for machine in winmachines:
+ hostslpdfile += machine[0] + "\n"
+ self.filedata['hosts.lpd'] = hostslpdfile
+ self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
+
+ def buildNetgroups(self):
+ """Makes the *-machine files"""
+ header = """###################################################################
+# This file lists hosts in the '%s' machine netgroup, it is
+# automatically generated. DO NOT EDIT THIS FILE!
+#
+# Number of hosts in '%s' machine netgroup: %i
+#\n\n"""
+
+ cursor = connection.cursor()
+ # fetches all the hosts with valid netgroup entries
+ cursor.execute("""
+ SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h
+ INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id
+ WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active'
+ ORDER BY h.netgroup, h.hostname
+ """)
+ nameslist = cursor.fetchall()
+ # gets the first host and initializes the hash
+ hostdata = nameslist[0]
+ netgroups = {hostdata[2]: [hostdata[0]]}
+ for row in nameslist:
+ # if new netgroup, create it
+ if row[2] not in netgroups:
+ netgroups.update({row[2]: []})
+ # if it belongs in the netgroup and has multiple interfaces, put them in
+ if hostdata[0] == row[0] and row[3]:
+ netgroups[row[2]].append(row[1])
+ hostdata = row
+ # if it's a new host, add it to its netgroup
+ elif hostdata[0] != row[0]:
+ netgroups[row[2]].append(row[0])
+ hostdata = row
+
+ for netgroup in netgroups:
+ fileoutput = StringIO()
+ fileoutput.write(header % (netgroup, netgroup, len(netgroups[netgroup])))
+ for each in netgroups[netgroup]:
+ fileoutput.write(each + "\n")
+ self.filedata['%s-machines' % netgroup] = fileoutput.getvalue()
+ fileoutput.close()
+ self.Entries['ConfigFile']['/my/adm/hostbase/makenets/machines/%s-machines' % netgroup] = self.FetchFile
+
+ cursor.execute("""
+ UPDATE hostbase_host SET dirty=0
+ """)
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
new file mode 100644
index 000000000..29abf5b13
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -0,0 +1,245 @@
+import imp
+import logging
+import sys
+import time
+import traceback
+import Bcfg2.Options
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.Ldap')
+
+try:
+ import ldap
+except ImportError:
+ logger.error("Unable to load ldap module. Is python-ldap installed?")
+ raise
+
+# time in seconds between retries after failed LDAP connection
+RETRY_DELAY = 5
+# how many times to try reaching the LDAP server if a connection is broken
+# at the very minimum, one retry is needed to handle a restarted LDAP daemon
+RETRY_COUNT = 3
+
+SCOPE_MAP = {
+ "base": ldap.SCOPE_BASE,
+ "one": ldap.SCOPE_ONELEVEL,
+ "sub": ldap.SCOPE_SUBTREE,
+}
+
+LDAP_QUERIES = []
+
+def register_query(query):
+ LDAP_QUERIES.append(query)
+
+class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
+ """
+ Config file for the Ldap plugin
+
+ The config file cannot be 'parsed' in the traditional sense, as that
+ would require some serious type-checking ugliness just to extract the
+ LdapQuery subclasses. The alternative would be to have the user create
+ a list with a predefined name that contains all queries.
+ The approach implemented here is to have the user call a registering
+ decorator that updates a global variable in this module.
+ """
+ def __init__(self, filename, fam):
+ self.filename = filename
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, self.filename)
+ fam.AddMonitor(self.filename, self)
+
+ def Index(self):
+ """
+ Reregisters the queries in the config file
+
+ The config will take care of actually registering the queries,
+ so we just load it once and don't keep it.
+ """
+ global LDAP_QUERIES
+ LDAP_QUERIES = []
+ imp.load_source("ldap_cfg", self.filename)
+
+class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
+ """
+ The Ldap plugin allows adding data from an LDAP server to your metadata.
+ """
+ name = "Ldap"
+ experimental = True
+ debug_flag = False
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.config = ConfigFile(self.data + "/config.py", core.fam)
+
+ def debug_log(self, message, flag=None):
+ if (flag is None and self.debug_flag) or flag:
+ self.logger.error(message)
+
+ def get_additional_data(self, metadata):
+ query = None
+ try:
+ data = {}
+ self.debug_log("LdapPlugin debug: found queries " +
+ str(LDAP_QUERIES))
+ for QueryClass in LDAP_QUERIES:
+ query = QueryClass()
+ if query.is_applicable(metadata):
+ self.debug_log("LdapPlugin debug: processing query '" +
+ query.name + "'")
+ data[query.name] = query.get_result(metadata)
+ else:
+ self.debug_log("LdapPlugin debug: query '" + query.name +
+ "' not applicable to host '" + metadata.hostname + "'")
+ return data
+ except Exception:
+ if hasattr(query, "name"):
+ Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+ "Exception during processing of query named '" +
+ str(query.name) +
+ "', query results will be empty" +
+ " and may cause bind failures")
+ for line in traceback.format_exception(sys.exc_info()[0],
+ sys.exc_info()[1],
+ sys.exc_info()[2]):
+ Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+ line.replace("\n", ""))
+ return {}
+
+class LdapConnection(object):
+ """
+ Connection to an LDAP server.
+ """
+ def __init__(self, host = "localhost", port = 389,
+ binddn = None, bindpw = None):
+ self.host = host
+ self.port = port
+ self.binddn = binddn
+ self.bindpw = bindpw
+ self.conn = None
+
+ def __del__(self):
+ if self.conn:
+ self.conn.unbind()
+
+ def init_conn(self):
+ self.conn = ldap.initialize(self.url)
+ if self.binddn is not None and self.bindpw is not None:
+ self.conn.simple_bind_s(self.binddn, self.bindpw)
+
+ def run_query(self, query):
+ result = None
+ for attempt in range(RETRY_COUNT + 1):
+ if attempt >= 1:
+ Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+ "LDAP server down (retry " + str(attempt) + "/" +
+ str(RETRY_COUNT) + ")")
+ try:
+ if not self.conn:
+ self.init_conn()
+ result = self.conn.search_s(
+ query.base,
+ SCOPE_MAP[query.scope],
+ query.filter,
+ query.attrs,
+ )
+ break
+ except ldap.SERVER_DOWN:
+ self.conn = None
+ time.sleep(RETRY_DELAY)
+ return result
+
+ @property
+ def url(self):
+ return "ldap://" + self.host + ":" + str(self.port)
+
+class LdapQuery(object):
+ """
+ Query referencing an LdapConnection and providing several
+ methods for query manipulation.
+ """
+
+ name = "unknown"
+ base = ""
+ scope = "sub"
+ filter = "(objectClass=*)"
+ attrs = None
+ connection = None
+ result = None
+
+ def __unicode__(self):
+ return "LdapQuery:" + self.name
+
+ def is_applicable(self, metadata):
+ """
+ Overrideable method to determine if the query is to be executed for
+ the given metadata object.
+ Defaults to true.
+ """
+ return True
+
+ def prepare_query(self, metadata):
+ """
+ Overrideable method to alter the query based on metadata.
+ Defaults to doing nothing.
+
+ In most cases, you will do something like
+
+ self.filter = "(cn=" + metadata.hostname + ")"
+
+ here.
+ """
+ pass
+
+ def process_result(self, metadata):
+ """
+ Overrideable method to post-process the query result.
+ Defaults to returning the unaltered result.
+ """
+ return self.result
+
+ def get_result(self, metadata):
+ """
+ Method to handle preparing, executing and processing the query.
+ """
+ if isinstance(self.connection, LdapConnection):
+ self.prepare_query(metadata)
+ self.result = self.connection.run_query(self)
+ self.result = self.process_result(metadata)
+ return self.result
+ else:
+ Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+ "No valid connection defined for query " + str(self))
+ return None
+
+class LdapSubQuery(LdapQuery):
+ """
+ SubQueries are meant for internal use only and are not added
+ to the metadata object. They are useful for situations where
+ you need to run more than one query to obtain some data.
+ """
+ def prepare_query(self, metadata, **kwargs):
+ """
+ Overrideable method to alter the query based on metadata.
+ Defaults to doing nothing.
+ """
+ pass
+
+ def process_result(self, metadata, **kwargs):
+ """
+ Overrideable method to post-process the query result.
+ Defaults to returning the unaltered result.
+ """
+ return self.result
+
+ def get_result(self, metadata, **kwargs):
+ """
+ Method to handle preparing, executing and processing the query.
+ """
+ if isinstance(self.connection, LdapConnection):
+ self.prepare_query(metadata, **kwargs)
+ self.result = self.connection.run_query(self)
+ return self.process_result(metadata, **kwargs)
+ else:
+ Bcfg2.Server.Plugin.logger.error("LdapPlugin error: " +
+ "No valid connection defined for query " + str(self))
+ return None
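+
+# Hedged sketch of a config.py for this plugin; the hostname, DNs,
+# password and query name below are illustrative only:
+#
+#   from Bcfg2.Server.Plugins.Ldap import (LdapConnection, LdapQuery,
+#                                          register_query)
+#
+#   conn = LdapConnection(host="ldap.example.com",
+#                         binddn="cn=bcfg2,dc=example,dc=com",
+#                         bindpw="secret")
+#
+#   @register_query
+#   class HostQuery(LdapQuery):
+#       name = "hostinfo"
+#       base = "ou=hosts,dc=example,dc=com"
+#       connection = conn
+#
+#       def prepare_query(self, metadata):
+#           self.filter = "(cn=%s)" % metadata.hostname
+#
+# register_query only records the class in LDAP_QUERIES; the HostQuery
+# name itself is not used afterwards. The search result should then be
+# reachable from client metadata as metadata.Ldap["hostinfo"], given how
+# connector data is attached by Metadata.merge_additional_data().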
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
new file mode 100644
index 000000000..f39993496
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -0,0 +1,866 @@
+"""
+This file stores persistent metadata for the Bcfg2 Configuration Repository.
+"""
+
+import copy
+import fcntl
+import lxml.etree
+import os
+import os.path
+import socket
+import sys
+import time
+
+import Bcfg2.Server.FileMonitor
+import Bcfg2.Server.Plugin
+
+
+def locked(fd):
+ """Aquire a lock on a file"""
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ return True
+ return False
+
+
+class MetadataConsistencyError(Exception):
+ """This error gets raised when metadata is internally inconsistent."""
+ pass
+
+
+class MetadataRuntimeError(Exception):
+ """This error is raised when the metadata engine
+ is called prior to reading enough data.
+ """
+ pass
+
+
+class XMLMetadataConfig(object):
+ """Handles xml config files and all XInclude statements"""
+ def __init__(self, metadata, watch_clients, basefile):
+ self.metadata = metadata
+ self.basefile = basefile
+ self.should_monitor = watch_clients
+ self.extras = []
+ self.data = None
+ self.basedata = None
+ self.basedir = metadata.data
+ self.logger = metadata.logger
+ self.pseudo_monitor = isinstance(metadata.core.fam,
+ Bcfg2.Server.FileMonitor.Pseudo)
+
+ @property
+ def xdata(self):
+ if not self.data:
+ raise MetadataRuntimeError
+ return self.data
+
+ @property
+ def base_xdata(self):
+ if not self.basedata:
+ raise MetadataRuntimeError
+ return self.basedata
+
+ def add_monitor(self, fname):
+ """Add a fam monitor for an included file"""
+ if self.should_monitor:
+ self.metadata.core.fam.AddMonitor(os.path.join(self.basedir, fname),
+ self.metadata)
+ self.extras.append(fname)
+
+ def load_xml(self):
+ """Load changes from XML"""
+ try:
+ xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile))
+ except lxml.etree.XMLSyntaxError:
+ self.logger.error('Failed to parse %s' % self.basefile)
+ return
+ self.basedata = copy.copy(xdata)
+ included = [ent.get('href') for ent in \
+ xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
+ if included:
+ for name in included:
+ if name not in self.extras:
+ self.add_monitor(name)
+ try:
+ xdata.xinclude()
+ except lxml.etree.XIncludeError:
+ self.logger.error("Failed to process XInclude for file %s" %
+ self.basefile)
+ self.data = xdata
+
+ def write(self):
+ """Write changes to xml back to disk."""
+ self.write_xml(os.path.join(self.basedir, self.basefile),
+ self.basedata)
+
+ def write_xml(self, fname, xmltree):
+ """Write changes to xml back to disk."""
+ tmpfile = "%s.new" % fname
+ try:
+ datafile = open(tmpfile, 'w')
+ except IOError:
+ e = sys.exc_info()[1]
+ self.logger.error("Failed to write %s: %s" % (tmpfile, e))
+ raise MetadataRuntimeError
+ # prep data
+ dataroot = xmltree.getroot()
+ newcontents = lxml.etree.tostring(dataroot, pretty_print=True)
+
+ fd = datafile.fileno()
+ while locked(fd):
+ pass
+ try:
+ datafile.write(newcontents)
+ except:
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ self.logger.error("Metadata: Failed to write new xml data to %s" %
+ tmpfile, exc_info=1)
+ os.unlink(tmpfile)
+ raise MetadataRuntimeError
+ datafile.close()
+
+ # check if clients.xml is a symlink
+ if os.path.islink(fname):
+ fname = os.readlink(fname)
+
+ try:
+ os.rename(tmpfile, fname)
+ except:
+ self.logger.error("Metadata: Failed to rename %s" % tmpfile)
+ raise MetadataRuntimeError
+
+ def find_xml_for_xpath(self, xpath):
+ """Find and load xml file containing the xpath query"""
+ if self.pseudo_monitor:
+ # Reload xml if we don't have a real monitor
+ self.load_xml()
+ cli = self.basedata.xpath(xpath)
+ if len(cli) > 0:
+ return {'filename': os.path.join(self.basedir, self.basefile),
+ 'xmltree': self.basedata,
+ 'xquery': cli}
+ else:
+ # Try to find the data in included files
+ for included in self.extras:
+ try:
+ xdata = lxml.etree.parse(os.path.join(self.basedir,
+ included))
+ cli = xdata.xpath(xpath)
+ if len(cli) > 0:
+ return {'filename': os.path.join(self.basedir,
+ included),
+ 'xmltree': xdata,
+ 'xquery': cli}
+ except lxml.etree.XMLSyntaxError:
+ self.logger.error('Failed to parse %s' % (included))
+ return {}
+
+ def HandleEvent(self, event):
+ """Handle fam events"""
+ filename = event.filename.split('/')[-1]
+ if filename in self.extras:
+ if event.code2str() == 'exists':
+ return False
+ elif filename != self.basefile:
+ return False
+ if event.code2str() == 'endExist':
+ return False
+ self.load_xml()
+ return True
+
+
+class ClientMetadata(object):
+ """This object contains client metadata."""
+ def __init__(self, client, profile, groups, bundles,
+ aliases, addresses, categories, uuid, password, query):
+ self.hostname = client
+ self.profile = profile
+ self.bundles = bundles
+ self.aliases = aliases
+ self.addresses = addresses
+ self.groups = groups
+ self.categories = categories
+ self.uuid = uuid
+ self.password = password
+ self.connectors = []
+ self.query = query
+
+ def inGroup(self, group):
+ """Test to see if client is a member of group."""
+ return group in self.groups
+
+ def group_in_category(self, category):
+ for grp in self.query.all_groups_in_category(category):
+ if grp in self.groups:
+ return grp
+ return ''
+
+
+class MetadataQuery(object):
+ def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category):
+ # resolver is set later
+ self.by_name = by_name
+ self.names_by_groups = by_groups
+ self.names_by_profiles = by_profiles
+ self.all_clients = get_clients
+ self.all_groups = all_groups
+ self.all_groups_in_category = all_groups_in_category
+
+ def by_groups(self, groups):
+ return [self.by_name(name) for name in self.names_by_groups(groups)]
+
+ def by_profiles(self, profiles):
+ return [self.by_name(name) for name in self.names_by_profiles(profiles)]
+
+ def all(self):
+ return [self.by_name(name) for name in self.all_clients()]
+
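+# Hedged usage sketch: a ClientMetadata object carries one of these as
+# metadata.query, so e.g. metadata.query.names_by_groups(["webserver"])
+# returns the hostnames of all clients in the "webserver" group, and
+# metadata.query.by_name(name) builds the full metadata for one client.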
+
+class Metadata(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Statistics):
+ """This class contains data for bcfg2 server metadata."""
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ name = "Metadata"
+ sort_order = 500
+
+ def __init__(self, core, datastore, watch_clients=True):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Metadata.__init__(self)
+ Bcfg2.Server.Plugin.Statistics.__init__(self)
+ if watch_clients:
+ try:
+ core.fam.AddMonitor(os.path.join(self.data, "groups.xml"), self)
+ core.fam.AddMonitor(os.path.join(self.data, "clients.xml"), self)
+ except:
+ print("Unable to add file monitor for groups.xml or clients.xml")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ self.clients_xml = XMLMetadataConfig(self, watch_clients, 'clients.xml')
+ self.groups_xml = XMLMetadataConfig(self, watch_clients, 'groups.xml')
+ self.states = {}
+ if watch_clients:
+ self.states = {"groups.xml": False,
+ "clients.xml": False}
+ self.addresses = {}
+ self.auth = dict()
+ self.clients = {}
+ self.aliases = {}
+ self.groups = {}
+ self.cgroups = {}
+ self.public = []
+ self.private = []
+ self.profiles = []
+ self.categories = {}
+ self.bad_clients = {}
+ self.uuid = {}
+ self.secure = []
+ self.floating = []
+ self.passwords = {}
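+ # session_cache maps (address, port) pairs to
+ # (timestamp, client name) tuples; see resolve_client() and
+ # AuthenticateConnection() below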
+ self.session_cache = {}
+ self.default = None
+ self.pdirty = False
+ self.extra = {'groups.xml': [],
+ 'clients.xml': []}
+ self.password = core.password
+ self.query = MetadataQuery(core.build_metadata,
+ lambda: list(self.clients.keys()),
+ self.get_client_names_by_groups,
+ self.get_client_names_by_profiles,
+ self.get_all_group_names,
+ self.get_all_groups_in_category)
+
+ @classmethod
+ def init_repo(cls, repo, groups, os_selection, clients):
+ path = os.path.join(repo, cls.name)
+ os.makedirs(path)
+ open(os.path.join(repo, "Metadata", "groups.xml"),
+ "w").write(groups % os_selection)
+ open(os.path.join(repo, "Metadata", "clients.xml"),
+ "w").write(clients % socket.getfqdn())
+
+ def get_groups(self):
+ '''return groups xml tree'''
+ groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
+ root = groups_tree.getroot()
+ return root
+
+ def _search_xdata(self, tag, name, tree, alias=False):
+ for node in tree.findall("//%s" % tag):
+ if node.get("name") == name:
+ return node
+ elif alias:
+ for child in node:
+ if (child.tag == "Alias" and
+ child.attrib["name"] == name):
+ return node
+ return None
+
+ def search_group(self, group_name, tree):
+ """Find a group."""
+ return self._search_xdata("Group", group_name, tree)
+
+ def search_bundle(self, bundle_name, tree):
+ """Find a bundle."""
+ return self._search_xdata("Bundle", bundle_name, tree)
+
+ def search_client(self, client_name, tree):
+ return self._search_xdata("Client", client_name, tree, alias=True)
+
+ def _add_xdata(self, config, tag, name, attribs=None, alias=False):
+ node = self._search_xdata(tag, name, config.xdata, alias=alias)
+ if node is not None:
+ self.logger.error("%s \"%s\" already exists" % (tag, name))
+ raise MetadataConsistencyError
+ element = lxml.etree.SubElement(config.base_xdata.getroot(),
+ tag, name=name)
+ if attribs:
+ for key, val in list(attribs.items()):
+ element.set(key, val)
+ config.write()
+
+ def add_group(self, group_name, attribs):
+ """Add group to groups.xml."""
+ return self._add_xdata(self.groups_xml, "Group", group_name,
+ attribs=attribs)
+
+ def add_bundle(self, bundle_name):
+ """Add bundle to groups.xml."""
+ return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
+
+ def add_client(self, client_name, attribs):
+ """Add client to clients.xml."""
+ return self._add_xdata(self.clients_xml, "Client", client_name,
+ attribs=attribs, alias=True)
+
+ def _update_xdata(self, config, tag, name, attribs, alias=False):
+ node = self._search_xdata(tag, name, config.xdata, alias=alias)
+ if node is None:
+ self.logger.error("%s \"%s\" does not exist" % (tag, name))
+ raise MetadataConsistencyError
+ xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
+ (tag, node.get('name')))
+ if not xdict:
+ self.logger.error("Unexpected error finding %s \"%s\"" %
+ (tag, name))
+ raise MetadataConsistencyError
+ for key, val in list(attribs.items()):
+ xdict['xquery'][0].set(key, val)
+ config.write_xml(xdict['filename'], xdict['xmltree'])
+
+ def update_group(self, group_name, attribs):
+ """Update a groups attributes."""
+ return self._update_xdata(self.groups_xml, "Group", group_name, attribs)
+
+ def update_client(self, client_name, attribs):
+ """Update a clients attributes."""
+ return self._update_xdata(self.clients_xml, "Client", client_name,
+ attribs, alias=True)
+
+ def _remove_xdata(self, config, tag, name, alias=False):
+ node = self._search_xdata(tag, name, config.xdata, alias=alias)
+ if node is None:
+ self.logger.error("%s \"%s\" does not exist" % (tag, name))
+ raise MetadataConsistencyError
+ xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
+ (tag, node.get('name')))
+ if not xdict:
+ self.logger.error("Unexpected error finding %s \"%s\"" %
+ (tag, name))
+ raise MetadataConsistencyError
+ xdict['xquery'][0].getparent().remove(xdict['xquery'][0])
+ config.write_xml(xdict['filename'], xdict['xmltree'])
+
+ def remove_group(self, group_name):
+ """Remove a group."""
+ return self._remove_xdata(self.groups_xml, "Group", group_name)
+
+ def remove_bundle(self, bundle_name):
+ """Remove a bundle."""
+ return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+
+ def _handle_clients_xml_event(self, event):
+ xdata = self.clients_xml.xdata
+ self.clients = {}
+ self.aliases = {}
+ self.raliases = {}
+ self.bad_clients = {}
+ self.secure = []
+ self.floating = []
+ self.addresses = {}
+ self.raddresses = {}
+ for client in xdata.findall('.//Client'):
+ clname = client.get('name').lower()
+ if 'address' in client.attrib:
+ caddr = client.get('address')
+ if caddr in self.addresses:
+ self.addresses[caddr].append(clname)
+ else:
+ self.addresses[caddr] = [clname]
+ if clname not in self.raddresses:
+ self.raddresses[clname] = set()
+ self.raddresses[clname].add(caddr)
+ if 'auth' in client.attrib:
+ self.auth[client.get('name')] = client.get('auth',
+ 'cert+password')
+ if 'uuid' in client.attrib:
+ self.uuid[client.get('uuid')] = clname
+ if client.get('secure', 'false') == 'true':
+ self.secure.append(clname)
+ if client.get('location', 'fixed') == 'floating':
+ self.floating.append(clname)
+ if 'password' in client.attrib:
+ self.passwords[clname] = client.get('password')
+
+ self.raliases[clname] = set()
+ for alias in client.findall('Alias'):
+ self.aliases.update({alias.get('name'): clname})
+ self.raliases[clname].add(alias.get('name'))
+ if 'address' not in alias.attrib:
+ continue
+ if alias.get('address') in self.addresses:
+ self.addresses[alias.get('address')].append(clname)
+ else:
+ self.addresses[alias.get('address')] = [clname]
+ if clname not in self.raddresses:
+ self.raddresses[clname] = set()
+ self.raddresses[clname].add(alias.get('address'))
+ self.clients.update({clname: client.get('profile')})
+ self.states['clients.xml'] = True
+
+ def _handle_groups_xml_event(self, event):
+ xdata = self.groups_xml.xdata
+ self.public = []
+ self.private = []
+ self.profiles = []
+ self.groups = {}
+ grouptmp = {}
+ self.categories = {}
+ groupseen = list()
+ for group in xdata.xpath('//Groups/Group'):
+ if group.get('name') not in groupseen:
+ groupseen.append(group.get('name'))
+ else:
+ self.logger.error("Metadata: Group %s defined multiply" %
+ group.get('name'))
+ grouptmp[group.get('name')] = \
+ ([item.get('name') for item in group.findall('./Bundle')],
+ [item.get('name') for item in group.findall('./Group')])
+ grouptmp[group.get('name')][1].append(group.get('name'))
+ if group.get('default', 'false') == 'true':
+ self.default = group.get('name')
+ if group.get('profile', 'false') == 'true':
+ self.profiles.append(group.get('name'))
+ if group.get('public', 'false') == 'true':
+ self.public.append(group.get('name'))
+ elif group.get('public', 'true') == 'false':
+ self.private.append(group.get('name'))
+ if 'category' in group.attrib:
+ self.categories[group.get('name')] = group.get('category')
+
+ for group in grouptmp:
+ # self.groups[group] => (bundles, groups, categories)
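+ # Illustrative example: given <Group name="web" category="role"/>
+ # and <Group name="all"><Group name="web"/></Group> in groups.xml,
+ # grouptmp is {"web": ([], ["web"]), "all": ([], ["web", "all"])},
+ # and the closure below yields self.groups["all"] ==
+ # (set(), set(["all", "web"]), {"role": "web"})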
+ self.groups[group] = (set(), set(), {})
+ tocheck = [group]
+ group_cat = self.groups[group][2]
+ while tocheck:
+ now = tocheck.pop()
+ self.groups[group][1].add(now)
+ if now in grouptmp:
+ (bundles, groups) = grouptmp[now]
+ for ggg in groups:
+ if ggg in self.groups[group][1]:
+ continue
+ if (ggg not in self.categories or \
+ self.categories[ggg] not in self.groups[group][2]):
+ self.groups[group][1].add(ggg)
+ tocheck.append(ggg)
+ if ggg in self.categories:
+ group_cat[self.categories[ggg]] = ggg
+ elif ggg in self.categories:
+ self.logger.info("Group %s: %s cat-suppressed %s" % \
+ (group,
+ group_cat[self.categories[ggg]],
+ ggg))
+ [self.groups[group][0].add(bund) for bund in bundles]
+ self.states['groups.xml'] = True
+
+ def HandleEvent(self, event):
+ """Handle update events for data files."""
+ if self.clients_xml.HandleEvent(event):
+ self._handle_clients_xml_event(event)
+ elif self.groups_xml.HandleEvent(event):
+ self._handle_groups_xml_event(event)
+
+ if False not in list(self.states.values()):
+ # check that all client groups are real and complete
+ real = list(self.groups.keys())
+ for client in list(self.clients.keys()):
+ if self.clients[client] not in self.profiles:
+ self.logger.error("Client %s set as nonexistent or "
+ "incomplete group %s" %
+ (client, self.clients[client]))
+ self.logger.error("Removing client mapping for %s" % client)
+ self.bad_clients[client] = self.clients[client]
+ del self.clients[client]
+ for bclient in list(self.bad_clients.keys()):
+ if self.bad_clients[bclient] in self.profiles:
+ self.logger.info("Restored profile mapping for client %s" %
+ bclient)
+ self.clients[bclient] = self.bad_clients[bclient]
+ del self.bad_clients[bclient]
+
+ def set_profile(self, client, profile, addresspair):
+ """Set group parameter for provided client."""
+ self.logger.info("Asserting client %s profile to %s" % (client, profile))
+ if False in list(self.states.values()):
+ raise MetadataRuntimeError
+ if profile not in self.public:
+ self.logger.error("Failed to set client %s to private group %s" %
+ (client, profile))
+ raise MetadataConsistencyError
+ if client in self.clients:
+ self.logger.info("Changing %s group from %s to %s" %
+ (client, self.clients[client], profile))
+ self.update_client(client, dict(profile=profile))
+ else:
+ self.logger.info("Creating new client: %s, profile %s" %
+ (client, profile))
+ if addresspair in self.session_cache:
+ # we are working with a uuid'd client
+ self.add_client(self.session_cache[addresspair][1],
+ dict(uuid=client, profile=profile,
+ address=addresspair[0]))
+ else:
+ self.add_client(client, dict(profile=profile))
+ self.clients[client] = profile
+ self.clients_xml.write()
+
+ def resolve_client(self, addresspair, cleanup_cache=False):
+ """Lookup address locally or in DNS to get a hostname."""
+ if addresspair in self.session_cache:
+ # client _was_ cached, so there can be some expired
+ # entries. we need to clean them up to avoid potentially
+ # infinite memory swell
+ cache_ttl = 90
+ if cleanup_cache:
+ # remove entries for this client's IP address with
+ # _any_ port numbers - perhaps a priority queue could
+ # be faster?
+ curtime = time.time()
+ for addrpair in list(self.session_cache.keys()):
+ if addresspair[0] == addrpair[0]:
+ (stamp, _) = self.session_cache[addrpair]
+ if curtime - stamp > cache_ttl:
+ del self.session_cache[addrpair]
+ # return the cached data
+ try:
+ (stamp, uuid) = self.session_cache[addresspair]
+ if time.time() - stamp < cache_ttl:
+ return self.session_cache[addresspair][1]
+ except KeyError:
+ # we cleaned all cached data for this client in cleanup_cache
+ pass
+ address = addresspair[0]
+ if address in self.addresses:
+ if len(self.addresses[address]) != 1:
+ self.logger.error("Address %s has multiple reverse assignments; "
+ "a uuid must be used" % (address))
+ raise MetadataConsistencyError
+ return self.addresses[address][0]
+ try:
+ cname = socket.gethostbyaddr(address)[0].lower()
+ if cname in self.aliases:
+ return self.aliases[cname]
+ return cname
+ except socket.herror:
+ warning = "address resolution error for %s" % (address)
+ self.logger.warning(warning)
+ raise MetadataConsistencyError
+
+ def get_initial_metadata(self, client):
+ """Return the metadata for a given client."""
+ if False in list(self.states.values()):
+ raise MetadataRuntimeError
+ client = client.lower()
+ if client in self.aliases:
+ client = self.aliases[client]
+ if client in self.clients:
+ profile = self.clients[client]
+ (bundles, groups, categories) = self.groups[profile]
+ else:
+ if self.default is None:
+ self.logger.error("Cannot set group for client %s; "
+ "no default group set" % client)
+ raise MetadataConsistencyError
+ self.set_profile(client, self.default, (None, None))
+ profile = self.default
+ [bundles, groups, categories] = self.groups[self.default]
+ aliases = self.raliases.get(client, set())
+ addresses = self.raddresses.get(client, set())
+ newgroups = set(groups)
+ newbundles = set(bundles)
+ newcategories = {}
+ newcategories.update(categories)
+ if client in self.passwords:
+ password = self.passwords[client]
+ else:
+ password = None
+ uuids = [item for item, value in list(self.uuid.items())
+ if value == client]
+ if uuids:
+ uuid = uuids[0]
+ else:
+ uuid = None
+ for group in self.cgroups.get(client, []):
+ if group in self.groups:
+ nbundles, ngroups, ncategories = self.groups[group]
+ else:
+ nbundles, ngroups, ncategories = ([], [group], {})
+ [newbundles.add(b) for b in nbundles if b not in newbundles]
+ [newgroups.add(g) for g in ngroups if g not in newgroups]
+ newcategories.update(ncategories)
+ return ClientMetadata(client, profile, newgroups, newbundles, aliases,
+ addresses, newcategories, uuid, password,
+ self.query)
+
+ def get_all_group_names(self):
+ all_groups = set()
+ [all_groups.update(g[1]) for g in list(self.groups.values())]
+ return all_groups
+
+ def get_all_groups_in_category(self, category):
+ all_groups = set()
+ [all_groups.add(g) for g in self.categories \
+ if self.categories[g] == category]
+ return all_groups
+
+ def get_client_names_by_profiles(self, profiles):
+ return [client for client, profile in list(self.clients.items()) \
+ if profile in profiles]
+
+ def get_client_names_by_groups(self, groups):
+ mdata = [self.core.build_metadata(client)
+ for client in list(self.clients.keys())]
+ return [md.hostname for md in mdata if md.groups.issuperset(groups)]
+
+ def merge_additional_groups(self, imd, groups):
+ for group in groups:
+ if (group in self.categories and
+ self.categories[group] in imd.categories):
+ continue
+ newbundles, newgroups, _ = self.groups.get(group,
+ (list(),
+ [group],
+ dict()))
+ for newbundle in newbundles:
+ if newbundle not in imd.bundles:
+ imd.bundles.add(newbundle)
+ for newgroup in newgroups:
+ if newgroup not in imd.groups:
+ if (newgroup in self.categories and
+ self.categories[newgroup] in imd.categories):
+ continue
+ if newgroup in self.private:
+ self.logger.error("Refusing to add dynamic membership "
+ "in private group %s for client %s" %
+ (newgroup, imd.hostname))
+ continue
+ imd.groups.add(newgroup)
+
+ def merge_additional_data(self, imd, source, data):
+ if not hasattr(imd, source):
+ setattr(imd, source, data)
+ imd.connectors.append(source)
+
+ def validate_client_address(self, client, addresspair):
+ """Check address against client."""
+ address = addresspair[0]
+ if client in self.floating:
+ self.debug_log("Client %s is floating" % client)
+ return True
+ if address in self.addresses:
+ if client in self.addresses[address]:
+ self.debug_log("Client %s matches address %s" %
+ (client, address))
+ return True
+ else:
+ self.logger.error("Got request for non-float client %s from %s" %
+ (client, address))
+ return False
+ resolved = self.resolve_client(addresspair)
+ if resolved.lower() == client.lower():
+ return True
+ else:
+ self.logger.error("Got request for %s from incorrect address %s" %
+ (client, address))
+ self.logger.error("Resolved to %s" % resolved)
+ return False
+
+ def AuthenticateConnection(self, cert, user, password, address):
+ """This function checks auth creds."""
+ if cert:
+ id_method = 'cert'
+ certinfo = dict([x[0] for x in cert['subject']])
+ # look at cert.cN
+ client = certinfo['commonName']
+ self.debug_log("Got cN %s; using as client name" % client)
+ auth_type = self.auth.get(client, 'cert+password')
+ elif user == 'root':
+ id_method = 'address'
+ try:
+ client = self.resolve_client(address)
+ except MetadataConsistencyError:
+ self.logger.error("Client %s failed to resolve; metadata problem"
+ % address[0])
+ return False
+ else:
+ id_method = 'uuid'
+ # user maps to client
+ if user not in self.uuid:
+ client = user
+ self.uuid[user] = user
+ else:
+ client = self.uuid[user]
+
+ # we have the client name
+ self.debug_log("Authenticating client %s" % client)
+
+ # next we validate the address
+ if id_method == 'uuid':
+ addr_is_valid = True
+ else:
+ addr_is_valid = self.validate_client_address(client, address)
+
+ if not addr_is_valid:
+ return False
+
+ if id_method == 'cert' and auth_type != 'cert+password':
+ # remember the cert-derived client name for this connection
+ if client in self.floating:
+ self.session_cache[address] = (time.time(), client)
+ # we are done if cert+password not required
+ return True
+
+ if client not in self.passwords:
+ if client in self.secure:
+ self.logger.error("Client %s in secure mode but has no password"
+ % address[0])
+ return False
+ if password != self.password:
+ self.logger.error("Client %s used incorrect global password" %
+ address[0])
+ return False
+ if client not in self.secure:
+ if client in self.passwords:
+ plist = [self.password, self.passwords[client]]
+ else:
+ plist = [self.password]
+ if password not in plist:
+ self.logger.error("Client %s failed to use either allowed "
+ "password" % address[0])
+ return False
+ else:
+ # client in secure mode and has a client password
+ if password != self.passwords[client]:
+ self.logger.error("Client %s failed to use client password in "
+ "secure mode" % address[0])
+ return False
+ # populate the session cache
+ if user != 'root':
+ self.session_cache[address] = (time.time(), client)
+ return True
+
+ def process_statistics(self, meta, _):
+ """Hook into statistics interface to toggle clients in bootstrap mode."""
+ client = meta.hostname
+ if client in self.auth and self.auth[client] == 'bootstrap':
+ self.update_client(client, dict(auth='cert'))
+
+ def viz(self, hosts, bundles, key, only_client, colors):
+ """Admin mode viz support."""
+ if only_client:
+ clientmeta = self.core.build_metadata(only_client)
+
+ def include_client(client):
+ return not only_client or client == only_client
+
+ def include_bundle(bundle):
+ return not only_client or bundle in clientmeta.bundles
+
+ def include_group(group):
+ return not only_client or group in clientmeta.groups
+
+ groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
+ try:
+ groups_tree.xinclude()
+ except lxml.etree.XIncludeError:
+ self.logger.error("Failed to process XInclude for file %s" % dest)
+ groups = groups_tree.getroot()
+ categories = {'default': 'grey83'}
+ viz_str = []
+ egroups = groups.findall("Group") + groups.findall('.//Groups/Group')
+ for group in egroups:
+            if group.get('category') not in categories:
+ categories[group.get('category')] = colors.pop()
+ group.set('color', categories[group.get('category')])
+ if None in categories:
+ del categories[None]
+ if hosts:
+ instances = {}
+ clients = self.clients
+ for client, profile in list(clients.items()):
+                if not include_client(client):
+                    continue
+ if profile in instances:
+ instances[profile].append(client)
+ else:
+ instances[profile] = [client]
+ for profile, clist in list(instances.items()):
+ clist.sort()
+ viz_str.append('"%s-instances" [ label="%s", shape="record" ];' %
+ (profile, '|'.join(clist)))
+ viz_str.append('"%s-instances" -> "group-%s";' %
+ (profile, profile))
+ if bundles:
+ bundles = []
+            for bund in groups.findall('.//Bundle'):
+                if (bund.get('name') not in bundles and
+                        include_bundle(bund.get('name'))):
+                    bundles.append(bund.get('name'))
+ bundles.sort()
+ for bundle in bundles:
+ viz_str.append('"bundle-%s" [ label="%s", shape="septagon"];' %
+ (bundle, bundle))
+ gseen = []
+ for group in egroups:
+ if group.get('profile', 'false') == 'true':
+ style = "filled, bold"
+ else:
+ style = "filled"
+ gseen.append(group.get('name'))
+ if include_group(group.get('name')):
+ viz_str.append('"group-%s" [label="%s", style="%s", fillcolor=%s];' %
+ (group.get('name'), group.get('name'), style,
+ group.get('color')))
+ if bundles:
+ for bundle in group.findall('Bundle'):
+ viz_str.append('"group-%s" -> "bundle-%s";' %
+ (group.get('name'), bundle.get('name')))
+ gfmt = '"group-%s" [label="%s", style="filled", fillcolor="grey83"];'
+ for group in egroups:
+ for parent in group.findall('Group'):
+ if parent.get('name') not in gseen and include_group(parent.get('name')):
+ viz_str.append(gfmt % (parent.get('name'),
+ parent.get('name')))
+ gseen.append(parent.get("name"))
+ if include_group(group.get('name')):
+ viz_str.append('"group-%s" -> "group-%s";' %
+ (group.get('name'), parent.get('name')))
+ if key:
+ for category in categories:
+ viz_str.append('"%s" [label="%s", shape="record", style="filled", fillcolor="%s"];' %
+ (category, category, categories[category]))
+ return "\n".join("\t" + s for s in viz_str)
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
new file mode 100644
index 000000000..4dbd57d16
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -0,0 +1,151 @@
+'''This module implements a Nagios configuration generator'''
+
+import os
+import re
+import sys
+import glob
+import socket
+import logging
+import lxml.etree
+
+import Bcfg2.Server.Plugin
+
+LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen')
+
+line_fmt = '\t%-32s %s'
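+# e.g. line_fmt % ('host_name', 'foo.example.com') renders a tab, the
+# option name left-justified in a 32-column field, and then the value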
+
+class NagiosGenConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, filename, fam):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+
+
+class NagiosGen(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Generator):
+ """NagiosGen is a Bcfg2 plugin that dynamically generates
+ Nagios configuration file based on Bcfg2 data.
+ """
+ name = 'NagiosGen'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ self.config = NagiosGenConfig(os.path.join(self.data, 'config.xml'),
+ core.fam)
+ self.Entries = {'Path':
+ {'/etc/nagiosgen.status': self.createhostconfig,
+ '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
+
+ self.client_attrib = {'encoding': 'ascii',
+ 'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '0400'}
+ self.server_attrib = {'encoding': 'ascii',
+ 'owner': 'nagios',
+ 'group': 'nagios',
+ 'type': 'file',
+ 'perms': '0440'}
+
+ def createhostconfig(self, entry, metadata):
+ """Build host specific configuration file."""
+ host_address = socket.gethostbyname(metadata.hostname)
+ host_groups = [grp for grp in metadata.groups
+ if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
+ host_config = ['define host {',
+ line_fmt % ('host_name', metadata.hostname),
+ line_fmt % ('alias', metadata.hostname),
+ line_fmt % ('address', host_address)]
+
+ if host_groups:
+ host_config.append(line_fmt % ("hostgroups",
+ ",".join(host_groups)))
+
+ # read the old-style Properties config, but emit a warning.
+ xtra = dict()
+ props = None
+ if (hasattr(metadata, 'Properties') and
+ 'NagiosGen.xml' in metadata.Properties):
+ props = metadata.Properties['NagiosGen.xml'].data
+ if props is not None:
+ LOGGER.warn("Parsing deprecated Properties/NagiosGen.xml. "
+ "Update to the new-style config with "
+ "nagiosgen-convert.py.")
+ xtra = dict((el.tag, el.text)
+ for el in props.find(metadata.hostname))
+ # hold off on parsing the defaults until we've checked for
+ # a new-style config
+
+ # read the old-style parents.xml, but emit a warning
+ pfile = os.path.join(self.data, "parents.xml")
+ if os.path.exists(pfile):
+ LOGGER.warn("Parsing deprecated NagiosGen/parents.xml. "
+ "Update to the new-style config with "
+ "nagiosgen-convert.py.")
+ parents = lxml.etree.parse(pfile)
+ for el in parents.xpath("//Depend[@name='%s']" % metadata.hostname):
+ if 'parent' in xtra:
+ xtra['parent'] += "," + el.get("on")
+ else:
+ xtra['parent'] = el.get("on")
+
+ # read the new-style config and overwrite the old-style config
+ for el in self.config.Match(metadata):
+ if el.tag == 'Option':
+ xtra[el.get("name")] = el.text
+
+ # if we haven't found anything in the new- or old-style
+ # configs, finally read defaults from old-style config
+ if not xtra and props is not None:
+ xtra = dict((el.tag, el.text) for el in props.find('default'))
+
+ if xtra:
+ host_config.extend([line_fmt % (opt, val)
+ for opt, val in list(xtra.items())])
+ else:
+ host_config.append(line_fmt % ('use', 'default'))
+
+ host_config.append('}')
+ entry.text = "%s\n" % "\n".join(host_config)
+        for key, value in list(self.client_attrib.items()):
+            entry.attrib[key] = value
+ try:
+ fileh = open("%s/%s-host.cfg" %
+ (self.data, metadata.hostname), 'w')
+ fileh.write(entry.text)
+ fileh.close()
+        except IOError:
+ ioerr = sys.exc_info()[1]
+ LOGGER.error("Failed to write %s/%s-host.cfg" %
+ (self.data, metadata.hostname))
+ LOGGER.error(ioerr)
+
+ def createserverconfig(self, entry, _):
+ """Build monolithic server configuration file."""
+ host_configs = glob.glob('%s/*-host.cfg' % self.data)
+ group_configs = glob.glob('%s/*-group.cfg' % self.data)
+ host_data = []
+ group_data = []
+ for host in host_configs:
+ host_data.append(open(host, 'r').read())
+
+ for group in group_configs:
+ group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group)
+ if "\n".join(host_data).find(group_name) != -1:
+ groupfile = open(group, 'r')
+ group_data.append(groupfile.read())
+ groupfile.close()
+
+ entry.text = "%s\n\n%s" % ("\n".join(group_data), "\n".join(host_data))
+        for key, value in list(self.server_attrib.items()):
+            entry.attrib[key] = value
+ try:
+ fileh = open("%s/nagiosgen.cfg" % self.data, 'w')
+ fileh.write(entry.text)
+ fileh.close()
+        except IOError:
+ ioerr = sys.exc_info()[1]
+ LOGGER.error("Failed to write %s/nagiosgen.cfg" % self.data)
+ LOGGER.error(ioerr)
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
new file mode 100644
index 000000000..5fff20d98
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -0,0 +1,92 @@
+import lxml.etree
+import os
+
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Ohai')
+
+import Bcfg2.Server.Plugin
+
+try:
+    import json
+except ImportError:
+    # FIXME: can be removed when server prereq is >= python 2.6
+    # necessary for clients without the in-tree json module
+    try:
+        import simplejson as json
+    except ImportError:
+        logger.error("Unable to load any json modules. Make sure "
+                     "python-simplejson is installed.")
+        raise
+
+
+probecode = """#!/bin/sh
+
+export PATH=$PATH:/sbin:/usr/sbin
+
+if type ohai > /dev/null 2>&1; then
+ ohai
+else
+ # an empty dict, so "'foo' in metadata.Ohai" tests succeed
+ echo '{}'
+fi
+"""
+
+class OhaiCache(object):
+ def __init__(self, dirname):
+ self.dirname = dirname
+ self.cache = dict()
+
+ def __setitem__(self, item, value):
+        if value is None:
+ # simply return if the client returned nothing
+ return
+ self.cache[item] = json.loads(value)
+ file("%s/%s.json" % (self.dirname, item), 'w').write(value)
+
+ def __getitem__(self, item):
+ if item not in self.cache:
+ try:
+ data = open("%s/%s.json" % (self.dirname, item)).read()
+            except IOError:
+ raise KeyError(item)
+ self.cache[item] = json.loads(data)
+ return self.cache[item]
+
+    def __iter__(self):
+        data = set(self.cache.keys())
+        data.update(x[:-5] for x in os.listdir(self.dirname)
+                    if x.endswith('.json'))
+        return iter(data)
+
+
+class Ohai(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector):
+ """The Ohai plugin is used to detect information
+ about the client operating system.
+ """
+ name = 'Ohai'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai',
+ interpreter='/bin/sh')
+ self.probe.text = probecode
+        try:
+            os.stat(self.data)
+        except OSError:
+            os.makedirs(self.data)
+ self.cache = OhaiCache(self.data)
+
+ def GetProbes(self, meta, force=False):
+ return [self.probe]
+
+ def ReceiveData(self, meta, datalist):
+ self.cache[meta.hostname] = datalist[0].text
+
+ def get_additional_data(self, meta):
+ if meta.hostname in self.cache:
+ return self.cache[meta.hostname]
+ return dict()
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
new file mode 100644
index 000000000..f76bf7fa1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -0,0 +1,139 @@
+import re
+import gzip
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+from Bcfg2.Bcfg2Py3k import cPickle, file
+
+class AptCollection(Collection):
+ def get_group(self, group):
+ self.logger.warning("Packages: Package groups are not supported by APT")
+ return []
+
+class AptSource(Source):
+ basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
+ ptype = 'deb'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
+ 'version': self.version,
+ 'components': self.components, 'arches': self.arches}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%sdists/%s/%s/binary-%s/Packages.gz" %
+ (self.url, self.version, part, arch))
+ return rv
+ else:
+ return ["%sPackages.gz" % self.rawurl]
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x
+ for x in fname.split('@')
+ if x.startswith('binary-')][0][7:]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+ try:
+ reader = gzip.GzipFile(fname)
+            except IOError:
+ self.logger.error("Packages: Failed to read file %s" % fname)
+ raise
+ for line in reader.readlines():
+ words = str(line.strip()).split(':', 1)
+ if words[0] == 'Package':
+                    pkgname = words[1].strip()
+ self.pkgnames.add(pkgname)
+ bdeps[barch][pkgname] = []
+ elif words[0] in depfnames:
+ vindex = 0
+ for dep in words[1].split(','):
+ if '|' in dep:
+                            cdeps = [re.sub(r'\s+', '',
+                                            re.sub(r'\(.*\)', '', cdep))
+ for cdep in dep.split('|')]
+ dyn_dname = "choice-%s-%s-%s" % (pkgname,
+ barch,
+ vindex)
+ vindex += 1
+ bdeps[barch][pkgname].append(dyn_dname)
+ bprov[barch][dyn_dname] = set(cdeps)
+ else:
+                            raw_dep = re.sub(r'\(.*\)', '', dep)
+                            raw_dep = raw_dep.strip()
+ bdeps[barch][pkgname].append(raw_dep)
+ elif words[0] == 'Provides':
+ for pkg in words[1].split(','):
+                        dname = pkg.strip()
+ if dname not in bprov[barch]:
+ bprov[barch][dname] = set()
+ bprov[barch][dname].add(pkgname)
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in list(bprov.values()):
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return (pkg in self.pkgnames and
+ pkg not in self.blacklist and
+ (len(self.whitelist) == 0 or pkg in self.whitelist))
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
new file mode 100644
index 000000000..59c50fe8b
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -0,0 +1,343 @@
+import copy
+import logging
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger(__name__)
+
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+# we have to cache Collection objects so that calling Packages.Refresh
+# or .Reload can tell the collection objects to clean up their cache,
+# but we don't actually use the cache to return a Collection object
+# when one is requested, because that prevents new machines from
+# working, since a Collection object gets created by
+# get_additional_data(), which is called for all clients at server
+# startup. (It would also prevent machines that change groups from
+# working properly; e.g., if you reinstall a machine with a new OS,
+# then returning a cached Collection object would give the wrong
+# sources to that client.)
+collections = dict()
+
+class Collection(Bcfg2.Server.Plugin.Debuggable):
+ def __init__(self, metadata, sources, basepath, debug=False):
+ """ don't call this directly; use the factory function """
+ Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ self.debug_flag = debug
+ self.metadata = metadata
+ self.sources = sources
+ self.basepath = basepath
+ self.virt_pkgs = dict()
+
+ try:
+ self.config = sources[0].config
+ self.cachepath = sources[0].basepath
+ self.ptype = sources[0].ptype
+ except IndexError:
+ self.config = None
+ self.cachepath = None
+ self.ptype = "unknown"
+
+ self.cachefile = None
+
+ @property
+ def cachekey(self):
+ return md5(self.get_config()).hexdigest()
+
+ def get_config(self):
+ self.logger.error("Packages: Cannot generate config for host with "
+ "multiple source types (%s)" % self.metadata.hostname)
+ return ""
+
+ def get_relevant_groups(self):
+ groups = []
+ for source in self.sources:
+ groups.extend(source.get_relevant_groups(self.metadata))
+ return sorted(list(set(groups)))
+
+ @property
+ def basegroups(self):
+ groups = set()
+ for source in self.sources:
+ groups.update(source.basegroups)
+ return list(groups)
+
+ @property
+ def cachefiles(self):
+ cachefiles = set([self.cachefile])
+ for source in self.sources:
+ cachefiles.add(source.cachefile)
+ return list(cachefiles)
+
+ def get_group(self, group, ptype=None):
+ for source in self.sources:
+ pkgs = source.get_group(self.metadata, group, ptype=ptype)
+ if pkgs:
+ return pkgs
+ self.logger.warning("Packages: '%s' is not a valid group" % group)
+ return []
+
+ def is_package(self, package):
+ for source in self.sources:
+ if source.is_package(self.metadata, package):
+ return True
+ return False
+
+ def is_virtual_package(self, package):
+ for source in self.sources:
+ if source.is_virtual_package(self.metadata, package):
+ return True
+ return False
+
+ def get_deps(self, package):
+ for source in self.sources:
+ if source.is_package(self.metadata, package):
+ return source.get_deps(self.metadata, package)
+ return []
+
+ def get_provides(self, package):
+ for source in self.sources:
+ providers = source.get_provides(self.metadata, package)
+ if providers:
+ return providers
+ return []
+
+ def get_vpkgs(self):
+ """ get virtual packages """
+ vpkgs = dict()
+ for source in self.sources:
+ s_vpkgs = source.get_vpkgs(self.metadata)
+ for name, prov_set in list(s_vpkgs.items()):
+ if name not in vpkgs:
+ vpkgs[name] = set(prov_set)
+ else:
+ vpkgs[name].update(prov_set)
+ return vpkgs
+
+ def filter_unknown(self, unknown):
+ for source in self.sources:
+ source.filter_unknown(unknown)
+
+    def magic_groups_match(self):
+        for source in self.sources:
+            if source.magic_groups_match(self.metadata):
+                return True
+        return False
+
+ def build_extra_structures(self, independent):
+ pass
+
+ def get_additional_data(self):
+ sdata = []
+ for source in self.sources:
+ sdata.extend(copy.deepcopy(source.url_map))
+ return sdata
+
+ def setup_data(self, force_update=False):
+ """ do any collection-level data setup tasks """
+ pass
+
+ def complete(self, packagelist):
+ '''Build the transitive closure of all package dependencies
+
+ Arguments:
+        packagelist - set of package names
+ returns => (set(packages), set(unsatisfied requirements))
+ '''
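+        # as an illustration with hypothetical data: if "foo" requires
+        # "libfoo", "libfoo" requires "libbar", and "foo" also requires
+        # "baz" which no source provides, then complete(set(["foo"]))
+        # returns (set(["foo", "libfoo", "libbar"]), set(["baz"]))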
+
+ # setup vpkg cache
+ pgrps = tuple(self.get_relevant_groups())
+ if pgrps not in self.virt_pkgs:
+ self.virt_pkgs[pgrps] = self.get_vpkgs()
+ vpkg_cache = self.virt_pkgs[pgrps]
+
+ # unclassified is set of unsatisfied requirements (may be pkg
+ # for vpkg)
+ unclassified = set(packagelist)
+ vpkgs = set()
+ both = set()
+ pkgs = set(packagelist)
+
+ packages = set()
+ examined = set()
+ unknown = set()
+
+ final_pass = False
+ really_done = False
+ # do while unclassified or vpkgs or both or pkgs
+ while unclassified or pkgs or both or final_pass:
+ if really_done:
+ break
+ if len(unclassified) + len(pkgs) + len(both) == 0:
+ # one more pass then exit
+ really_done = True
+
+ while unclassified:
+ current = unclassified.pop()
+ examined.add(current)
+ is_pkg = False
+ if self.is_package(current):
+ is_pkg = True
+
+ is_vpkg = current in vpkg_cache
+
+ if is_pkg and is_vpkg:
+ both.add(current)
+ elif is_pkg and not is_vpkg:
+ pkgs.add(current)
+ elif is_vpkg and not is_pkg:
+ vpkgs.add(current)
+ elif not is_vpkg and not is_pkg:
+ unknown.add(current)
+
+ while pkgs:
+ # direct packages; current can be added, and all deps
+ # should be resolved
+ current = pkgs.pop()
+ self.debug_log("Packages: handling package requirement %s" %
+ current)
+ packages.add(current)
+ deps = self.get_deps(current)
+ newdeps = set(deps).difference(examined)
+ if newdeps:
+ self.debug_log("Packages: Package %s added requirements %s"
+ % (current, newdeps))
+ unclassified.update(newdeps)
+
+ satisfied_vpkgs = set()
+ for current in vpkgs:
+ # virtual dependencies, satisfied if one of N in the
+ # config, or can be forced if only one provider
+ if len(vpkg_cache[current]) == 1:
+ self.debug_log("Packages: requirement %s satisfied by %s" %
+ (current, vpkg_cache[current]))
+ unclassified.update(vpkg_cache[current].difference(examined))
+ satisfied_vpkgs.add(current)
+ else:
+ satisfiers = [item for item in vpkg_cache[current]
+ if item in packages]
+ self.debug_log("Packages: requirement %s satisfied by %s" %
+ (current, satisfiers))
+ satisfied_vpkgs.add(current)
+ vpkgs.difference_update(satisfied_vpkgs)
+
+ satisfied_both = set()
+ for current in both:
+                # packages in 'both' have a virtual provider as well
+                # as a real package with that name. allow use of the
+                # virtual through explicit specification, then fall
+                # back to forcing the package itself on the last pass
+ satisfiers = [item for item in vpkg_cache[current]
+ if item in packages]
+ if satisfiers:
+ self.debug_log("Packages: requirement %s satisfied by %s" %
+ (current, satisfiers))
+ satisfied_both.add(current)
+ elif current in packagelist or final_pass:
+ pkgs.add(current)
+ satisfied_both.add(current)
+ both.difference_update(satisfied_both)
+
+ if len(unclassified) + len(pkgs) == 0:
+ final_pass = True
+ else:
+ final_pass = False
+
+ self.filter_unknown(unknown)
+
+ return packages, unknown
+
+ def __len__(self):
+ return len(self.sources)
+
+ def __getitem__(self, item):
+ return self.sources[item]
+
+ def __setitem__(self, item, value):
+ self.sources[item] = value
+
+ def __delitem__(self, item):
+ del self.sources[item]
+
+ def append(self, item):
+ self.sources.append(item)
+
+    def count(self, item):
+        return self.sources.count(item)
+
+ def index(self, item):
+ return self.sources.index(item)
+
+ def extend(self, items):
+ self.sources.extend(items)
+
+ def insert(self, index, item):
+ self.sources.insert(index, item)
+
+    def pop(self, index=-1):
+        return self.sources.pop(index)
+
+ def remove(self, item):
+ self.sources.remove(item)
+
+ def sort(self, cmp=None, key=None, reverse=False):
+ self.sources.sort(cmp, key, reverse)
+
+def clear_cache():
+ global collections
+ collections = dict()
+
+def factory(metadata, sources, basepath, debug=False):
+ global collections
+
+ if not sources.loaded:
+ # if sources.xml has not received a FAM event yet, defer;
+ # instantiate a dummy Collection object
+ return Collection(metadata, [], basepath)
+
+ sclasses = set()
+ relevant = list()
+
+ for source in sources:
+ if source.applies(metadata):
+ relevant.append(source)
+ sclasses.update([source.__class__])
+
+ if len(sclasses) > 1:
+ logger.warning("Packages: Multiple source types found for %s: %s" %
+ ",".join([s.__name__ for s in sclasses]))
+ cclass = Collection
+ elif len(sclasses) == 0:
+ # you'd think this should be a warning, but it happens all the
+ # freaking time if you have a) machines in your clients.xml
+ # that do not have the proper groups set up yet (e.g., if you
+ # have multiple Bcfg2 servers and Packages-relevant groups set
+ # by probes); and b) templates that query all or multiple
+ # machines (e.g., with metadata.query.all_clients())
+ if debug:
+ logger.error("Packages: No sources found for %s" %
+ metadata.hostname)
+ cclass = Collection
+ else:
+ stype = sclasses.pop().__name__.replace("Source", "")
+ try:
+ module = \
+ getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+ stype.title()).Server.Plugins.Packages,
+ stype.title())
+ cclass = getattr(module, "%sCollection" % stype.title())
+ except ImportError:
+ logger.error("Packages: Unknown source type %s" % stype)
+ except AttributeError:
+ logger.warning("Packages: No collection class found for %s sources"
+ % stype)
+
+ if debug:
+ logger.error("Packages: Using %s for Collection of sources for %s" %
+ (cclass.__name__, metadata.hostname))
+
+ collection = cclass(metadata, relevant, basepath, debug=debug)
+ collections[metadata.hostname] = collection
+ return collection
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
new file mode 100644
index 000000000..486c094a6
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -0,0 +1,120 @@
+import tarfile
+from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+
+class PacCollection(Collection):
+ def get_group(self, group):
+ self.logger.warning("Packages: Package groups are not supported by APT")
+ return []
+
+class PacSource(Source):
+ basegroups = ['arch', 'parabola']
+ ptype = 'pacman'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
+ 'version': self.version,
+ 'components': self.components, 'arches': self.arches}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%s%s/os/%s/%s.db.tar.gz" %
+ (self.url, part, arch, part))
+ return rv
+ else:
+ raise Exception("PacSource : RAWUrl not supported (yet)")
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x for x in fname.split('@') if x in self.arches][0]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+            try:
+                self.debug_log("Packages: trying to read %s" % fname)
+                tar = tarfile.open(fname, "r")
+            except (IOError, tarfile.TarError):
+                self.logger.error("Packages: Failed to read file %s" % fname)
+                raise
+
+ for tarinfo in tar:
+ if tarinfo.isdir():
+ self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
+ self.debug_log("Packages: added %s" %
+ tarinfo.name.rsplit("-", 2)[0])
+ tar.close()
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in list(bprov.values()):
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return (pkg in self.pkgnames and
+ pkg not in self.blacklist and
+ (len(self.whitelist) == 0 or pkg in self.whitelist))
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py
new file mode 100644
index 000000000..3846c06ce
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesConfig.py
@@ -0,0 +1,15 @@
+import Bcfg2.Server.Plugin
+
+class PackagesConfig(Bcfg2.Server.Plugin.SimpleConfig):
+ _required = False
+
+ def Index(self):
+ """ Build local data structures """
+ Bcfg2.Server.Plugin.SimpleConfig.Index(self)
+
+ if hasattr(self.plugin, "sources") and self.plugin.sources.loaded:
+ # only reload Packages plugin if sources have been loaded.
+ # otherwise, this is getting called on server startup, and
+ # we have to wait until all sources have been indexed
+ # before we can call Packages.Reload()
+ self.plugin.Reload()
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
new file mode 100644
index 000000000..3511cfc3d
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -0,0 +1,97 @@
+import os
+import sys
+import lxml.etree
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
+
+class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
+ Bcfg2.Server.Plugin.StructFile,
+ Bcfg2.Server.Plugin.Debuggable):
+ __identifier__ = None
+
+ def __init__(self, filename, cachepath, fam, packages, config):
+ Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ try:
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
+ filename,
+ fam)
+ except OSError:
+ err = sys.exc_info()[1]
+ msg = "Packages: Failed to read configuration file: %s" % err
+ if not os.path.exists(self.name):
+ msg += " Have you created it?"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+ self.cachepath = cachepath
+ self.config = config
+ if not os.path.exists(self.cachepath):
+ # create cache directory if needed
+ try:
+ os.makedirs(self.cachepath)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Could not create Packages cache at %s: %s" %
+ (self.cachepath, err))
+ self.pkg_obj = packages
+ self.parsed = set()
+ self.loaded = False
+
+ def toggle_debug(self):
+ Bcfg2.Server.Plugin.Debuggable.toggle_debug(self)
+ for source in self.entries:
+ source.toggle_debug()
+
+ def HandleEvent(self, event=None):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event)
+ if event.filename != self.name:
+ self.parsed.add(os.path.basename(event.filename))
+
+ if sorted(list(self.parsed)) == sorted(self.extras):
+ self.logger.info("Reloading Packages plugin")
+ self.pkg_obj.Reload()
+ self.loaded = True
+
+ def Index(self):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ self.entries = []
+ for xsource in self.xdata.findall('.//Source'):
+ source = self.source_from_xml(xsource)
+ if source is not None:
+ self.entries.append(source)
+
+ def source_from_xml(self, xsource):
+ """ create a *Source object from its XML representation in
+ sources.xml """
+ stype = xsource.get("type")
+ if stype is None:
+ self.logger.error("Packages: No type specified for source, "
+ "skipping")
+ return None
+
+ try:
+ module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+ stype.title()).Server.Plugins.Packages,
+ stype.title())
+ cls = getattr(module, "%sSource" % stype.title())
+ except (ImportError, AttributeError):
+ self.logger.error("Packages: Unknown source type %s" % stype)
+ return None
+
+ try:
+ source = cls(self.cachepath, xsource, self.config)
+ except SourceInitError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: %s" % err)
+ source = None
+
+ return source
+
+ def __getitem__(self, key):
+ return self.entries[key]
+
+ def __repr__(self):
+ return "PackagesSources: %s" % repr(self.entries)
+
+ def __str__(self):
+ return "PackagesSources: %s" % str(self.entries)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
new file mode 100644
index 000000000..b33fde60a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -0,0 +1,278 @@
+import os
+import re
+import sys
+import base64
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
+ HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
+ urlopen, file, cPickle
+
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+def fetch_url(url):
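+    # a URL of the form "scheme://user:password@host/path" is split
+    # into credentials and a bare URL before fetching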
+ if '@' in url:
+        mobj = re.match(r'(\w+://)([^:]+):([^@]+)@(.*)$', url)
+ if not mobj:
+ raise ValueError
+ user = mobj.group(2)
+ passwd = mobj.group(3)
+ url = mobj.group(1) + mobj.group(4)
+ auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm())
+ auth.add_password(None, url, user, passwd)
+ install_opener(build_opener(auth))
+ return urlopen(url).read()
+
+
+class SourceInitError(Exception):
+ pass
+
+
+class Source(Bcfg2.Server.Plugin.Debuggable):
+ mrepo_re = re.compile(r'/RPMS\.([^/]+)')
+ pulprepo_re = re.compile(r'pulp/repos/([^/]+)')
+ genericrepo_re = re.compile(r'https?://[^/]+/(.+?)/?$')
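+    # the three regexes above are heuristics used by get_repo_name()
+    # to derive a repository name from a source URL: mrepo-style
+    # (RPMS.<name>) paths, Pulp repository paths, and a generic
+    # fallback based on the URL path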
+ basegroups = []
+
+ def __init__(self, basepath, xsource, config):
+ Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ self.basepath = basepath
+ self.xsource = xsource
+ self.config = config
+
+        try:
+            self.version = xsource.find('Version').text
+        except AttributeError:
+            # fall back to the version attribute
+            self.version = xsource.get('version', '')
+
+ for key, tag in [('components', 'Component'), ('arches', 'Arch'),
+ ('blacklist', 'Blacklist'),
+ ('whitelist', 'Whitelist')]:
+ self.__dict__[key] = [item.text for item in xsource.findall(tag)]
+
+ self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
+
+ self.recommended = xsource.get('recommended', 'false').lower() == 'true'
+
+ self.rawurl = xsource.get('rawurl', '')
+ if self.rawurl and not self.rawurl.endswith("/"):
+ self.rawurl += "/"
+ self.url = xsource.get('url', '')
+ if self.url and not self.url.endswith("/"):
+ self.url += "/"
+
+ # build the set of conditions to see if this source applies to
+ # a given set of metadata
+ self.conditions = []
+ self.groups = [] # provided for some limited backwards compat
+ for el in xsource.iterancestors():
+ if el.tag == "Group":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") not in m.groups)
+ else:
+ self.groups.append(el.get("name"))
+ self.conditions.append(lambda m, el=el:
+ el.get("name") in m.groups)
+ elif el.tag == "Client":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") != m.hostname)
+ else:
+ self.conditions.append(lambda m, el=el:
+ el.get("name") == m.hostname)
+
+ self.deps = dict()
+ self.provides = dict()
+
+ self.cachefile = os.path.join(self.basepath,
+ "cache-%s" % self.cachekey)
+ self.url_map = []
+
+ @property
+ def cachekey(self):
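+        # hashing these fields means any change to a source definition
+        # in sources.xml yields a new cache file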
+ return md5(cPickle.dumps([self.version, self.components, self.url,
+ self.rawurl, self.arches])).hexdigest()
+
+ def get_relevant_groups(self, metadata):
+ return sorted(list(set([g for g in metadata.groups
+ if (g in self.basegroups or
+ g in self.groups or
+ g in self.arches)])))
+
+ def load_state(self):
+ pass
+
+ def setup_data(self, force_update=False):
+ should_read = True
+ should_download = False
+ if os.path.exists(self.cachefile):
+ try:
+ self.load_state()
+ should_read = False
+            except Exception:
+ self.logger.error("Packages: Cachefile %s load failed; "
+ "falling back to file read" % self.cachefile)
+ if should_read:
+ try:
+ self.read_files()
+            except Exception:
+ self.logger.error("Packages: File read failed; "
+ "falling back to file download")
+ should_download = True
+
+ if should_download or force_update:
+ try:
+ self.update()
+ self.read_files()
+            except Exception:
+ self.logger.error("Packages: Failed to load data for Source "
+ "of %s. Some Packages will be missing." %
+ self.urls)
+
+ def get_repo_name(self, url_map):
+ # try to find a sensible name for a repo
+ if url_map['component']:
+ rname = url_map['component']
+ else:
+ name = None
+ for repo_re in (self.mrepo_re,
+ self.pulprepo_re,
+ self.genericrepo_re):
+ match = repo_re.search(url_map['url'])
+ if match:
+ break
+ if name is None:
+ # couldn't figure out the name from the URL or URL map
+ # (which probably means its a screwy URL), so we just
+ # generate a random one
+ name = base64.b64encode(os.urandom(16))[:-2]
+ rname = "%s-%s" % (self.groups[0], name)
+ # see yum/__init__.py in the yum source, lines 441-449, for
+ # the source of this regex. yum doesn't like anything but
+ # string.ascii_letters, string.digits, and [-_.:]. There
+ # doesn't seem to be a reason for this, because yum.
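+        # e.g. a name like "updates/testing" (hypothetical) is
+        # sanitized to "updates-testing"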
+ return re.sub(r'[^A-Za-z0-9-_.:]', '-', rname)
+
+ def __str__(self):
+ if self.rawurl:
+ return "%s at %s" % (self.__class__.__name__, self.rawurl)
+ elif self.url:
+ return "%s at %s" % (self.__class__.__name__, self.url)
+ else:
+ return self.__class__.__name__
+
+ def get_urls(self):
+ return []
+ urls = property(get_urls)
+
+ def get_files(self):
+ return [self.escape_url(url) for url in self.urls]
+ files = property(get_files)
+
+ def get_vpkgs(self, metadata):
+ agroups = ['global'] + [a for a in self.arches
+ if a in metadata.groups]
+ vdict = dict()
+ for agrp in agroups:
+ for key, value in list(self.provides[agrp].items()):
+ if key not in vdict:
+ vdict[key] = set(value)
+ else:
+ vdict[key].update(value)
+ return vdict
+
+ def is_virtual_package(self, metadata, package):
+ """ called to determine if a package is a virtual package.
+ this is only invoked if the package is not listed in the dict
+ returned by get_vpkgs """
+ return False
+
+ def escape_url(self, url):
+ return os.path.join(self.basepath, url.replace('/', '@'))
+
+ def file_init(self):
+ pass
+
+ def read_files(self):
+ pass
+
+ def filter_unknown(self, unknown):
+ pass
+
+ def update(self):
+ for url in self.urls:
+ self.logger.info("Packages: Updating %s" % url)
+ fname = self.escape_url(url)
+ try:
+ data = fetch_url(url)
+ file(fname, 'w').write(data)
+ except ValueError:
+ self.logger.error("Packages: Bad url string %s" % url)
+ raise
+ except HTTPError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to fetch url %s. HTTP "
+ "response code=%s" % (url, err.code))
+ raise
+
+ def applies(self, metadata):
+ # check base groups
+ if not self.magic_groups_match(metadata):
+ return False
+
+ # check Group/Client tags from sources.xml
+ for condition in self.conditions:
+ if not condition(metadata):
+ return False
+
+ return True
+
+ def get_arches(self, metadata):
+ return ['global'] + [a for a in self.arches if a in metadata.groups]
+
+ def get_deps(self, metadata, pkgname):
+ for arch in self.get_arches(metadata):
+ if pkgname in self.deps[arch]:
+ return self.deps[arch][pkgname]
+ return []
+
+ def get_provides(self, metadata, required):
+ for arch in self.get_arches(metadata):
+ if required in self.provides[arch]:
+ return self.provides[arch][required]
+ return []
+
+ def is_package(self, metadata, _):
+ return False
+
+ def get_package(self, metadata, package):
+ return package
+
+ def get_group(self, metadata, group, ptype=None):
+ return []
+
+ def magic_groups_match(self, metadata):
+ """ check to see if this source applies to the given host
+ metadata by checking 'magic' (base) groups only, or if magic
+ groups are off """
+ # we always check that arch matches
+ found_arch = False
+ for arch in self.arches:
+ if arch in metadata.groups:
+ found_arch = True
+ break
+ if not found_arch:
+ return False
+
+ if self.config.getboolean("global", "magic_groups",
+ default=True) == False:
+ return True
+ else:
+ for group in self.basegroups:
+ if group in metadata.groups:
+ return True
+ return False
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
new file mode 100644
index 000000000..b39b6aed2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -0,0 +1,688 @@
+import os
+import re
+import sys
+import copy
+import socket
+import logging
+import lxml.etree
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
+ fetch_url
+
+logger = logging.getLogger(__name__)
+
+try:
+ from pulp.client.consumer.config import ConsumerConfig
+ from pulp.client.api.repository import RepositoryAPI
+ from pulp.client.api.consumer import ConsumerAPI
+ from pulp.client.api import server
+ has_pulp = True
+except ImportError:
+ has_pulp = False
+
+try:
+ import yum
+ has_yum = True
+except ImportError:
+ has_yum = False
+ logger.info("Packages: No yum libraries found; forcing use of internal "
+ "dependency resolver")
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+XP = '{http://linux.duke.edu/metadata/common}'
+RP = '{http://linux.duke.edu/metadata/rpm}'
+RPO = '{http://linux.duke.edu/metadata/repo}'
+FL = '{http://linux.duke.edu/metadata/filelists}'
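+# Clark-notation XML namespace prefixes used when parsing yum repodata
+# (primary.xml, filelists.xml, and repomd.xml)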
+
+PULPSERVER = None
+PULPCONFIG = None
+
+
+def _setup_pulp(config):
+ global PULPSERVER, PULPCONFIG
+ if not has_pulp:
+ logger.error("Packages: Cannot create Pulp collection: Pulp libraries not "
+ "found")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ if PULPSERVER is None:
+ try:
+ username = config.get("pulp", "username")
+ password = config.get("pulp", "password")
+ except ConfigParser.NoSectionError:
+ logger.error("Packages: No [pulp] section found in Packages/packages.conf")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ except ConfigParser.NoOptionError:
+ err = sys.exc_info()[1]
+ logger.error("Packages: Required option not found in "
+ "Packages/packages.conf: %s" % err)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ PULPCONFIG = ConsumerConfig()
+ serveropts = PULPCONFIG.server
+
+ PULPSERVER = server.PulpServer(serveropts['host'],
+ int(serveropts['port']),
+ serveropts['scheme'],
+ serveropts['path'])
+ PULPSERVER.set_basic_auth_credentials(username, password)
+ server.set_active_server(PULPSERVER)
+ return PULPSERVER
+
+
+class YumCollection(Collection):
+ # options that are included in the [yum] section but that should
+ # not be included in the temporary yum.conf we write out
+ option_blacklist = ["use_yum_libraries", "helper"]
+
+ def __init__(self, metadata, sources, basepath, debug=False):
+ Collection.__init__(self, metadata, sources, basepath, debug=debug)
+ self.keypath = os.path.join(self.basepath, "keys")
+
+ if len(sources):
+ config = sources[0].config
+ self.use_yum = has_yum and config.getboolean("yum",
+ "use_yum_libraries",
+ default=False)
+ else:
+ self.use_yum = False
+
+ if self.use_yum:
+ self.cachefile = os.path.join(self.cachepath,
+ "cache-%s" % self.cachekey)
+ if not os.path.exists(self.cachefile):
+ os.mkdir(self.cachefile)
+
+ self.configdir = os.path.join(self.basepath, "yum")
+ if not os.path.exists(self.configdir):
+ os.mkdir(self.configdir)
+ self.cfgfile = os.path.join(self.configdir,
+ "%s-yum.conf" % self.cachekey)
+ self.write_config()
+
+ self.helper = self.config.get("yum", "helper",
+ default="/usr/sbin/bcfg2-yum-helper")
+ if has_pulp:
+ _setup_pulp(self.config)
+
+ def write_config(self):
+ if not os.path.exists(self.cfgfile):
+ yumconf = self.get_config(raw=True)
+ yumconf.add_section("main")
+
+ mainopts = dict(cachedir=self.cachefile,
+ keepcache="0",
+ sslverify="0",
+ debuglevel="0",
+ reposdir="/dev/null")
+ try:
+ for opt in self.config.options("yum"):
+ if opt not in self.option_blacklist:
+ mainopts[opt] = self.config.get("yum", opt)
+ except ConfigParser.NoSectionError:
+ pass
+
+ for opt, val in list(mainopts.items()):
+ yumconf.set("main", opt, val)
+
+ yumconf.write(open(self.cfgfile, 'w'))
+
+ def get_config(self, raw=False):
+ config = ConfigParser.SafeConfigParser()
+ for source in self.sources:
+ # get_urls() loads url_map as a side-effect
+ source.get_urls()
+ for url_map in source.url_map:
+ if url_map['arch'] in self.metadata.groups:
+ basereponame = source.get_repo_name(url_map)
+ reponame = basereponame
+
+ added = False
+ while not added:
+ try:
+ config.add_section(reponame)
+ added = True
+ except ConfigParser.DuplicateSectionError:
+ match = re.match("-(\d)", reponame)
+ if match:
+ rid = int(match.group(1)) + 1
+ else:
+ rid = 1
+ reponame = "%s-%d" % (basereponame, rid)
+
+ config.set(reponame, "name", reponame)
+ config.set(reponame, "baseurl", url_map['url'])
+ config.set(reponame, "enabled", "1")
+ if len(source.gpgkeys):
+ config.set(reponame, "gpgcheck", "1")
+ config.set(reponame, "gpgkey",
+ " ".join(source.gpgkeys))
+ else:
+ config.set(reponame, "gpgcheck", "0")
+
+ if len(source.blacklist):
+ config.set(reponame, "exclude",
+ " ".join(source.blacklist))
+ if len(source.whitelist):
+ config.set(reponame, "includepkgs",
+ " ".join(source.whitelist))
+
+ if raw:
+ return config
+ else:
+ # configparser only writes to file, so we have to use a
+ # StringIO object to get the data out as a string
+ buf = StringIO()
+ config.write(buf)
+ return "# This config was generated automatically by the Bcfg2 " \
+ "Packages plugin\n\n" + buf.getvalue()
+
+ def build_extra_structures(self, independent):
+ """ build list of gpg keys to be added to the specification by
+ validate_structures() """
+ needkeys = set()
+ for source in self.sources:
+ for key in source.gpgkeys:
+ needkeys.add(key)
+
+ if len(needkeys):
+ if has_yum:
+                # this must be has_yum, not use_yum, because
+ # regardless of whether the user wants to use the yum
+ # resolver we want to include gpg key data
+ keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey",
+ type=self.ptype, origin='Packages')
+ else:
+ self.logger.warning("GPGKeys were specified for yum sources in "
+ "sources.xml, but no yum libraries were "
+ "found")
+ self.logger.warning("GPG key version/release data cannot be "
+ "determined automatically")
+ self.logger.warning("Install yum libraries, or manage GPG keys "
+ "manually")
+ keypkg = None
+
+ for key in needkeys:
+ # figure out the path of the key on the client
+ keydir = self.config.get("global", "gpg_keypath",
+ default="/etc/pki/rpm-gpg")
+ remotekey = os.path.join(keydir, os.path.basename(key))
+ localkey = os.path.join(self.keypath, os.path.basename(key))
+ kdata = open(localkey).read()
+
+ # copy the key to the client
+ keypath = lxml.etree.Element("BoundPath", name=remotekey,
+ encoding='ascii',
+ owner='root', group='root',
+ type='file', perms='0644',
+ important='true')
+ keypath.text = kdata
+
+ # hook to add version/release info if possible
+ self._add_gpg_instances(keypkg, kdata, localkey, remotekey)
+ independent.append(keypath)
+ if keypkg is not None:
+ independent.append(keypkg)
+
+ # see if there are any pulp sources to handle
+ has_pulp_sources = False
+ for source in self.sources:
+ if source.pulp_id:
+ has_pulp_sources = True
+ break
+
+ if has_pulp_sources:
+ consumerapi = ConsumerAPI()
+ consumer = self._get_pulp_consumer(consumerapi=consumerapi)
+ if consumer is None:
+ consumer = consumerapi.create(self.metadata.hostname,
+ self.metadata.hostname)
+ lxml.etree.SubElement(independent, "BoundAction",
+ name="pulp-update", timing="pre",
+ when="always", status="check",
+ command="pulp-consumer consumer update")
+
+ for source in self.sources:
+ # each pulp source can only have one arch, so we don't
+ # have to check the arch in url_map
+ if (source.pulp_id and
+ source.pulp_id not in consumer['repoids']):
+ consumerapi.bind(self.metadata.hostname, source.pulp_id)
+
+ crt = lxml.etree.SubElement(independent, "BoundPath",
+ name="/etc/pki/consumer/cert.pem",
+ type="file", owner="root",
+ group="root", perms="0644")
+ crt.text = consumerapi.certificate(self.metadata.hostname)
+
+ def _get_pulp_consumer(self, consumerapi=None):
+ if consumerapi is None:
+ consumerapi = ConsumerAPI()
+ consumer = None
+ try:
+ consumer = consumerapi.consumer(self.metadata.hostname)
+ except server.ServerRequestError:
+ # consumer does not exist
+ pass
+ except socket.error:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Could not contact Pulp server: %s" %
+ err)
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Unknown error querying Pulp server: %s"
+ % err)
+ return consumer
+
+ def _add_gpg_instances(self, keyentry, keydata, localkey, remotekey):
+ """ add gpg keys to the specification to ensure they get
+ installed """
+        # this must be has_yum, not use_yum, because regardless of
+ # whether the user wants to use the yum resolver we want to
+ # include gpg key data
+ if not has_yum:
+ return
+
+ try:
+ kinfo = yum.misc.getgpgkeyinfo(keydata)
+ version = yum.misc.keyIdToRPMVer(kinfo['keyid'])
+ release = yum.misc.keyIdToRPMVer(kinfo['timestamp'])
+
+ lxml.etree.SubElement(keyentry, 'Instance',
+ version=version,
+ release=release,
+ simplefile=remotekey)
+ except ValueError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Could not read GPG key %s: %s" %
+ (localkey, err))
+
+    def is_package(self, package):
+        if not self.use_yum:
+            return Collection.is_package(self, package)
+        if isinstance(package, tuple):
+            if package[1] is None and package[2] == (None, None, None):
+                package = package[0]
+            else:
+                return None
+        # this should really never get called; it's just provided
+        # for API completeness
+        return self.call_helper("is_package", package)
+
+ def is_virtual_package(self, package):
+ if not self.use_yum:
+ return Collection.is_virtual_package(self, package)
+ else:
+ # this should really never get called; it's just provided
+ # for API completeness
+ return self.call_helper("is_virtual_package", package)
+
+ def get_deps(self, package):
+ if not self.use_yum:
+ return Collection.get_deps(self, package)
+ else:
+ # this should really never get called; it's just provided
+ # for API completeness
+ return self.call_helper("get_deps", package)
+
+    def get_provides(self, required, all=False, silent=False):
+        if not self.use_yum:
+            return Collection.get_provides(self, required)
+        else:
+            # this should really never get called; it's just provided
+            # for API completeness
+            return self.call_helper("get_provides", required)
+
+ def get_group(self, group, ptype="default"):
+ if not self.use_yum:
+ self.logger.warning("Packages: Package groups are not supported by "
+ "Bcfg2's internal Yum dependency generator")
+ return []
+
+ if group.startswith("@"):
+ group = group[1:]
+
+ pkgs = self.call_helper("get_group", dict(group=group, type=ptype))
+ return pkgs
+
+ def complete(self, packagelist):
+ if not self.use_yum:
+ return Collection.complete(self, packagelist)
+
+ packages = set()
+ unknown = set(packagelist)
+
+ if unknown:
+ result = \
+ self.call_helper("complete",
+ dict(packages=list(unknown),
+ groups=list(self.get_relevant_groups())))
+ if result and "packages" in result and "unknown" in result:
+ # we stringify every package because it gets returned
+ # in unicode; set.update() doesn't work if some
+ # elements are unicode and other are strings. (I.e.,
+ # u'foo' and 'foo' get treated as unique elements.)
+ packages.update([str(p) for p in result['packages']])
+ unknown = set([str(p) for p in result['unknown']])
+
+ self.filter_unknown(unknown)
+
+ return packages, unknown
+
+ def call_helper(self, command, input=None):
+ """ Make a call to bcfg2-yum-helper. The yum libs have
+ horrific memory leaks, so apparently the right way to get
+ around that in long-running processes it to have a short-lived
+ helper. No, seriously -- check out the yum-updatesd code.
+ It's pure madness. """
+ # it'd be nice if we could change this to be more verbose if
+ # -v was given to bcfg2-server, but Collection objects don't
+ # get the 'setup' variable, so we don't know how verbose
+ # bcfg2-server is. It'd also be nice if we could tell yum to
+ # log to syslog. So would a unicorn.
+ cmd = [self.helper, "-c", self.cfgfile]
+ if self.debug_flag:
+ cmd.append("-v")
+ cmd.append(command)
+ self.debug_log("Packages: running %s" % " ".join(cmd))
+ try:
+ helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to execute %s: %s" %
+ (" ".join(cmd), err))
+ return None
+
+ if input:
+ idata = json.dumps(input)
+ (stdout, stderr) = helper.communicate(idata)
+ else:
+ (stdout, stderr) = helper.communicate()
+ rv = helper.wait()
+ if rv:
+ self.logger.error("Packages: error running bcfg2-yum-helper "
+ "(returned %d): %s" % (rv, stderr))
+ elif self.debug_flag:
+ self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
+ stderr)
+ try:
+ return json.loads(stdout)
+ except ValueError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: error reading bcfg2-yum-helper "
+ "output: %s" % err)
+ return None
+
+ def setup_data(self, force_update=False):
+ if not self.use_yum:
+ return Collection.setup_data(self, force_update)
+
+ if force_update:
+ # we call this twice: one to clean up data from the old
+ # config, and once to clean up data from the new config
+ self.call_helper("clean")
+
+ os.unlink(self.cfgfile)
+ self.write_config()
+
+ if force_update:
+ self.call_helper("clean")
+
+
+class YumSource(Source):
+ basegroups = ['yum', 'redhat', 'centos', 'fedora']
+ ptype = 'yum'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pulp_id = None
+ if has_pulp and xsource.get("pulp_id"):
+ self.pulp_id = xsource.get("pulp_id")
+
+ _setup_pulp(self.config)
+ repoapi = RepositoryAPI()
+ try:
+ self.repo = repoapi.repository(self.pulp_id)
+ self.gpgkeys = [os.path.join(PULPCONFIG.cds['keyurl'], key)
+ for key in repoapi.listkeys(self.pulp_id)]
+ except server.ServerRequestError:
+ err = sys.exc_info()[1]
+ if err[0] == 401:
+ msg = "Packages: Error authenticating to Pulp: %s" % err[1]
+ elif err[0] == 404:
+ msg = "Packages: Pulp repo id %s not found: %s" % \
+ (self.pulp_id, err[1])
+ else:
+ msg = "Packages: Error %d fetching pulp repo %s: %s" % \
+ (err[0], self.pulp_id, err[1])
+ raise SourceInitError(msg)
+ except socket.error:
+ err = sys.exc_info()[1]
+ raise SourceInitError("Could not contact Pulp server: %s" % err)
+ except:
+ err = sys.exc_info()[1]
+ raise SourceInitError("Unknown error querying Pulp server: %s" %
+ err)
+ self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'],
+ self.repo['relative_path'])
+ self.arches = [self.repo['arch']]
+
+ if not self.rawurl:
+ self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/"
+ else:
+ self.baseurl = self.rawurl
+ self.packages = dict()
+ self.deps = dict([('global', dict())])
+ self.provides = dict([('global', dict())])
+ self.filemap = dict([(x, dict())
+ for x in ['global'] + self.arches])
+ self.needed_paths = set()
+ self.file_to_arch = dict()
+
+ self.use_yum = has_yum and config.getboolean("yum", "use_yum_libraries",
+ default=False)
+
+ def save_state(self):
+ if not self.use_yum:
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.packages, self.deps, self.provides,
+ self.filemap, self.url_map), cache, 2)
+ cache.close()
+
+ def load_state(self):
+ if not self.use_yum:
+ data = file(self.cachefile)
+ (self.packages, self.deps, self.provides,
+ self.filemap, self.url_map) = cPickle.load(data)
+
+ def get_urls(self):
+ surls = list()
+ self.url_map = []
+ for arch in self.arches:
+ if self.url:
+ usettings = [{'version':self.version, 'component':comp,
+ 'arch':arch}
+ for comp in self.components]
+ else: # rawurl given
+ usettings = [{'version':self.version, 'component':None,
+ 'arch':arch}]
+
+ for setting in usettings:
+ setting['url'] = self.baseurl % setting
+ self.url_map.append(copy.deepcopy(setting))
+ surls.append((arch, [setting['url'] for setting in usettings]))
+ urls = []
+ for (sarch, surl_list) in surls:
+ for surl in surl_list:
+ urls.extend(self._get_urls_from_repodata(surl, sarch))
+ return urls
+ urls = property(get_urls)
+
+ def _get_urls_from_repodata(self, url, arch):
+ if self.use_yum:
+ return [url]
+
+ rmdurl = '%srepodata/repomd.xml' % url
+ try:
+ repomd = fetch_url(rmdurl)
+ xdata = lxml.etree.XML(repomd)
+ except ValueError:
+ self.logger.error("Packages: Bad url string %s" % rmdurl)
+ return []
+ except HTTPError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to fetch url %s. code=%s" %
+ (rmdurl, err.code))
+ return []
+ except lxml.etree.XMLSyntaxError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to process metadata at %s: %s" %
+ (rmdurl, err))
+ return []
+
+ urls = []
+ for elt in xdata.findall(RPO + 'data'):
+ if elt.get('type') in ['filelists', 'primary']:
+ floc = elt.find(RPO + 'location')
+ fullurl = url + floc.get('href')
+ urls.append(fullurl)
+ self.file_to_arch[self.escape_url(fullurl)] = arch
+ return urls
+
+ def read_files(self):
+        # we have to read primary.xml first and filelists.xml afterwards,
+        # because parse_primary() populates needed_paths, which
+        # parse_filelist() uses to filter file entries
+ primaries = list()
+ filelists = list()
+ for fname in self.files:
+ if fname.endswith('primary.xml.gz'):
+ primaries.append(fname)
+ elif fname.endswith('filelists.xml.gz'):
+ filelists.append(fname)
+
+ for fname in primaries:
+ farch = self.file_to_arch[fname]
+ fdata = lxml.etree.parse(fname).getroot()
+ self.parse_primary(fdata, farch)
+ for fname in filelists:
+ farch = self.file_to_arch[fname]
+ fdata = lxml.etree.parse(fname).getroot()
+ self.parse_filelist(fdata, farch)
+
+ # merge data
+ sdata = list(self.packages.values())
+ try:
+ self.packages['global'] = copy.deepcopy(sdata.pop())
+ except IndexError:
+            self.logger.error("Packages: No packages in repo")
+ while sdata:
+ self.packages['global'] = \
+ self.packages['global'].intersection(sdata.pop())
+
+ for key in self.packages:
+ if key == 'global':
+ continue
+ self.packages[key] = \
+ self.packages[key].difference(self.packages['global'])
+ self.save_state()
+
+ def parse_filelist(self, data, arch):
+ if arch not in self.filemap:
+ self.filemap[arch] = dict()
+ for pkg in data.findall(FL + 'package'):
+ for fentry in pkg.findall(FL + 'file'):
+ if fentry.text in self.needed_paths:
+ if fentry.text in self.filemap[arch]:
+ self.filemap[arch][fentry.text].add(pkg.get('name'))
+ else:
+ self.filemap[arch][fentry.text] = \
+ set([pkg.get('name')])
+
+ def parse_primary(self, data, arch):
+ if arch not in self.packages:
+ self.packages[arch] = set()
+ if arch not in self.deps:
+ self.deps[arch] = dict()
+ if arch not in self.provides:
+ self.provides[arch] = dict()
+ for pkg in data.getchildren():
+ if not pkg.tag.endswith('package'):
+ continue
+ pkgname = pkg.find(XP + 'name').text
+ self.packages[arch].add(pkgname)
+
+ pdata = pkg.find(XP + 'format')
+ self.deps[arch][pkgname] = set()
+ pre = pdata.find(RP + 'requires')
+ if pre is not None:
+ for entry in pre.getchildren():
+ self.deps[arch][pkgname].add(entry.get('name'))
+ if entry.get('name').startswith('/'):
+ self.needed_paths.add(entry.get('name'))
+ pro = pdata.find(RP + 'provides')
+            if pro is not None:
+ for entry in pro.getchildren():
+ prov = entry.get('name')
+ if prov not in self.provides[arch]:
+ self.provides[arch][prov] = list()
+ self.provides[arch][prov].append(pkgname)
+
+ def is_package(self, metadata, item):
+ arch = [a for a in self.arches if a in metadata.groups]
+ if not arch:
+ return False
+ return ((item in self.packages['global'] or
+ item in self.packages[arch[0]]) and
+ item not in self.blacklist and
+ (len(self.whitelist) == 0 or item in self.whitelist))
+
+ def get_vpkgs(self, metadata):
+ if self.use_yum:
+ return dict()
+
+ rv = Source.get_vpkgs(self, metadata)
+ for arch, fmdata in list(self.filemap.items()):
+ if arch not in metadata.groups and arch != 'global':
+ continue
+ for filename, pkgs in list(fmdata.items()):
+ rv[filename] = pkgs
+ return rv
+
+ def filter_unknown(self, unknown):
+ if self.use_yum:
+ filtered = set()
+ for unk in unknown:
+ try:
+ if unk.startswith('rpmlib'):
+                        filtered.add(unk)
+ except AttributeError:
+ try:
+ if unk[0].startswith('rpmlib'):
+                            filtered.add(unk)
+ except (IndexError, AttributeError):
+ pass
+ else:
+ filtered = set([u for u in unknown if u.startswith('rpmlib')])
+ unknown.difference_update(filtered)
+
+ def setup_data(self, force_update=False):
+ if not self.use_yum:
+ Source.setup_data(self, force_update=force_update)
+
+ def get_repo_name(self, url_map):
+ if self.pulp_id:
+ return self.pulp_id
+ else:
+ return Source.get_repo_name(self, url_map)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
new file mode 100644
index 000000000..da5832e90
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -0,0 +1,263 @@
+import os
+import sys
+import time
+import copy
+import glob
+import shutil
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
+from Bcfg2.Server.Plugins.Packages import Collection
+from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+from Bcfg2.Server.Plugins.Packages.PackagesConfig import PackagesConfig
+
+class Packages(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator,
+ Bcfg2.Server.Plugin.Generator,
+ Bcfg2.Server.Plugin.Connector):
+ name = 'Packages'
+ conflicts = ['Pkgmgr']
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+
+ self.sentinels = set()
+ self.cachepath = os.path.join(self.data, 'cache')
+ self.keypath = os.path.join(self.data, 'keys')
+ if not os.path.exists(self.keypath):
+ # create key directory if needed
+ os.makedirs(self.keypath)
+
+ # set up config files
+ self.config = PackagesConfig(self)
+ self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
+ self.cachepath, core.fam, self,
+ self.config)
+
+ def toggle_debug(self):
+ Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
+ self.sources.toggle_debug()
+
+ @property
+ def disableResolver(self):
+ try:
+ return not self.config.getboolean("global", "resolver")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+ except ValueError:
+ # for historical reasons we also accept "enabled" and
+ # "disabled", which are not handled according to the
+ # Python docs but appear to be handled properly by
+ # ConfigParser in at least some versions
+ return self.config.get("global", "resolver",
+ default="enabled").lower() == "disabled"
+
+ @property
+ def disableMetaData(self):
+ try:
+ return not self.config.getboolean("global", "resolver")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+ except ValueError:
+ # for historical reasons we also accept "enabled" and
+ # "disabled"
+ return self.config.get("global", "metadata",
+ default="enabled").lower() == "disabled"
+
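+    # Illustrative packages config consumed by the two properties above
+    # (section and option names come from the lookups; values assumed):
+    #
+    #   [global]
+    #   resolver = enabled
+    #   metadata = disabled
+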
+ def create_config(self, entry, metadata):
+ """ create yum/apt config for the specified host """
+ attrib = {'encoding': 'ascii',
+ 'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '0644'}
+
+ collection = self._get_collection(metadata)
+ entry.text = collection.get_config()
+        entry.attrib.update(attrib)
+
+ def HandleEntry(self, entry, metadata):
+ if entry.tag == 'Package':
+ collection = self._get_collection(metadata)
+ entry.set('version', 'auto')
+ entry.set('type', collection.ptype)
+ elif entry.tag == 'Path':
+ if (entry.get("name") == self.config.get("global", "yum_config",
+ default="") or
+ entry.get("name") == self.config.get("global", "apt_config",
+ default="")):
+ self.create_config(entry, metadata)
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag == 'Package':
+ if self.config.getboolean("global", "magic_groups",
+ default=True) == True:
+ collection = self._get_collection(metadata)
+ if collection.magic_groups_match():
+ return True
+ else:
+ return True
+ elif entry.tag == 'Path':
+ # managed entries for yum/apt configs
+ if (entry.get("name") == self.config.get("global", "yum_config",
+ default="") or
+ entry.get("name") == self.config.get("global", "apt_config",
+ default="")):
+ return True
+ return False
+
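+    # Illustrative [global] options checked above for managed repo
+    # configuration paths (option names from the lookups; the example
+    # paths are assumptions):
+    #   yum_config = /etc/yum.repos.d/bcfg2-packages.repo
+    #   apt_config = /etc/apt/sources.list.d/bcfg2-packages.list
+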
+ def validate_structures(self, metadata, structures):
+ '''Ensure client configurations include all needed prerequisites
+
+ Arguments:
+ metadata - client metadata instance
+ structures - a list of structure-stage entry combinations
+ '''
+ collection = self._get_collection(metadata)
+ indep = lxml.etree.Element('Independent')
+ self._build_packages(metadata, indep, structures,
+ collection=collection)
+ collection.build_extra_structures(indep)
+ structures.append(indep)
+
+ def _build_packages(self, metadata, independent, structures,
+ collection=None):
+ """ build list of packages that need to be included in the
+ specification by validate_structures() """
+ if self.disableResolver:
+ # Config requests no resolver
+ return
+
+ if collection is None:
+ collection = self._get_collection(metadata)
+ # initial is the set of packages that are explicitly specified
+ # in the configuration
+ initial = set()
+ # base is the set of initial packages with groups expanded
+ base = set()
+ to_remove = []
+ for struct in structures:
+ for pkg in struct.xpath('//Package | //BoundPackage'):
+ if pkg.get("name"):
+ initial.add(pkg.get("name"))
+ elif pkg.get("group"):
+ try:
+ if pkg.get("type"):
+ gpkgs = collection.get_group(pkg.get("group"),
+ ptype=pkg.get("type"))
+ else:
+ gpkgs = collection.get_group(pkg.get("group"))
+ base.update(gpkgs)
+                    except TypeError:
+                        self.logger.error("Could not resolve group %s" %
+                                          pkg.get("group"))
+                        to_remove.append(pkg)
+ else:
+ self.logger.error("Packages: Malformed Package: %s" %
+ lxml.etree.tostring(pkg))
+ base.update(initial)
+ for el in to_remove:
+ el.getparent().remove(el)
+
+ packages, unknown = collection.complete(base)
+ if unknown:
+ self.logger.info("Packages: Got %d unknown entries" % len(unknown))
+ self.logger.info("Packages: %s" % list(unknown))
+ newpkgs = list(packages.difference(initial))
+ self.debug_log("Packages: %d initial, %d complete, %d new" %
+ (len(initial), len(packages), len(newpkgs)))
+ newpkgs.sort()
+ for pkg in newpkgs:
+ lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
+ version='auto', type=collection.ptype,
+ origin='Packages')
+
+ def Refresh(self):
+ '''Packages.Refresh() => True|False\nReload configuration
+ specification and download sources\n'''
+ self._load_config(force_update=True)
+ return True
+
+ def Reload(self):
+        '''Packages.Reload() => True|False\nReload configuration
+ specification and sources\n'''
+ self._load_config()
+ return True
+
+ def _load_config(self, force_update=False):
+ '''
+ Load the configuration data and setup sources
+
+ Keyword args:
+ force_update Force downloading repo data
+ '''
+ self._load_sources(force_update)
+ self._load_gpg_keys(force_update)
+
+ def _load_sources(self, force_update):
+ """ Load sources from the config """
+ self.sentinels = set()
+ cachefiles = set()
+
+ for collection in list(Collection.collections.values()):
+ cachefiles.update(collection.cachefiles)
+ if not self.disableMetaData:
+ collection.setup_data(force_update)
+ self.sentinels.update(collection.basegroups)
+
+ Collection.clear_cache()
+
+ for source in self.sources:
+ cachefiles.add(source.cachefile)
+ if not self.disableMetaData:
+ source.setup_data(force_update)
+
+ for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
+ if cfile not in cachefiles:
+ try:
+ if os.path.isdir(cfile):
+ shutil.rmtree(cfile)
+ else:
+ os.unlink(cfile)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Could not remove cache file "
+ "%s: %s" % (cfile, err))
+
+ def _load_gpg_keys(self, force_update):
+ """ Load gpg keys from the config """
+ keyfiles = []
+ keys = []
+ for source in self.sources:
+ for key in source.gpgkeys:
+ localfile = os.path.join(self.keypath,
+ os.path.basename(key.rstrip("/")))
+ if localfile not in keyfiles:
+ keyfiles.append(localfile)
+ if ((force_update and key not in keys) or
+ not os.path.exists(localfile)):
+ self.logger.info("Packages: Downloading and parsing %s" % key)
+ response = urlopen(key)
+ open(localfile, 'w').write(response.read())
+ keys.append(key)
+
+ for kfile in glob.glob(os.path.join(self.keypath, "*")):
+ if kfile not in keyfiles:
+ os.unlink(kfile)
+
+ def _get_collection(self, metadata):
+ return Collection.factory(metadata, self.sources, self.data,
+ debug=self.debug_flag)
+
+ def get_additional_data(self, metadata):
+ collection = self._get_collection(metadata)
+ return dict(sources=collection.get_additional_data())
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
new file mode 100644
index 000000000..e9254cdcc
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -0,0 +1,169 @@
+'''This module implements a package management scheme for all images'''
+
+import logging
+import re
+import Bcfg2.Server.Plugin
+import lxml
+try:
+ set
+except NameError:
+ from sets import Set as set
+
+logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
+
+
+class FuzzyDict(dict):
+    fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)')
+
+ def __getitem__(self, key):
+ if isinstance(key, str):
+ mdata = self.fuzzy.match(key)
+ if mdata:
+ return dict.__getitem__(self, mdata.groupdict()['name'])
+ else:
+                logger.error("got non-string key %s" % str(key))
+ return dict.__getitem__(self, key)
+
+ def has_key(self, key):
+ if isinstance(key, str):
+ mdata = self.fuzzy.match(key)
+            if mdata:
+ return dict.has_key(self, mdata.groupdict()['name'])
+ return dict.has_key(self, key)
+
+ def get(self, key, default=None):
+ try:
+ return self.__getitem__(key)
+        except KeyError:
+            if default is not None:
+ return default
+ raise
+
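+# Illustrative FuzzyDict behavior (example, not from the original source):
+# keys of the form "name:arch1,arch2" fall back to the bare package name.
+#
+#     pkgs = FuzzyDict()
+#     pkgs['openssl'] = {'version': '1.0'}
+#     pkgs['openssl:i386,x86_64']  # -> {'version': '1.0'}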
+
+class PNode(Bcfg2.Server.Plugin.INode):
+ """PNode has a list of packages available at a
+ particular group intersection.
+ """
+    splitters = {'rpm': re.compile(r'^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' +
+                                   r'(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
+                 'encap': re.compile(r'^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+)\.encap.*$')}
+ ignore = ['Package']
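+    # Illustrative split (example filename assumed): the 'rpm' splitter
+    # parses "bash-4.1-2.el6.x86_64.rpm" into name="bash",
+    # version="4.1-2.el6", arch="x86_64".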
+
+ def Match(self, metadata, data, entry=lxml.etree.Element("None")):
+ """Return a dictionary of package mappings."""
+ if self.predicate(metadata, entry):
+ for key in self.contents:
+ try:
+ data[key].update(self.contents[key])
+                except KeyError:
+ data[key] = FuzzyDict()
+ data[key].update(self.contents[key])
+ for child in self.children:
+ child.Match(metadata, data)
+
+ def __init__(self, data, pdict, parent=None):
+ # copy local attributes to all child nodes if no local attribute exists
+ if 'Package' not in pdict:
+ pdict['Package'] = set()
+ for child in data.getchildren():
+ attrs = set(data.attrib.keys()).difference(child.attrib.keys() + ['name'])
+ for attr in attrs:
+ try:
+ child.set(attr, data.get(attr))
+ except:
+ # don't fail on things like comments and other immutable elements
+ pass
+ Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent)
+ if 'Package' not in self.contents:
+ self.contents['Package'] = FuzzyDict()
+ for pkg in data.findall('./Package'):
+ if 'name' in pkg.attrib and pkg.get('name') not in pdict['Package']:
+ pdict['Package'].add(pkg.get('name'))
+                if pkg.get('name') is not None:
+ self.contents['Package'][pkg.get('name')] = {}
+ if pkg.getchildren():
+ self.contents['Package'][pkg.get('name')]['__children__'] \
+ = pkg.getchildren()
+ if 'simplefile' in pkg.attrib:
+ pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
+ self.contents['Package'][pkg.get('name')].update(pkg.attrib)
+ else:
+ if 'file' in pkg.attrib:
+ if 'multiarch' in pkg.attrib:
+ archs = pkg.get('multiarch').split()
+ srcs = pkg.get('srcs', pkg.get('multiarch')).split()
+ url = ' '.join(["%s/%s" % (pkg.get('uri'),
+ pkg.get('file') % {'src':srcs[idx],
+ 'arch':archs[idx]})
+ for idx in range(len(archs))])
+ pkg.set('url', url)
+ else:
+ pkg.set('url', '%s/%s' % (pkg.get('uri'),
+ pkg.get('file')))
+            if pkg.get('type') in self.splitters and pkg.get('file') is not None:
+ mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
+ if not mdata:
+ logger.error("Failed to match pkg %s" % pkg.get('file'))
+ continue
+ pkgname = mdata.group('name')
+ self.contents['Package'][pkgname] = mdata.groupdict()
+ self.contents['Package'][pkgname].update(pkg.attrib)
+ if pkg.attrib.get('file'):
+ self.contents['Package'][pkgname]['url'] = pkg.get('url')
+ self.contents['Package'][pkgname]['type'] = pkg.get('type')
+ if pkg.get('verify'):
+ self.contents['Package'][pkgname]['verify'] = pkg.get('verify')
+ if pkg.get('multiarch'):
+ self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch')
+ if pkgname not in pdict['Package']:
+ pdict['Package'].add(pkgname)
+ if pkg.getchildren():
+ self.contents['Package'][pkgname]['__children__'] = pkg.getchildren()
+ else:
+ self.contents['Package'][pkg.get('name')].update(pkg.attrib)
+
+
+class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
+ """PkgSrc files contain a PNode hierarchy that
+ returns matching package entries.
+ """
+ __node__ = PNode
+ __cacheobj__ = FuzzyDict
+
+
+class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
+ """This is a generator that handles package assignments."""
+ name = 'Pkgmgr'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = PkgSrc
+ __element__ = 'Package'
+
+ def HandleEvent(self, event):
+ '''Handle events and update dispatch table'''
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
+ for src in list(self.entries.values()):
+ for itype, children in list(src.items.items()):
+ for child in children:
+ try:
+ self.Entries[itype][child] = self.BindEntry
+ except KeyError:
+ self.Entries[itype] = FuzzyDict([(child,
+ self.BindEntry)])
+
+ def BindEntry(self, entry, metadata):
+ """Bind data for entry, and remove instances that are not requested."""
+ pname = entry.get('name')
+ Bcfg2.Server.Plugin.PrioDir.BindEntry(self, entry, metadata)
+ if entry.findall('Instance'):
+ mdata = FuzzyDict.fuzzy.match(pname)
+ if mdata:
+ arches = mdata.group('alist').split(',')
+                for inst in entry.findall('Instance'):
+                    if inst.get('arch') not in arches:
+                        entry.remove(inst)
+
+ def HandlesEntry(self, entry, metadata):
+        return (entry.tag == 'Package' and
+                entry.get('name').split(':')[0] in self.Entries['Package'])
+
+ def HandleEntry(self, entry, metadata):
+ self.BindEntry(entry, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
new file mode 100644
index 000000000..af908eee8
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -0,0 +1,285 @@
+import time
+import lxml.etree
+import operator
+import re
+
+try:
+ import json
+ has_json = True
+except ImportError:
+ try:
+ import simplejson as json
+ has_json = True
+ except ImportError:
+ has_json = False
+
+has_yaml = False
+try:
+    import syck
+    has_syck = True
+except ImportError:
+    has_syck = False
+    try:
+        import yaml
+        has_yaml = True
+    except ImportError:
+        pass
+
+import Bcfg2.Server.Plugin
+
+specific_probe_matcher = re.compile(r"(.*/)?(?P<basename>\S+)(\.(?P<mode>[GH](\d\d)?)_\S+)")
+probe_matcher = re.compile(r"(.*/)?(?P<basename>\S+)")
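+# Illustrative probe names (examples assumed from the patterns above):
+# "current-kernel" is a generic probe, "current-kernel.G50_servers"
+# applies to group "servers" at priority 50, and "current-kernel.H_foo"
+# applies only to host "foo".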
+
+class ClientProbeDataSet(dict):
+ """ dict of probe => [probe data] that records a for each host """
+ def __init__(self, *args, **kwargs):
+ if "timestamp" in kwargs and kwargs['timestamp'] is not None:
+ self.timestamp = kwargs.pop("timestamp")
+ else:
+ self.timestamp = time.time()
+ dict.__init__(self, *args, **kwargs)
+
+
+class ProbeData(object):
+ """ a ProbeData object emulates a str object, but also has .xdata
+ and .json properties to provide convenient ways to use ProbeData
+ objects as XML or JSON data """
+ def __init__(self, data):
+ self.data = data
+ self._xdata = None
+ self._json = None
+ self._yaml = None
+
+ def __str__(self):
+ return str(self.data)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __getattr__(self, name):
+ """ make ProbeData act like a str object """
+ return getattr(self.data, name)
+
+ def __complex__(self):
+ return complex(self.data)
+
+ def __int__(self):
+ return int(self.data)
+
+ def __long__(self):
+ return long(self.data)
+
+ def __float__(self):
+ return float(self.data)
+
+ def __eq__(self, other):
+ return str(self) == str(other)
+
+ def __ne__(self, other):
+ return str(self) != str(other)
+
+ def __gt__(self, other):
+ return str(self) > str(other)
+
+ def __lt__(self, other):
+ return str(self) < str(other)
+
+ def __ge__(self, other):
+ return self > other or self == other
+
+ def __le__(self, other):
+ return self < other or self == other
+
+ @property
+ def xdata(self):
+ if self._xdata is None:
+ try:
+ self._xdata = lxml.etree.XML(self.data)
+ except lxml.etree.XMLSyntaxError:
+ pass
+ return self._xdata
+
+ @property
+ def json(self):
+ if self._json is None and has_json:
+ try:
+ self._json = json.loads(self.data)
+ except ValueError:
+ pass
+ return self._json
+
+ @property
+ def yaml(self):
+ if self._yaml is None:
+ if has_yaml:
+ try:
+                    self._yaml = yaml.safe_load(self.data)
+ except yaml.YAMLError:
+ pass
+ elif has_syck:
+ try:
+ self._yaml = syck.load(self.data)
+ except syck.error:
+ pass
+ return self._yaml
+
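+# Illustrative use of ProbeData (example, not from the original source):
+#
+#     data = ProbeData('{"cpus": 4}')
+#     str(data)          # '{"cpus": 4}'
+#     data.json['cpus']  # 4, when a JSON module is available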
+
+class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
+    ignore = re.compile(r"^(\.#.*|.*~|\..*\.(tmp|sw[px])|probed\.xml)$")
+
+ def __init__(self, path, fam, encoding, plugin_name):
+        fpattern = r'[0-9A-Za-z_\-]+'
+ self.plugin_name = plugin_name
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ Bcfg2.Server.Plugin.SpecificData,
+ encoding)
+ fam.AddMonitor(path, self)
+ self.bangline = re.compile('^#!(?P<interpreter>.*)$')
+
+ def HandleEvent(self, event):
+ if event.filename != self.path:
+            if (event.code2str() == 'changed' and
+ event.filename.endswith("probed.xml") and
+ event.filename not in self.entries):
+ # for some reason, probed.xml is particularly prone to
+ # getting changed events before created events,
+ # because gamin is the worst ever. anyhow, we
+ # specifically handle it here to avoid a warning on
+ # every single server startup.
+ self.entry_init(event)
+ return
+ return self.handle_event(event)
+
+ def get_probe_data(self, metadata):
+ ret = []
+ build = dict()
+ candidates = self.get_matching(metadata)
+ candidates.sort(key=operator.attrgetter('specific'))
+ for entry in candidates:
+ rem = specific_probe_matcher.match(entry.name)
+ if not rem:
+ rem = probe_matcher.match(entry.name)
+ pname = rem.group('basename')
+ if pname not in build:
+ build[pname] = entry
+
+ for (name, entry) in list(build.items()):
+ probe = lxml.etree.Element('probe')
+ probe.set('name', name.split('/')[-1])
+ probe.set('source', self.plugin_name)
+ probe.text = entry.data
+ match = self.bangline.match(entry.data.split('\n')[0])
+ if match:
+ probe.set('interpreter', match.group('interpreter'))
+ else:
+ probe.set('interpreter', '/bin/sh')
+ ret.append(probe)
+ return ret
+
+
+class Probes(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector):
+ """A plugin to gather information from a client machine."""
+ name = 'Probes'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ Bcfg2.Server.Plugin.Probing.__init__(self)
+
+ try:
+ self.probes = ProbeSet(self.data, core.fam, core.encoding,
+ self.name)
+ except:
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ self.probedata = dict()
+ self.cgroups = dict()
+ self.load_data()
+
+ def write_data(self):
+ """Write probe data out for use with bcfg2-info."""
+ top = lxml.etree.Element("Probed")
+ for client, probed in sorted(self.probedata.items()):
+ cx = lxml.etree.SubElement(top, 'Client', name=client,
+ timestamp=str(int(probed.timestamp)))
+ for probe in sorted(probed):
+ lxml.etree.SubElement(cx, 'Probe', name=probe,
+ value=str(self.probedata[client][probe]))
+ for group in sorted(self.cgroups[client]):
+ lxml.etree.SubElement(cx, "Group", name=group)
+ data = lxml.etree.tostring(top, encoding='UTF-8',
+ xml_declaration=True,
+                                   pretty_print=True)
+        try:
+            datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w')
+            datafile.write(data.decode('utf-8'))
+            datafile.close()
+        except IOError:
+            self.logger.error("Failed to write probed.xml")
+
+ def load_data(self):
+ try:
+ data = lxml.etree.parse(self.data + '/probed.xml').getroot()
+ except:
+ self.logger.error("Failed to read file probed.xml")
+ return
+ self.probedata = {}
+ self.cgroups = {}
+ for client in data.getchildren():
+ self.probedata[client.get('name')] = \
+ ClientProbeDataSet(timestamp=client.get("timestamp"))
+ self.cgroups[client.get('name')] = []
+ for pdata in client:
+ if (pdata.tag == 'Probe'):
+ self.probedata[client.get('name')][pdata.get('name')] = \
+ ProbeData(pdata.get('value'))
+ elif (pdata.tag == 'Group'):
+ self.cgroups[client.get('name')].append(pdata.get('name'))
+
+ def GetProbes(self, meta, force=False):
+ """Return a set of probes for execution on client."""
+ return self.probes.get_probe_data(meta)
+
+ def ReceiveData(self, client, datalist):
+ self.cgroups[client.hostname] = []
+ self.probedata[client.hostname] = ClientProbeDataSet()
+ for data in datalist:
+ self.ReceiveDataItem(client, data)
+ self.write_data()
+
+ def ReceiveDataItem(self, client, data):
+ """Receive probe results pertaining to client."""
+ if client.hostname not in self.cgroups:
+ self.cgroups[client.hostname] = []
+        if data.text is None:
+ self.logger.error("Got null response to probe %s from %s" % \
+ (data.get('name'), client.hostname))
+ try:
+ self.probedata[client.hostname].update({data.get('name'):
+ ProbeData('')})
+ except KeyError:
+ self.probedata[client.hostname] = \
+ ClientProbeDataSet([(data.get('name'), ProbeData(''))])
+ return
+ dlines = data.text.split('\n')
+ self.logger.debug("%s:probe:%s:%s" % (client.hostname,
+ data.get('name'), [line.strip() for line in dlines]))
+ for line in dlines[:]:
+ if line.split(':')[0] == 'group':
+ newgroup = line.split(':')[1].strip()
+ if newgroup not in self.cgroups[client.hostname]:
+ self.cgroups[client.hostname].append(newgroup)
+ dlines.remove(line)
+ dobj = ProbeData("\n".join(dlines))
+ try:
+ self.probedata[client.hostname].update({data.get('name'): dobj})
+ except KeyError:
+ self.probedata[client.hostname] = \
+ ClientProbeDataSet([(data.get('name'), dobj)])
+
+ def get_additional_groups(self, meta):
+ return self.cgroups.get(meta.hostname, list())
+
+ def get_additional_data(self, meta):
+ return self.probedata.get(meta.hostname, ClientProbeDataSet())
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
new file mode 100644
index 000000000..680881858
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -0,0 +1,76 @@
+import os
+import re
+import sys
+import copy
+import logging
+import lxml.etree
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.Properties')
+
+class PropertyFile(Bcfg2.Server.Plugin.StructFile):
+ """Class for properties files."""
+ def write(self):
+ """ Write the data in this data structure back to the property
+ file """
+ if self.validate_data():
+ try:
+ open(self.name,
+ "wb").write(lxml.etree.tostring(self.xdata,
+ pretty_print=True))
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ logger.error("Failed to write %s: %s" % (self.name, err))
+ return False
+ else:
+ return False
+
+ def validate_data(self):
+ """ ensure that the data in this object validates against the
+ XML schema for this property file (if a schema exists) """
+ schemafile = self.name.replace(".xml", ".xsd")
+ if os.path.exists(schemafile):
+ try:
+ schema = lxml.etree.XMLSchema(file=schemafile)
+ except:
+ logger.error("Failed to process schema for %s" % self.name)
+ return False
+ else:
+ # no schema exists
+ return True
+
+ if not schema.validate(self.xdata):
+ logger.error("Data for %s fails to validate; run bcfg2-lint for "
+ "more details" % self.name)
+ return False
+ else:
+ return True
+
+
+class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
+ __child__ = PropertyFile
+ patterns = re.compile(r'.*\.xml$')
+
+
+class Properties(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ """
+ The properties plugin maps property
+ files into client metadata instances.
+ """
+ name = 'Properties'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ try:
+ self.store = PropDirectoryBacked(self.data, core.fam)
+ except OSError:
+ e = sys.exc_info()[1]
+ self.logger.error("Error while creating Properties store: %s %s" %
+ (e.strerror, e.filename))
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def get_additional_data(self, _):
+ return copy.copy(self.store.entries)
diff --git a/src/lib/Bcfg2/Server/Plugins/Rules.py b/src/lib/Bcfg2/Server/Plugins/Rules.py
new file mode 100644
index 000000000..b80ef351a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Rules.py
@@ -0,0 +1,55 @@
+"""This generator provides rule-based entry mappings."""
+
+import re
+import Bcfg2.Server.Plugin
+
+class RulesConfig(Bcfg2.Server.Plugin.SimpleConfig):
+ _required = False
+
+class Rules(Bcfg2.Server.Plugin.PrioDir):
+ """This is a generator that handles service assignments."""
+ name = 'Rules'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore)
+ self.config = RulesConfig(self)
+ self._regex_cache = dict()
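+        # Regex matching is opt-in via the plugin config; an illustrative
+        # snippet (option names from _regex_enabled below):
+        #   [rules]
+        #   regex = yes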
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag in self.Entries:
+ return self._matches(entry, metadata,
+ self.Entries[entry.tag].keys())
+ return False
+
+ def HandleEntry(self, entry, metadata):
+ return self.BindEntry(entry, metadata)
+
+ def BindEntry(self, entry, metadata):
+ attrs = self.get_attrs(entry, metadata)
+ for key, val in list(attrs.items()):
+ if key not in entry.attrib:
+ entry.attrib[key] = val
+
+ def _matches(self, entry, metadata, rules):
+ if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata, rules):
+ return True
+ elif (entry.tag == "Path" and
+ ((entry.get('name').endswith("/") and
+ entry.get('name').rstrip("/") in rules) or
+ (not entry.get('name').endswith("/") and
+ entry.get('name') + '/' in rules))):
+ # special case for Path tags:
+ # http://trac.mcs.anl.gov/projects/bcfg2/ticket/967
+ return True
+ elif self._regex_enabled:
+ # attempt regular expression matching
+ for rule in rules:
+ if rule not in self._regex_cache:
+ self._regex_cache[rule] = re.compile("%s$" % rule)
+ if self._regex_cache[rule].match(entry.get('name')):
+ return True
+ return False
+
+    @property
+    def _regex_enabled(self):
+        """ Return True if regex matching of entry names is enabled. """
+        return self.config.getboolean("rules", "regex", default=False)
diff --git a/src/lib/Bcfg2/Server/Plugins/SGenshi.py b/src/lib/Bcfg2/Server/Plugins/SGenshi.py
new file mode 100644
index 000000000..0ba08125e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/SGenshi.py
@@ -0,0 +1,97 @@
+'''This module implements a templating generator based on Genshi'''
+
+import genshi.input
+import genshi.template
+import lxml.etree
+import logging
+import copy
+import sys
+import os.path
+
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Plugins.TGenshi
+
+logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
+
+
+class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
+ specific, encoding)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
+
+ def get_xml_value(self, metadata):
+ if not hasattr(self, 'template'):
+ logger.error("No parsed template information for %s" % (self.name))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ stream = self.template.generate(metadata=metadata).filter( \
+ Bcfg2.Server.Plugins.TGenshi.removecomment)
+ data = lxml.etree.XML(stream.render('xml', strip_whitespace=False))
+ bundlename = os.path.splitext(os.path.basename(self.name))[0]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ for item in self.Match(metadata, data):
+ bundle.append(copy.deepcopy(item))
+ return bundle
+ except LookupError:
+ lerror = sys.exc_info()[1]
+ logger.error('Genshi lookup error: %s' % lerror)
+ except genshi.template.TemplateError:
+ terror = sys.exc_info()[1]
+ logger.error('Genshi template error: %s' % terror)
+ raise
+ except genshi.input.ParseError:
+ perror = sys.exc_info()[1]
+ logger.error('Genshi parse error: %s' % perror)
+ raise
+
+ def Match(self, metadata, xdata):
+ """Return matching fragments of parsed template."""
+ rv = []
+ for child in xdata.getchildren():
+ rv.extend(self._match(child, metadata))
+ logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
+ return rv
+
+class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
+
+ def __init__(self, path, fam, encoding):
+        fpattern = r'\S+\.xml'
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ SGenshiTemplateFile, encoding)
+ fam.AddMonitor(path, self)
+
+ def HandleEvent(self, event):
+ '''passthrough event handler for old calling convention'''
+ if event.filename != self.path:
+ return self.handle_event(event)
+
+ def BuildStructures(self, metadata):
+ """Build SGenshi structures."""
+ ret = []
+ for entry in self.get_matching(metadata):
+ try:
+ ret.append(entry.get_xml_value(metadata))
+ except:
+ logger.error("SGenshi: Failed to template file %s" % entry.name)
+ return ret
+
+
+class SGenshi(SGenshiEntrySet,
+ Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure):
+ """The SGenshi plugin provides templated structures."""
+ name = 'SGenshi'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ deprecated = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ try:
+ SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding)
+ except:
+ logger.error("Failed to load %s repository; disabling %s" \
+ % (self.name, self.name))
+ raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
new file mode 100644
index 000000000..ac281ad1a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -0,0 +1,413 @@
+'''This module manages ssh key files for bcfg2'''
+
+import binascii
+import re
+import os
+import socket
+import shutil
+import sys
+import tempfile
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import u_str
+
+if sys.hexversion >= 0x03000000:
+ from functools import reduce
+
+import logging
+logger = logging.getLogger(__name__)
+
+class KeyData(Bcfg2.Server.Plugin.SpecificData):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
+ encoding)
+ self.encoding = encoding
+
+ def bind_entry(self, entry, metadata):
+ entry.set('type', 'file')
+ if entry.get('encoding') == 'base64':
+ entry.text = binascii.b2a_base64(self.data)
+ else:
+ try:
+ entry.text = u_str(self.data, self.encoding)
+ except UnicodeDecodeError:
+ e = sys.exc_info()[1]
+ logger.error("Failed to decode %s: %s" % (entry.get('name'), e))
+ logger.error("Please verify you are using the proper encoding.")
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ except ValueError:
+ e = sys.exc_info()[1]
+ logger.error("Error in specification for %s" %
+ entry.get('name'))
+ logger.error(str(e))
+ logger.error("You need to specify base64 encoding for %s." %
+ entry.get('name'))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ if entry.text in ['', None]:
+ entry.set('empty', 'true')
+
+class HostKeyEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, basename, path):
+ if basename.startswith("ssh_host_key"):
+ encoding = "base64"
+ else:
+ encoding = None
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, KeyData,
+ encoding)
+ self.metadata = {'owner': 'root',
+ 'group': 'root',
+ 'type': 'file'}
+ if encoding is not None:
+ self.metadata['encoding'] = encoding
+ if basename.endswith('.pub'):
+ self.metadata['perms'] = '0644'
+ else:
+ self.metadata['perms'] = '0600'
+
+
+class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, path):
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, "ssh_known_hosts", path,
+ KeyData, None)
+ self.metadata = {'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '0644'}
+
+
+class SSHbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Generator,
+ Bcfg2.Server.Plugin.PullTarget):
+ """
+ The sshbase generator manages ssh host keys (both v1 and v2)
+ for hosts. It also manages the ssh_known_hosts file. It can
+ integrate host keys from other management domains and similarly
+ export its keys. The repository contains files in the following
+ formats:
+
+ ssh_host_key.H_(hostname) -> the v1 host private key for
+ (hostname)
+ ssh_host_key.pub.H_(hostname) -> the v1 host public key
+ for (hostname)
+ ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host
+ private key for (hostname)
+ ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
+ public key for (hostname)
+ ssh_known_hosts -> the current known hosts file. this
+ is regenerated each time a new key is generated.
+
+ """
+ name = 'SSHbase'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ keypatterns = ["ssh_host_dsa_key",
+ "ssh_host_ecdsa_key",
+ "ssh_host_rsa_key",
+ "ssh_host_key",
+ "ssh_host_dsa_key.pub",
+ "ssh_host_ecdsa_key.pub",
+ "ssh_host_rsa_key.pub",
+ "ssh_host_key.pub"]
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ Bcfg2.Server.Plugin.PullTarget.__init__(self)
+ self.ipcache = {}
+ self.namecache = {}
+ self.__skn = False
+
+ # keep track of which bogus keys we've warned about, and only
+ # do so once
+ self.badnames = dict()
+
+ core.fam.AddMonitor(self.data, self)
+
+ self.static = dict()
+ self.entries = dict()
+ self.Entries['Path'] = dict()
+
+ self.entries['/etc/ssh/ssh_known_hosts'] = KnownHostsEntrySet(self.data)
+ self.Entries['Path']['/etc/ssh/ssh_known_hosts'] = self.build_skn
+ for keypattern in self.keypatterns:
+ self.entries["/etc/ssh/" + keypattern] = HostKeyEntrySet(keypattern,
+ self.data)
+ self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
+
+ def get_skn(self):
+ """Build memory cache of the ssh known hosts file."""
+ if not self.__skn:
+ # if no metadata is registered yet, defer
+ if len(self.core.metadata.query.all()) == 0:
+ self.__skn = False
+ return self.__skn
+
+ skn = [s.data.decode().rstrip()
+ for s in list(self.static.values())]
+
+ mquery = self.core.metadata.query
+
+ # build hostname cache
+ names = dict()
+ for cmeta in mquery.all():
+ names[cmeta.hostname] = set([cmeta.hostname])
+ names[cmeta.hostname].update(cmeta.aliases)
+ newnames = set()
+ newips = set()
+ for name in names[cmeta.hostname]:
+ newnames.add(name.split('.')[0])
+ try:
+ newips.add(self.get_ipcache_entry(name)[0])
+ except:
+ continue
+ names[cmeta.hostname].update(newnames)
+ names[cmeta.hostname].update(cmeta.addresses)
+ names[cmeta.hostname].update(newips)
+ # TODO: Only perform reverse lookups on IPs if an option is set.
+ if True:
+ for ip in newips:
+ try:
+ names[cmeta.hostname].update(self.get_namecache_entry(ip))
+ except:
+ continue
+ names[cmeta.hostname] = sorted(names[cmeta.hostname])
+
+ pubkeys = [pubk for pubk in list(self.entries.keys())
+ if pubk.endswith('.pub')]
+ pubkeys.sort()
+ for pubkey in pubkeys:
+ for entry in sorted(self.entries[pubkey].entries.values(),
+ key=lambda e: e.specific.hostname or e.specific.group):
+ specific = entry.specific
+ hostnames = []
+ if specific.hostname and specific.hostname in names:
+ hostnames = names[specific.hostname]
+ elif specific.group:
+ hostnames = \
+ reduce(lambda x, y: x + y,
+ [names[cmeta.hostname]
+ for cmeta in \
+ mquery.by_groups([specific.group])], [])
+ elif specific.all:
+ # a generic key for all hosts? really?
+ hostnames = reduce(lambda x, y: x + y,
+ list(names.values()), [])
+ if not hostnames:
+ if specific.hostname:
+ key = specific.hostname
+ ktype = "host"
+ elif specific.group:
+ key = specific.group
+ ktype = "group"
+ else:
+ # user has added a global SSH key, but
+ # have no clients yet. don't warn about
+ # this.
+ continue
+
+ if key not in self.badnames:
+ self.badnames[key] = True
+ self.logger.info("Ignoring key for unknown %s %s" %
+ (ktype, key))
+ continue
+
+ skn.append("%s %s" % (','.join(hostnames),
+ entry.data.decode().rstrip()))
+
+ self.__skn = "\n".join(skn) + "\n"
+ return self.__skn
+
+ def set_skn(self, value):
+ """Set backing data for skn."""
+ self.__skn = value
+ skn = property(get_skn, set_skn)
+
+ def HandleEvent(self, event=None):
+ """Local event handler that does skn regen on pubkey change."""
+ # skip events we don't care about
+ action = event.code2str()
+ if action == "endExist" or event.filename == self.data:
+ return
+
+ for entry in list(self.entries.values()):
+ if entry.specific.match(event.filename):
+ entry.handle_event(event)
+ if event.filename.endswith(".pub"):
+ self.logger.info("New public key %s; invalidating "
+ "ssh_known_hosts cache" % event.filename)
+ self.skn = False
+ return
+
+ if event.filename in ['info', 'info.xml', ':info']:
+ for entry in list(self.entries.values()):
+ entry.handle_event(event)
+ return
+
+ if event.filename.endswith('.static'):
+ self.logger.info("Static key %s %s; invalidating ssh_known_hosts "
+ "cache" % (event.filename, action))
+ if action == "deleted" and event.filename in self.static:
+ del self.static[event.filename]
+ self.skn = False
+ else:
+ self.static[event.filename] = \
+ Bcfg2.Server.Plugin.FileBacked(os.path.join(self.data,
+ event.filename))
+ self.static[event.filename].HandleEvent(event)
+ self.skn = False
+ return
+
+ self.logger.warn("SSHbase: Got unknown event %s %s" %
+ (event.filename, action))
+
+ def get_ipcache_entry(self, client):
+ """Build a cache of dns results."""
+ if client in self.ipcache:
+ if self.ipcache[client]:
+ return self.ipcache[client]
+ else:
+ raise socket.gaierror
+ else:
+ # need to add entry
+ try:
+ ipaddr = socket.gethostbyname(client)
+ self.ipcache[client] = (ipaddr, client)
+ return (ipaddr, client)
+            except socket.gaierror:
+                result = Popen(["getent", "hosts", client],
+                               stdout=PIPE).stdout.read().strip().split()
+                if result:
+                    # getent output is "<address> <names...>"; keep just
+                    # the address so callers get a hashable string
+                    ipaddr = result[0]
+                    self.ipcache[client] = (ipaddr, client)
+                    return (ipaddr, client)
+ self.ipcache[client] = False
+ self.logger.error("Failed to find IP address for %s" % client)
+ raise socket.gaierror
+
+ def get_namecache_entry(self, cip):
+ """Build a cache of name lookups from client IP addresses."""
+ if cip in self.namecache:
+ # lookup cached name from IP
+ if self.namecache[cip]:
+ return self.namecache[cip]
+ else:
+ raise socket.gaierror
+ else:
+ # add an entry that has not been cached
+ try:
+ rvlookup = socket.gethostbyaddr(cip)
+ if rvlookup[0]:
+ self.namecache[cip] = [rvlookup[0]]
+ else:
+ self.namecache[cip] = []
+ self.namecache[cip].extend(rvlookup[1])
+ return self.namecache[cip]
+ except socket.gaierror:
+ self.namecache[cip] = False
+ self.logger.error("Failed to find any names associated with IP address %s" % cip)
+ raise
+
+ def build_skn(self, entry, metadata):
+ """This function builds builds a host specific known_hosts file."""
+ try:
+ rv = self.entries[entry.get('name')].bind_entry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ client = metadata.hostname
+ entry.text = self.skn
+ hostkeys = []
+ for k in self.keypatterns:
+ if k.endswith(".pub"):
+ try:
+ hostkeys.append(self.entries["/etc/ssh/" +
+ k].best_matching(metadata))
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ pass
+ hostkeys.sort()
+ for hostkey in hostkeys:
+ entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" % (
+ hostkey.data.decode())
+ self.entries[entry.get('name')].bind_info_to_entry(entry, metadata)
+
+ def build_hk(self, entry, metadata):
+ """This binds host key data into entries."""
+ try:
+ self.entries[entry.get('name')].bind_entry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ filename = entry.get('name').split('/')[-1]
+ self.GenerateHostKeyPair(metadata.hostname, filename)
+ # Service the FAM events queued up by the key generation
+ # so the data structure entries will be available for
+ # binding.
+ #
+ # NOTE: We wait for up to ten seconds. There is some
+ # potential for race condition, because if the file
+ # monitor doesn't get notified about the new key files in
+ # time, those entries won't be available for binding. In
+ # practice, this seems "good enough".
+ tries = 0
+ is_bound = False
+ while not is_bound:
+ if tries >= 10:
+ self.logger.error("%s still not registered" % filename)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ self.core.fam.handle_events_in_interval(1)
+ tries += 1
+ try:
+ self.entries[entry.get('name')].bind_entry(entry, metadata)
+ is_bound = True
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ pass
+
+ def GenerateHostKeyPair(self, client, filename):
+ """Generate new host key pair for client."""
+ match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa)_)?key)', filename)
+ if match:
+ hostkey = "%s.H_%s" % (match.group(1), client)
+ if match.group(2):
+ keytype = match.group(2)
+ else:
+ keytype = 'rsa1'
+ else:
+ self.logger.error("Unknown key filename: %s" % filename)
+ return
+
+ fileloc = "%s/%s" % (self.data, hostkey)
+ publoc = self.data + '/' + ".".join([hostkey.split('.')[0], 'pub',
+ "H_%s" % client])
+ tempdir = tempfile.mkdtemp()
+ temploc = "%s/%s" % (tempdir, hostkey)
+ cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "",
+ "-t", keytype, "-C", "root@%s" % client]
+ proc = Popen(cmd, stdout=PIPE, stdin=PIPE)
+ proc.communicate()
+ proc.wait()
+
+ try:
+ shutil.copy(temploc, fileloc)
+ shutil.copy("%s.pub" % temploc, publoc)
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Temporary SSH keys not found: %s" % err)
+
+ try:
+ os.unlink(temploc)
+ os.unlink("%s.pub" % temploc)
+ os.rmdir(tempdir)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to unlink temporary ssh keys: %s" % err)
+
+ def AcceptChoices(self, _, metadata):
+ return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]
+
+ def AcceptPullData(self, specific, entry, log):
+ """Per-plugin bcfg2-admin pull support."""
+ # specific will always be host specific
+ filename = "%s/%s.H_%s" % (self.data, entry['name'].split('/')[-1],
+ specific.hostname)
+ try:
+ open(filename, 'w').write(entry['text'])
+ if log:
+ print("Wrote file %s" % filename)
+ except KeyError:
+ self.logger.error("Failed to pull %s. This file does not currently "
+ "exist on the client" % entry.get('name'))
diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
new file mode 100644
index 000000000..0072dc62d
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
@@ -0,0 +1,274 @@
+import Bcfg2.Server.Plugin
+import Bcfg2.Options
+import lxml.etree
+import posixpath
+import tempfile
+import pipes
+import os
+from subprocess import Popen, PIPE, STDOUT
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+
+class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
+ """
+ The SSLCA generator handles the creation and
+ management of ssl certificates and their keys.
+ """
+ name = 'SSLCA'
+ __author__ = 'g.hagger@gmail.com'
+ __child__ = Bcfg2.Server.Plugin.FileBacked
+ key_specs = {}
+ cert_specs = {}
+ CAs = {}
+
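+    # Illustrative spec files (structure assumed from the parsing in
+    # HandleEvent below):
+    #   <dir>/key.xml:  contains a <Key type="rsa" bits="2048"/> element
+    #   <dir>/cert.xml: contains a <Cert key="/path/to/key" ca="default"
+    #                   days="365" c="US" o="Example Inc"/> element
+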
+ def HandleEvent(self, event=None):
+ """
+ Updates which files this plugin handles based upon filesystem events.
+ Allows configuration items to be added/removed without server restarts.
+ """
+ action = event.code2str()
+ if event.filename[0] == '/':
+ return
+ epath = "".join([self.data, self.handles[event.requestID],
+ event.filename])
+ if posixpath.isdir(epath):
+ ident = self.handles[event.requestID] + event.filename
+ else:
+ ident = self.handles[event.requestID][:-1]
+
+ fname = "".join([ident, '/', event.filename])
+
+ if event.filename.endswith('.xml'):
+ if action in ['exists', 'created', 'changed']:
+ if event.filename.endswith('key.xml'):
+ key_spec = dict(list(lxml.etree.parse(epath).find('Key').items()))
+ self.key_specs[ident] = {
+ 'bits': key_spec.get('bits', 2048),
+ 'type': key_spec.get('type', 'rsa')
+ }
+ self.Entries['Path'][ident] = self.get_key
+ elif event.filename.endswith('cert.xml'):
+ cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items()))
+ ca = cert_spec.get('ca', 'default')
+ self.cert_specs[ident] = {
+ 'ca': ca,
+ 'format': cert_spec.get('format', 'pem'),
+ 'key': cert_spec.get('key'),
+ 'days': cert_spec.get('days', 365),
+ 'C': cert_spec.get('c'),
+ 'L': cert_spec.get('l'),
+ 'ST': cert_spec.get('st'),
+ 'OU': cert_spec.get('ou'),
+ 'O': cert_spec.get('o'),
+ 'emailAddress': cert_spec.get('emailaddress')
+ }
+ cp = ConfigParser.ConfigParser()
+ cp.read(self.core.cfile)
+ self.CAs[ca] = dict(cp.items('sslca_' + ca))
+ self.Entries['Path'][ident] = self.get_cert
+ if action == 'deleted':
+ if ident in self.Entries['Path']:
+ del self.Entries['Path'][ident]
+ else:
+ if action in ['exists', 'created']:
+ if posixpath.isdir(epath):
+ self.AddDirectoryMonitor(epath[len(self.data):])
+                if fname not in self.entries and posixpath.isfile(epath):
+ self.entries[fname] = self.__child__(epath)
+ self.entries[fname].HandleEvent(event)
+ if action == 'changed':
+ self.entries[fname].HandleEvent(event)
+ elif action == 'deleted':
+ if fname in self.entries:
+ del self.entries[fname]
+ else:
+ self.entries[fname].HandleEvent(event)
+
+ def get_key(self, entry, metadata):
+ """
+        either grabs a preexisting key hostfile, or triggers the
+        generation of a new key if one doesn't exist.
+ """
+ # set path type and permissions, otherwise bcfg2 won't bind the file
+ permdata = {'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '644'}
+        entry.attrib.update(permdata)
+
+ # check if we already have a hostfile, or need to generate a new key
+ # TODO: verify key fits the specs
+ path = entry.get('name')
+ filename = "".join([path, '/', path.rsplit('/', 1)[1],
+ '.H_', metadata.hostname])
+        if filename not in self.entries:
+ key = self.build_key(filename, entry, metadata)
+ open(self.data + filename, 'w').write(key)
+ entry.text = key
+ self.entries[filename] = self.__child__("%s%s" % (self.data,
+ filename))
+ self.entries[filename].HandleEvent()
+ else:
+ entry.text = self.entries[filename].data
+
+ def build_key(self, filename, entry, metadata):
+ """
+        generates a new key according to the specification
+ """
+        keytype = self.key_specs[entry.get('name')]['type']
+        bits = self.key_specs[entry.get('name')]['bits']
+        if keytype == 'rsa':
+            cmd = ["openssl", "genrsa", str(bits)]
+        elif keytype == 'dsa':
+            cmd = ["openssl", "dsaparam", "-noout", "-genkey", str(bits)]
+        key = Popen(cmd, stdout=PIPE).stdout.read()
+ return key
+
+ def get_cert(self, entry, metadata):
+ """
+        either grabs a preexisting cert hostfile, or triggers the
+        generation of a new cert if one doesn't exist.
+ """
+ # set path type and permissions, otherwise bcfg2 won't bind the file
+ permdata = {'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '644'}
+        entry.attrib.update(permdata)
+
+ path = entry.get('name')
+ filename = "".join([path, '/', path.rsplit('/', 1)[1],
+ '.H_', metadata.hostname])
+
+ # first - ensure we have a key to work with
+ key = self.cert_specs[entry.get('name')].get('key')
+ key_filename = "".join([key, '/', key.rsplit('/', 1)[1],
+ '.H_', metadata.hostname])
+ if key_filename not in self.entries:
+ e = lxml.etree.Element('Path')
+ e.attrib['name'] = key
+ self.core.Bind(e, metadata)
+
+ # check if we have a valid hostfile
+        if (filename in self.entries and
+                self.verify_cert(filename, key_filename, entry)):
+ entry.text = self.entries[filename].data
+ else:
+ cert = self.build_cert(key_filename, entry, metadata)
+ open(self.data + filename, 'w').write(cert)
+ self.entries[filename] = self.__child__("%s%s" % (self.data,
+ filename))
+ self.entries[filename].HandleEvent()
+ entry.text = cert
+
+ def verify_cert(self, filename, key_filename, entry):
+ if self.verify_cert_against_ca(filename, entry):
+ if self.verify_cert_against_key(filename, key_filename):
+ return True
+ return False
+
+ def verify_cert_against_ca(self, filename, entry):
+ """
+ check that a certificate validates against the ca cert,
+ and that it has not expired.
+ """
+ chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
+ cert = self.data + filename
+ res = Popen(["openssl", "verify", "-CAfile", chaincert, cert],
+ stdout=PIPE, stderr=STDOUT).stdout.read()
+ if res == cert + ": OK\n":
+ return True
+ return False
+
+ def verify_cert_against_key(self, filename, key_filename):
+ """
+ check that a certificate validates against its private key.
+ """
+ cert = self.data + filename
+ key = self.data + key_filename
+ cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" %
+ pipes.quote(cert))
+ cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
+ cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" %
+ pipes.quote(key))
+ key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
+ if cert_md5 == key_md5:
+ return True
+ return False
+
+ def build_cert(self, key_filename, entry, metadata):
+ """
+ creates a new certificate according to the specification
+ """
+ req_config = self.build_req_config(entry, metadata)
+ req = self.build_request(key_filename, req_config, entry)
+ ca = self.cert_specs[entry.get('name')]['ca']
+ ca_config = self.CAs[ca]['config']
+ days = self.cert_specs[entry.get('name')]['days']
+ passphrase = self.CAs[ca].get('passphrase')
+ cmd = ["openssl", "ca", "-config", ca_config, "-in", req,
+ "-days", days, "-batch"]
+ if passphrase:
+ cmd.extend(["-passin", "pass:%s" % passphrase])
+ cert = Popen(cmd, stdout=PIPE).stdout.read()
+ try:
+ os.unlink(req_config)
+ os.unlink(req)
+ except OSError:
+ self.logger.error("Failed to unlink temporary files")
+ return cert
+
+ def build_req_config(self, entry, metadata):
+ """
+ generates a temporary openssl configuration file that is
+ used to generate the required certificate request
+ """
+ # create temp request config file
+ conffile = open(tempfile.mkstemp()[1], 'w')
+ cp = ConfigParser.ConfigParser({})
+ cp.optionxform = str
+ defaults = {
+ 'req': {
+ 'default_md': 'sha1',
+ 'distinguished_name': 'req_distinguished_name',
+ 'req_extensions': 'v3_req',
+ 'x509_extensions': 'v3_req',
+ 'prompt': 'no'
+ },
+ 'req_distinguished_name': {},
+ 'v3_req': {
+ 'subjectAltName': '@alt_names'
+ },
+ 'alt_names': {}
+ }
+ for section in list(defaults.keys()):
+ cp.add_section(section)
+ for key in defaults[section]:
+ cp.set(section, key, defaults[section][key])
+        altnames = list(metadata.aliases)
+        altnames.append(metadata.hostname)
+        for x, altname in enumerate(altnames, 1):
+            cp.set('alt_names', 'DNS.' + str(x), altname)
+ for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
+ if self.cert_specs[entry.get('name')][item]:
+ cp.set('req_distinguished_name', item, self.cert_specs[entry.get('name')][item])
+ cp.set('req_distinguished_name', 'CN', metadata.hostname)
+ cp.write(conffile)
+ conffile.close()
+ return conffile.name
+
+ def build_request(self, key_filename, req_config, entry):
+ """
+ creates the certificate request
+ """
+ req = tempfile.mkstemp()[1]
+ days = self.cert_specs[entry.get('name')]['days']
+ key = self.data + key_filename
+ cmd = ["openssl", "req", "-new", "-config", req_config,
+ "-days", days, "-key", key, "-text", "-out", req]
+ res = Popen(cmd, stdout=PIPE).stdout.read()
+ return req
diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
new file mode 100644
index 000000000..aeb3b9f74
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
@@ -0,0 +1,142 @@
+#import lxml.etree
+import logging
+import binascii
+import difflib
+#import sqlalchemy
+#import sqlalchemy.orm
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Snapshots
+import Bcfg2.Logger
+from Bcfg2.Server.Snapshots.model import Snapshot
+import sys
+import time
+import threading
+
+# Compatibility import
+from Bcfg2.Bcfg2Py3k import Queue
+
+logger = logging.getLogger('Snapshots')
+
+ftypes = ['ConfigFile', 'SymLink', 'Directory']
+datafields = {
+ 'Package': ['version'],
+ 'Path': ['type'],
+ 'Service': ['status'],
+ 'ConfigFile': ['owner', 'group', 'perms'],
+ 'Directory': ['owner', 'group', 'perms'],
+ 'SymLink': ['to'],
+ }
+
+
+# py3k compatibility
+def u_str(string):
+ if sys.hexversion >= 0x03000000:
+ return string
+ else:
+ return unicode(string)
+
+
+def build_snap_ent(entry):
+ basefields = []
+ if entry.tag in ['Package', 'Service']:
+ basefields += ['type']
+ desired = dict([(key, u_str(entry.get(key))) for key in basefields])
+ state = dict([(key, u_str(entry.get(key))) for key in basefields])
+ desired.update([(key, u_str(entry.get(key))) for key in \
+ datafields[entry.tag]])
+ if entry.tag == 'ConfigFile' or \
+ ((entry.tag == 'Path') and (entry.get('type') == 'file')):
+        if entry.text is None:
+ desired['contents'] = None
+ else:
+ if entry.get('encoding', 'ascii') == 'ascii':
+ desired['contents'] = u_str(entry.text)
+ else:
+ desired['contents'] = u_str(binascii.a2b_base64(entry.text))
+
+ if 'current_bfile' in entry.attrib:
+ state['contents'] = u_str(binascii.a2b_base64( \
+ entry.get('current_bfile')))
+ elif 'current_bdiff' in entry.attrib:
+ diff = binascii.a2b_base64(entry.get('current_bdiff'))
+ state['contents'] = u_str( \
+ '\n'.join(difflib.restore(diff.split('\n'), 1)))
+
+ state.update([(key, u_str(entry.get('current_' + key, entry.get(key)))) \
+ for key in datafields[entry.tag]])
+ if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false':
+ state = None
+ return [desired, state]
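+
+# Illustrative example: a clean entry such as
+#   <Service name="sshd" type="chkconfig" status="on"/>
+# yields desired == state == {'type': 'chkconfig', 'status': 'on'}
+# (unicode strings on Python 2).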
+
+
+class Snapshots(Bcfg2.Server.Plugin.Statistics,
+ Bcfg2.Server.Plugin.Plugin):
+ name = 'Snapshots'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Statistics.__init__(self)
+ self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
+ self.work_queue = Queue()
+ self.loader = threading.Thread(target=self.load_snapshot)
+ self.loader.start()
+
+ def load_snapshot(self):
+ while self.running:
+ try:
+ (metadata, data) = self.work_queue.get(block=True, timeout=5)
+            except Exception:
+                # the get() timed out or was interrupted; loop again so
+                # shutdown (self.running going False) is noticed
+                continue
+ self.statistics_from_old_stats(metadata, data)
+
+ def process_statistics(self, metadata, data):
+ return self.work_queue.put((metadata, data))
+
+ def statistics_from_old_stats(self, metadata, xdata):
+ # entries are name -> (modified, correct, start, desired, end)
+ # not sure we can get all of this from old format stats
+ t1 = time.time()
+ entries = dict([('Package', dict()),
+ ('Service', dict()), ('Path', dict())])
+ extra = dict([('Package', dict()), ('Service', dict()),
+ ('Path', dict())])
+ bad = []
+ state = xdata.find('.//Statistics')
+ correct = state.get('state') == 'clean'
+ revision = u_str(state.get('revision', '-1'))
+        # findall() returns an empty list when a section is missing,
+        # where iterating over find()'s None return would raise TypeError
+        for entry in state.findall('.//Bad/*'):
+ data = [False, False, u_str(entry.get('name'))] \
+ + build_snap_ent(entry)
+ if entry.tag in ftypes:
+ etag = 'Path'
+ else:
+ etag = entry.tag
+ entries[etag][entry.get('name')] = data
+        for entry in state.findall('.//Modified/*'):
+            if entry.tag in ftypes:
+                etag = 'Path'
+            else:
+                etag = entry.tag
+            # both branches of the original if/else were identical, so
+            # the membership test was redundant; record the entry so
+            # modified items actually reach the snapshot
+            data = [True, False, u_str(entry.get('name'))] + \
+                   build_snap_ent(entry)
+            entries[etag][entry.get('name')] = data
+        for entry in state.findall('.//Extra/*'):
+ if entry.tag in datafields:
+ data = build_snap_ent(entry)[1]
+ ename = u_str(entry.get('name'))
+ data['name'] = ename
+ extra[entry.tag][ename] = data
+            else:
+                logger.warning("Unknown extra entry %s (%s)" %
+                               (entry.tag, entry.get('name')))
+ t2 = time.time()
+ snap = Snapshot.from_data(self.session, correct, revision,
+ metadata, entries, extra)
+ self.session.add(snap)
+ self.session.commit()
+ t3 = time.time()
+ logger.info("Snapshot storage took %fs" % (t3 - t2))
+ return True
diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py
new file mode 100644
index 000000000..265ef95a8
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Statistics.py
@@ -0,0 +1,161 @@
+'''This file manages the statistics collected by the Bcfg2 server.'''
+
+import binascii
+import copy
+import difflib
+import logging
+from lxml.etree import XML, SubElement, Element, XMLSyntaxError
+import lxml.etree
+import os
+import sys
+from time import asctime, localtime, time, strptime, mktime
+import threading
+
+import Bcfg2.Server.Plugin
+
+
+class StatisticsStore(object):
+ """Manages the memory and file copy of statistics collected about client runs."""
+ __min_write_delay__ = 0
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.element = Element('Dummy')
+ self.dirty = 0
+ self.lastwrite = 0
+ self.logger = logging.getLogger('Bcfg2.Server.Statistics')
+ self.ReadFromFile()
+
+ def WriteBack(self, force=0):
+ """Write statistics changes back to persistent store."""
+ if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \
+ or force:
+ try:
+ fout = open(self.filename + '.new', 'w')
+ except IOError:
+ ioerr = sys.exc_info()[1]
+ self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
+ else:
+ fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True))
+ fout.close()
+ os.rename(self.filename + '.new', self.filename)
+ self.dirty = 0
+ self.lastwrite = time()
+
+ def ReadFromFile(self):
+ """Reads current state regarding statistics."""
+ try:
+ fin = open(self.filename, 'r')
+ data = fin.read()
+ fin.close()
+ self.element = XML(data)
+ self.dirty = 0
+ except (IOError, XMLSyntaxError):
+ self.logger.error("Creating new statistics file %s"%(self.filename))
+ self.element = Element('ConfigStatistics')
+ self.WriteBack()
+ self.dirty = 0
+
+ def updateStats(self, xml, client):
+ """Updates the statistics of a current node with new data."""
+
+ # Current policy:
+ # - Keep anything less than 24 hours old
+ # - Keep latest clean run for clean nodes
+ # - Keep latest clean and dirty run for dirty nodes
+ newstat = xml.find('Statistics')
+
+ if newstat.get('state') == 'clean':
+ node_dirty = 0
+ else:
+ node_dirty = 1
+
+ # Find correct node entry in stats data
+        # The following list comprehension should be guaranteed to
+        # return at most one result
+ nodes = [elem for elem in self.element.findall('Node') \
+ if elem.get('name') == client]
+ nummatch = len(nodes)
+ if nummatch == 0:
+ # Create an entry for this node
+ node = SubElement(self.element, 'Node', name=client)
+ elif nummatch == 1 and not node_dirty:
+ # Delete old instance
+ node = nodes[0]
+ [node.remove(elem) for elem in node.findall('Statistics') \
+ if self.isOlderThan24h(elem.get('time'))]
+ elif nummatch == 1 and node_dirty:
+ # Delete old dirty statistics entry
+ node = nodes[0]
+ [node.remove(elem) for elem in node.findall('Statistics') \
+ if (elem.get('state') == 'dirty' \
+ and self.isOlderThan24h(elem.get('time')))]
+        else:
+            # Shouldn't be reached
+            self.logger.error("Duplicate node entry for %s" % client)
+            # fall back to the first entry so the append below does not
+            # fail with a NameError
+            node = nodes[0]
+
+ # Set current time for stats
+ newstat.set('time', asctime(localtime()))
+
+ # Add statistic
+ node.append(copy.copy(newstat))
+
+ # Set dirty
+ self.dirty = 1
+ self.WriteBack(force=1)
+
+ def isOlderThan24h(self, testTime):
+ """Helper function to determine if <time> string is older than 24 hours."""
+ now = time()
+ utime = mktime(strptime(testTime))
+ secondsPerDay = 60*60*24
+
+ return (now-utime) > secondsPerDay
+
+
+class Statistics(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ThreadedStatistics,
+ Bcfg2.Server.Plugin.PullSource):
+ name = 'Statistics'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.PullSource.__init__(self)
+ fpath = "%s/etc/statistics.xml" % datastore
+ self.data_file = StatisticsStore(fpath)
+
+ def handle_statistic(self, metadata, data):
+ self.data_file.updateStats(data, metadata.hostname)
+
+ def FindCurrent(self, client):
+ rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0]
+ maxtime = max([strptime(stat.get('time')) for stat \
+ in rt.findall('Statistics')])
+ return [stat for stat in rt.findall('Statistics') \
+ if strptime(stat.get('time')) == maxtime][0]
+
+ def GetExtra(self, client):
+ return [(entry.tag, entry.get('name')) for entry \
+ in self.FindCurrent(client).xpath('.//Extra/*')]
+
+ def GetCurrentEntry(self, client, e_type, e_name):
+ curr = self.FindCurrent(client)
+ entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name))
+ if not entry:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ cfentry = entry[-1]
+
+ owner = cfentry.get('current_owner', cfentry.get('owner'))
+ group = cfentry.get('current_group', cfentry.get('group'))
+ perms = cfentry.get('current_perms', cfentry.get('perms'))
+ if cfentry.get('sensitive') in ['true', 'True']:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ elif 'current_bfile' in cfentry.attrib:
+ contents = binascii.a2b_base64(cfentry.get('current_bfile'))
+ elif 'current_bdiff' in cfentry.attrib:
+ diff = binascii.a2b_base64(cfentry.get('current_bdiff'))
+ contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
+ else:
+ contents = None
+
+ return (owner, group, perms, contents)
diff --git a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
new file mode 100644
index 000000000..f4232ad5c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
@@ -0,0 +1,10 @@
+"""This generator provides service mappings."""
+
+import Bcfg2.Server.Plugin
+
+
+class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
+ """This is a generator that handles service assignments."""
+ name = 'Svcmgr'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn.py b/src/lib/Bcfg2/Server/Plugins/Svn.py
new file mode 100644
index 000000000..ae43388ea
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Svn.py
@@ -0,0 +1,46 @@
+import os
+import pipes
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Svn')
+
+
+class Svn(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Svn is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Svn'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # path to svn directory for bcfg2 repo
+ svn_dir = "%s/.svn" % datastore
+
+ # Read revision from bcfg2 repo
+ if os.path.isdir(svn_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not a directory" % svn_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir)
+
+ def get_revision(self):
+ """Read svn revision information for the Bcfg2 repository."""
+ try:
+ data = Popen(("env LC_ALL=C svn info %s" %
+ pipes.quote(self.datastore)), shell=True,
+ stdout=PIPE).communicate()[0].split('\n')
+ return [line.split(': ')[1] for line in data \
+ if line[:9] == 'Revision:'][-1]
+ except IndexError:
+ logger.error("Failed to read svn info; disabling svn support")
+ logger.error('''Ran command "svn info %s"''' % (self.datastore))
+ logger.error("Got output: %s" % data)
+ raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn2.py b/src/lib/Bcfg2/Server/Plugins/Svn2.py
new file mode 100644
index 000000000..8d79348f8
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Svn2.py
@@ -0,0 +1,125 @@
+import sys
+
+try:
+ import pysvn
+ missing = False
+except:
+ missing = True
+import Bcfg2.Server.Plugin
+
+class Svn2(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Svn is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Svn2'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ conflicts = ['Svn']
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Update','Commit']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+
+ if missing:
+ self.logger.error("Svn2: Missing PySvn")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ self.client = pysvn.Client()
+
+ self.core = core
+ self.datastore = datastore
+ self.svn_root = None
+ self.revision = None
+
+ # Read revision from bcfg2 repo
+ revision = self.get_revision()
+ if not self.revision:
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ self.logger.debug("Initialized svn plugin with svn root %s at revision %s"
+ % (self.svn_root, revision))
+
+ def get_revision(self):
+ """Read svn revision information for the Bcfg2 repository."""
+ try:
+ info = self.client.info(self.datastore)
+ self.revision = info.revision
+ self.svn_root = info.url
+ return str(self.revision.number)
+ except:
+ self.logger.error("Svn2: Failed to get revision", exc_info=1)
+ self.revision = None
+ return str(-1)
+
+ def commit_data(self, file_list, comment=None):
+ """Commit changes into the repository"""
+ if not comment:
+ comment = 'Svn2: autocommit'
+
+ # First try to update
+ if not self.Update():
+ self.logger.error("Failed to update svn repository, refusing to commit changes")
+ return
+
+ #FIXME - look for conflicts?
+
+ for fname in file_list:
+ stat = self.client.status(fname)
+ self.client.add([f.path for f in stat \
+ if f.text_status == pysvn.wc_status_kind.unversioned])
+ try:
+ self.revision = self.client.checkin([self.datastore], comment,
+ recurse=True)
+ self.revision = self.client.update(self.datastore, recurse=True)[0]
+ self.logger.info("Svn2: Commited changes. At %s" %
+ self.revision.number)
+        except Exception:
+            err = sys.exc_info()[1]
+ # try to be smart about the error we got back
+ details = None
+ if "callback_ssl_server_trust_prompt" in str(err):
+ details = "SVN server certificate is not trusted"
+ elif "callback_get_login" in str(err):
+ details = "SVN credentials not cached"
+
+ if details is None:
+ self.logger.error("Svn2: Failed to commit changes",
+ exc_info=1)
+ else:
+ self.logger.error("Svn2: Failed to commit changes: %s" %
+ details)
+
+ def Update(self):
+ '''Svn2.Update() => True|False\nUpdate svn working copy\n'''
+ try:
+ old_revision = self.revision.number
+ self.revision = self.client.update(self.datastore, recurse=True)[0]
+        except Exception:
+            err = sys.exc_info()[1]
+ # try to be smart about the error we got back
+ details = None
+ if "callback_ssl_server_trust_prompt" in str(err):
+ details = "SVN server certificate is not trusted"
+ elif "callback_get_login" in str(err):
+ details = "SVN credentials not cached"
+
+ if details is None:
+ self.logger.error("Svn2: Failed to update server repository",
+ exc_info=1)
+ else:
+ self.logger.error("Svn2: Failed to update server repository: %s" %
+ details)
+ return False
+
+ if old_revision == self.revision.number:
+ self.logger.debug("repository is current")
+ else:
+ self.logger.info("Updated %s from revision %s to %s" % \
+ (self.datastore, old_revision, self.revision.number))
+ return True
+
+ def Commit(self):
+ """Svn2.Commit() => True|False\nCommit svn repository\n"""
+ try:
+            self.commit_data([])
+ return True
+        except Exception:
+ return False
+
+
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
new file mode 100644
index 000000000..8879fdef1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
@@ -0,0 +1,80 @@
+'''This module implements a templating generator based on Cheetah'''
+
+import binascii
+import logging
+import sys
+import traceback
+import Bcfg2.Server.Plugin
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+ unicode = str
+
+logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
+
+try:
+ import Cheetah.Template
+ import Cheetah.Parser
+except ImportError:
+ logger.error("TCheetah: Failed to import Cheetah. Is it installed?")
+ raise
+
+
+class TemplateFile:
+ """Template file creates Cheetah template structures for the loaded file."""
+
+ def __init__(self, name, specific, encoding):
+ self.name = name
+ self.specific = specific
+ self.encoding = encoding
+ self.template = None
+ self.searchlist = dict()
+
+ def handle_event(self, event):
+ """Handle all fs events for this template."""
+ if event.code2str() == 'deleted':
+ return
+ try:
+ s = {'useStackFrames': False}
+ self.template = Cheetah.Template.Template(open(self.name).read(),
+ compilerSettings=s,
+ searchList=self.searchlist)
+ except Cheetah.Parser.ParseError:
+ perror = sys.exc_info()[1]
+ logger.error("Cheetah parse error for file %s" % (self.name))
+ logger.error(perror.report())
+
+ def bind_entry(self, entry, metadata):
+ """Build literal file information."""
+ self.template.metadata = metadata
+ self.searchlist['metadata'] = metadata
+ self.template.path = entry.get('realname', entry.get('name'))
+ self.searchlist['path'] = entry.get('realname', entry.get('name'))
+ self.template.source_path = self.name
+ self.searchlist['source_path'] = self.name
+
+ if entry.tag == 'Path':
+ entry.set('type', 'file')
+ try:
+ if type(self.template) == unicode:
+ entry.text = self.template
+ else:
+ if entry.get('encoding') == 'base64':
+ # take care of case where file needs base64 encoding
+ entry.text = binascii.b2a_base64(self.template)
+ else:
+ entry.text = unicode(str(self.template), self.encoding)
+ except:
+ (a, b, c) = sys.exc_info()
+ msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
+ logger.error(msg)
+ logger.error("TCheetah template error for %s" % self.searchlist['path'])
+ del a, b, c
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+
+
+class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
+ """The TCheetah generator implements a templating mechanism for configuration files."""
+ name = 'TCheetah'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ filename_pattern = 'template'
+ es_child_cls = TemplateFile
diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
new file mode 100644
index 000000000..3ba0f4272
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
@@ -0,0 +1,135 @@
+"""This module implements a templating generator based on Genshi."""
+
+import binascii
+import logging
+import sys
+import Bcfg2.Server.Plugin
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+ unicode = str
+
+logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
+
+# try to import genshi stuff
+try:
+ import genshi.core
+ import genshi.input
+ from genshi.template import TemplateLoader, \
+ TextTemplate, MarkupTemplate, TemplateError
+except ImportError:
+ logger.error("TGenshi: Failed to import Genshi. Is it installed?")
+ raise Bcfg2.Server.Plugin.PluginInitError
+try:
+ from genshi.template import NewTextTemplate
+ have_ntt = True
+except ImportError:
+ have_ntt = False
+
+def removecomment(stream):
+ """A genshi filter that removes comments from the stream."""
+ for kind, data, pos in stream:
+ if kind is genshi.core.COMMENT:
+ continue
+ yield kind, data, pos
+
+
+class TemplateFile:
+ """Template file creates Genshi template structures for the loaded file."""
+
+ def __init__(self, name, specific, encoding):
+ self.name = name
+ self.specific = specific
+ self.encoding = encoding
+ if self.specific.all:
+ matchname = self.name
+ elif self.specific.group:
+ matchname = self.name[:self.name.find('.G')]
+ else:
+ matchname = self.name[:self.name.find('.H')]
+ if matchname.endswith('.txt'):
+ self.template_cls = TextTemplate
+ elif matchname.endswith('.newtxt'):
+            if not have_ntt:
+                logger.error("Genshi NewTextTemplates not supported by "
+                             "this version of Genshi; falling back to "
+                             "TextTemplate")
+                # without a fallback self.template_cls would be left
+                # unset and handle_event would raise AttributeError
+                self.template_cls = TextTemplate
+            else:
+                self.template_cls = NewTextTemplate
+ else:
+ self.template_cls = MarkupTemplate
+ self.HandleEvent = self.handle_event
+
+ def handle_event(self, event=None):
+ """Handle all fs events for this template."""
+ if event and event.code2str() == 'deleted':
+ return
+ try:
+ loader = TemplateLoader()
+ try:
+ self.template = loader.load(self.name, cls=self.template_cls,
+ encoding=self.encoding)
+ except LookupError:
+ lerror = sys.exc_info()[1]
+ logger.error('Genshi lookup error: %s' % lerror)
+ except TemplateError:
+ terror = sys.exc_info()[1]
+ logger.error('Genshi template error: %s' % terror)
+ except genshi.input.ParseError:
+ perror = sys.exc_info()[1]
+ logger.error('Genshi parse error: %s' % perror)
+
+ def bind_entry(self, entry, metadata):
+ """Build literal file information."""
+ fname = entry.get('realname', entry.get('name'))
+ if entry.tag == 'Path':
+ entry.set('type', 'file')
+ try:
+ stream = self.template.generate( \
+ name=fname, metadata=metadata,
+ path=self.name).filter(removecomment)
+ if have_ntt:
+ ttypes = [TextTemplate, NewTextTemplate]
+ else:
+ ttypes = [TextTemplate]
+ if True in [isinstance(self.template, t) for t in ttypes]:
+ try:
+ textdata = stream.render('text', strip_whitespace=False)
+ except TypeError:
+ textdata = stream.render('text')
+ if type(textdata) == unicode:
+ entry.text = textdata
+ else:
+ if entry.get('encoding') == 'base64':
+ # take care of case where file needs base64 encoding
+ entry.text = binascii.b2a_base64(textdata)
+ else:
+ entry.text = unicode(textdata, self.encoding)
+ else:
+ try:
+ xmldata = stream.render('xml', strip_whitespace=False)
+ except TypeError:
+ xmldata = stream.render('xml')
+ if type(xmldata) == unicode:
+ entry.text = xmldata
+ else:
+ entry.text = unicode(xmldata, self.encoding)
+ if entry.text == '':
+ entry.set('empty', 'true')
+ except TemplateError:
+ terror = sys.exc_info()[1]
+ logger.error('Genshi template error: %s' % terror)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ except AttributeError:
+ err = sys.exc_info()[1]
+ logger.error('Genshi template loading error: %s' % err)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+
+
+class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
+ """
+ The TGenshi generator implements a templating
+ mechanism for configuration files.
+
+ """
+ name = 'TGenshi'
+ __author__ = 'jeff@ocjtech.us'
+    filename_pattern = r'template\.(txt|newtxt|xml)'
+ es_child_cls = TemplateFile
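+
+# Example layout (illustrative): a file served at /etc/motd is rendered
+# from TGenshi/etc/motd/template.newtxt (or template.txt / template.xml
+# for the other template dialects).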
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
new file mode 100644
index 000000000..2c0ee03e0
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -0,0 +1,83 @@
+import re
+import imp
+import sys
+import logging
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger(__name__)
+
+class HelperModule(Bcfg2.Server.Plugin.SpecificData):
+ _module_name_re = re.compile(r'([^/]+?)\.py')
+
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
+ encoding)
+ match = self._module_name_re.search(self.name)
+ if match:
+ self._module_name = match.group(1)
+ else:
+ self._module_name = name
+ self._attrs = []
+
+ def handle_event(self, event):
+ Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ try:
+ module = imp.load_source(self._module_name, self.name)
+ except:
+ err = sys.exc_info()[1]
+ logger.error("TemplateHelper: Failed to import %s: %s" %
+ (self.name, err))
+ return
+
+ if not hasattr(module, "__export__"):
+ logger.error("TemplateHelper: %s has no __export__ list" %
+ self.name)
+ return
+
+        for sym in module.__export__:
+            if sym not in self._attrs and hasattr(self, sym):
+                logger.warning("TemplateHelper: %s: %s is a reserved "
+                               "keyword, skipping export" %
+                               (self.name, sym))
+                # skip the reserved name instead of clobbering it
+                continue
+            setattr(self, sym, getattr(module, sym))
+ # remove old exports
+ for sym in set(self._attrs) - set(module.__export__):
+ delattr(self, sym)
+
+ self._attrs = module.__export__
+
+
+class HelperSet(Bcfg2.Server.Plugin.EntrySet):
+    ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.py[co])$')
+
+ def __init__(self, path, fam, encoding, plugin_name):
+        fpattern = r'[0-9A-Za-z_\-]+\.py'
+ self.plugin_name = plugin_name
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ HelperModule, encoding)
+ fam.AddMonitor(path, self)
+
+ def HandleEvent(self, event):
+ if (event.filename != self.path and
+ not self.ignore.match(event.filename)):
+ return self.handle_event(event)
+
+
+class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ """ A plugin to provide helper classes and functions to templates """
+ name = 'TemplateHelper'
+ __author__ = 'chris.a.st.pierre@gmail.com'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+
+ try:
+ self.helpers = HelperSet(self.data, core.fam, core.encoding,
+ self.name)
+        except Exception:
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def get_additional_data(self, metadata):
+ return dict([(h._module_name, h)
+ for h in list(self.helpers.entries.values())])
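+
+# A minimal helper module (hypothetical), e.g.
+# TemplateHelper/hostname.py in the repository:
+#
+#   __export__ = ["shortname"]
+#
+#   def shortname(fqdn):
+#       """Return the host portion of a fully qualified name."""
+#       return fqdn.split(".")[0]
+#
+# Each helper is then exposed under its module name via the Connector
+# data returned by get_additional_data above.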
diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py
new file mode 100644
index 000000000..b0d21545c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -0,0 +1,43 @@
+import os
+import Bcfg2.Server.Plugin
+
+
+def async_run(prog, args):
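+    # Double fork so the notifier runs detached: the parent reaps the
+    # first child immediately, while the grandchild that actually runs
+    # the command is reparented to init, so no zombies accumulate.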
+ pid = os.fork()
+ if pid:
+ os.waitpid(pid, 0)
+ else:
+ dpid = os.fork()
+ if not dpid:
+ os.system(" ".join([prog] + args))
+ os._exit(0)
+
+
+class Trigger(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Statistics):
+ """Trigger is a plugin that calls external scripts (on the server)."""
+ name = 'Trigger'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Statistics.__init__(self)
+ try:
+ os.stat(self.data)
+        except OSError:
+ self.logger.error("Trigger: spool directory %s does not exist; "
+ "unloading" % self.data)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def process_statistics(self, metadata, _):
+ args = [metadata.hostname, '-p', metadata.profile, '-g',
+                ':'.join(metadata.groups)]
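+        # Each notifier is invoked as, e.g. (hypothetical values):
+        #   /var/lib/bcfg2/Trigger/notify foo.example.com -p webserver \
+        #       -g linux:web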
+ for notifier in os.listdir(self.data):
+ if ((notifier[-1] == '~') or
+ (notifier[:2] == '.#') or
+ (notifier[-4:] == '.swp') or
+ (notifier in ['SCCS', '.svn', '4913'])):
+ continue
+ npath = self.data + '/' + notifier
+ self.logger.debug("Running %s %s" % (npath, " ".join(args)))
+ async_run(npath, args)
diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py
new file mode 100644
index 000000000..f9f1b4e52
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/__init__.py
@@ -0,0 +1,34 @@
+"""Imports for Bcfg2.Server.Plugins."""
+
+__all__ = [
+    'Account',
+    'Base',
+    'Bundler',
+    'Bzr',
+    'Cfg',
+    'Cvs',
+    'Darcs',
+    'Decisions',
+    'Fossil',
+    'Git',
+    'GroupPatterns',
+    'Hg',
+    'Hostbase',
+    'Metadata',
+    'NagiosGen',
+    'Ohai',
+    'Packages',
+    'Pkgmgr',
+    'Probes',
+    'Properties',
+    'Rules',
+    'SGenshi',
+    'SSHbase',
+    'Snapshots',
+    'Statistics',
+    'Svcmgr',
+    'Svn',
+    'TCheetah',
+    'TGenshi',
+    'Trigger',
+    ]