Diffstat (limited to 'src/lib/Bcfg2/Server/Plugins')
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/BB.py                                 |  83
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Base.py                               |   5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bundler.py                            | 138
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py            |   9
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py   |  14
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py          |  63
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py    |  26
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py             |  72
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py                     |   8
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py                  |   4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py                       | 100
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/DBStats.py                            |  52
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Decisions.py                          |   5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Deps.py                               |  21
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/FileProbes.py                         |  62
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupPatterns.py                      |  53
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Hostbase.py                           |  23
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ldap.py                               |   2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py                           | 957
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py                          |  25
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ohai.py                               |   2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Apt.py                       |  17
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Collection.py                |  75
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pac.py                       |   7
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py           |  32
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Source.py                    |  45
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py                       | 211
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py                  |  99
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Pkgmgr.py                             |  56
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py                             | 247
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Properties.py                         | 111
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/PuppetENC.py                          | 117
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SEModules.py                          |  45
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SGenshi.py                            |  97
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py                            |  12
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSLCA.py                              | 110
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/ServiceCompat.py                      |  32
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Snapshots.py                          |  27
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Statistics.py                         |  26
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svcmgr.py                             |  10
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TCheetah.py                           |   9
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TGenshi.py                            |  18
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TemplateHelper.py                     | 120
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Trigger.py                            |  65
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/__init__.py                           |   2
45 files changed, 2219 insertions(+), 1095 deletions(-)
diff --git a/src/lib/Bcfg2/Server/Plugins/BB.py b/src/lib/Bcfg2/Server/Plugins/BB.py
deleted file mode 100644
index c015ec47c..000000000
--- a/src/lib/Bcfg2/Server/Plugins/BB.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Plugin
-import glob
-import os
-import socket
-
-#manage boot symlinks
- #add statistics check to do build->boot mods
-
-#map profiles: first array is not empty we replace the -p with a determined profile.
-logger = Bcfg2.Server.Plugin.logger
-
-class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
- """Class for bb files."""
- def Index(self):
- """Build data into an xml object."""
-
- try:
- self.data = lxml.etree.XML(self.data)
- except lxml.etree.XMLSyntaxError:
- Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
- return
- self.tftppath = self.data.get('tftp', '/tftpboot')
- self.macs = {}
- self.users = {}
- self.actions = {}
- self.bootlinks = []
-
- for node in self.data.findall('Node'):
- iface = node.find('Interface')
- if iface != None:
- mac = "01-%s" % (iface.get('mac'.replace(':','-').lower()))
- self.actions[node.get('name')] = node.get('action')
- self.bootlinks.append((mac, node.get('action')))
- try:
- ip = socket.gethostbyname(node.get('name'))
- except:
- logger.error("failed host resolution for %s" % node.get('name'))
-
- self.macs[node.get('name')] = (iface.get('mac'), ip)
- else:
- logger.error("%s" % lxml.etree.tostring(node))
- self.users[node.get('name')] = node.get('user',"").split(':')
-
- def enforce_bootlinks(self):
- for mac, target in self.bootlinks:
- path = self.tftppath + '/' + mac
- if not os.path.islink(path):
- logger.error("Boot file %s not a link" % path)
- if target != os.readlink(path):
- try:
- os.unlink(path)
- os.symlink(target, path)
- except:
- logger.error("Failed to modify link %s" % path)
-
-class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
- __child__ = BBfile
-
-
-class BB(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector):
- """The BB plugin maps users to machines and metadata to machines."""
- name = 'BB'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.store = BBDirectoryBacked(self.data, core.fam)
-
- def get_additional_data(self, metadata):
-
- users = {}
- for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
- pubkeys = []
- for fname in glob.glob('/home/%s/.ssh/*.pub'%user):
- pubkeys.append(open(fname).read())
-
- users[user] = pubkeys
-
- return dict([('users', users),
- ('macs', self.store.entries['bb.xml'].macs)])
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
index 389ca7a95..2020f7795 100644
--- a/src/lib/Bcfg2/Server/Plugins/Base.py
+++ b/src/lib/Bcfg2/Server/Plugins/Base.py
@@ -3,10 +3,7 @@
import copy
import lxml.etree
import sys
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
+from Bcfg2.Bcfg2Py3k import reduce
import Bcfg2.Server.Plugin
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index ccb99481e..65914c371 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -1,25 +1,25 @@
"""This provides bundle clauses with translation functionality."""
import copy
+import logging
import lxml.etree
import os
import os.path
import re
import sys
-
+import Bcfg2.Server
import Bcfg2.Server.Plugin
+import Bcfg2.Server.Lint
try:
- import genshi.template
import genshi.template.base
- import Bcfg2.Server.Plugins.SGenshi
+ import Bcfg2.Server.Plugins.TGenshi
have_genshi = True
except:
have_genshi = False
class BundleFile(Bcfg2.Server.Plugin.StructFile):
-
def get_xml_value(self, metadata):
bundlename = os.path.splitext(os.path.basename(self.name))[0]
bundle = lxml.etree.Element('Bundle', name=bundlename)
@@ -27,6 +27,58 @@ class BundleFile(Bcfg2.Server.Plugin.StructFile):
return bundle
+if have_genshi:
+ class BundleTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
+ specific,
+ encoding)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
+ self.logger = logging.getLogger(name)
+
+ def get_xml_value(self, metadata):
+ if not hasattr(self, 'template'):
+ self.logger.error("No parsed template information for %s" %
+ self.name)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ stream = self.template.generate(metadata=metadata).filter(
+ Bcfg2.Server.Plugins.TGenshi.removecomment)
+ data = lxml.etree.XML(stream.render('xml',
+ strip_whitespace=False),
+ parser=Bcfg2.Server.XMLParser)
+ bundlename = os.path.splitext(os.path.basename(self.name))[0]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ for item in self.Match(metadata, data):
+ bundle.append(copy.deepcopy(item))
+ return bundle
+ except LookupError:
+ lerror = sys.exc_info()[1]
+ self.logger.error('Genshi lookup error: %s' % lerror)
+ except genshi.template.TemplateError:
+ terror = sys.exc_info()[1]
+ self.logger.error('Genshi template error: %s' % terror)
+ raise
+ except genshi.input.ParseError:
+ perror = sys.exc_info()[1]
+ self.logger.error('Genshi parse error: %s' % perror)
+ raise
+
+ def Match(self, metadata, xdata):
+ """Return matching fragments of parsed template."""
+ rv = []
+ for child in xdata.getchildren():
+ rv.extend(self._match(child, metadata))
+ self.logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
+ return rv
+
+
+ class SGenshiTemplateFile(BundleTemplateFile):
+ # provided for backwards compat
+ pass
+
+
class Bundler(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
Bcfg2.Server.Plugin.XMLDirectoryBacked):
@@ -50,25 +102,20 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Failed to load Bundle repository")
raise Bcfg2.Server.Plugin.PluginInitError
- def template_dispatch(self, name):
- bundle = lxml.etree.parse(name)
+ def template_dispatch(self, name, _):
+ bundle = lxml.etree.parse(name,
+ parser=Bcfg2.Server.XMLParser)
nsmap = bundle.getroot().nsmap
- if name.endswith('.xml'):
- if have_genshi and \
- (nsmap == {'py': 'http://genshi.edgewall.org/'}):
- # allow for genshi bundles with .xml extensions
- spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
- else:
- return BundleFile(name)
- elif name.endswith('.genshi'):
+ if (name.endswith('.genshi') or
+ ('py' in nsmap and
+ nsmap['py'] == 'http://genshi.edgewall.org/')):
if have_genshi:
spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
+ return BundleTemplateFile(name, spec, self.encoding)
+ else:
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not available: %s" % name)
+ else:
+ return BundleFile(name, self.fam)
def BuildStructures(self, metadata):
"""Build all structures for client (metadata)."""
@@ -97,3 +144,54 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Bundler: Unexpected bundler error for %s" %
bundlename, exc_info=1)
return bundleset
+
+
+class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ Perform various bundle checks """
+ def Run(self):
+ """ run plugin """
+ self.missing_bundles()
+ for bundle in self.core.plugins['Bundler'].entries.values():
+ if (self.HandlesFile(bundle.name) and
+ (not have_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ self.bundle_names(bundle)
+
+ @classmethod
+ def Errors(cls):
+ return {"bundle-not-found":"error",
+ "inconsistent-bundle-name":"warning"}
+
+ def missing_bundles(self):
+ """ find bundles listed in Metadata but not implemented in Bundler """
+ if self.files is None:
+ # when given a list of files on stdin, this check is
+ # useless, so skip it
+ groupdata = self.metadata.groups_xml.xdata
+ ref_bundles = set([b.get("name")
+ for b in groupdata.findall("//Bundle")])
+
+ allbundles = self.core.plugins['Bundler'].entries.keys()
+ for bundle in ref_bundles:
+ xmlbundle = "%s.xml" % bundle
+ genshibundle = "%s.genshi" % bundle
+ if (xmlbundle not in allbundles and
+ genshibundle not in allbundles):
+ self.LintError("bundle-not-found",
+ "Bundle %s referenced, but does not exist" %
+ bundle)
+
+ def bundle_names(self, bundle):
+ """ verify bundle name attribute matches filename """
+ try:
+ xdata = lxml.etree.XML(bundle.data)
+ except AttributeError:
+ # genshi template
+ xdata = lxml.etree.parse(bundle.template.filepath).getroot()
+
+ fname = bundle.name.split('Bundler/')[1].split('.')[0]
+ bname = xdata.get('name')
+ if fname != bname:
+ self.LintError("inconsistent-bundle-name",
+ "Inconsistent bundle name: filename is %s, "
+ "bundle name is %s" % (fname, bname))
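Note on the Bundler change above: the reworked template_dispatch() no longer keys on the file extension alone; any bundle whose root element binds the Genshi namespace to the "py" prefix is treated as a template, even with a .xml extension. A minimal sketch of that detection rule using lxml directly (the bundle content here is purely illustrative):

    import lxml.etree

    bundle = lxml.etree.fromstring(
        '<Bundle name="ssh" xmlns:py="http://genshi.edgewall.org/">'
        '<Package name="openssh-server"/>'
        '</Bundle>')
    nsmap = bundle.nsmap
    # same test as template_dispatch(): the Genshi namespace bound to
    # the "py" prefix marks the file as a template, whatever its extension
    is_genshi = ('py' in nsmap and
                 nsmap['py'] == 'http://genshi.edgewall.org/')
    print(is_genshi)  # True -> would be dispatched to BundleTemplateFile
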
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
index 3edd1d8cb..f02461673 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
@@ -6,8 +6,7 @@ from Bcfg2.Server.Plugins.Cfg import CfgGenerator
logger = logging.getLogger(__name__)
try:
- import Cheetah.Template
- import Cheetah.Parser
+ from Cheetah.Template import Template
have_cheetah = True
except ImportError:
have_cheetah = False
@@ -25,9 +24,9 @@ class CfgCheetahGenerator(CfgGenerator):
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def get_data(self, entry, metadata):
- template = Cheetah.Template.Template(self.data,
- compilerSettings=self.settings)
+ template = Template(self.data.decode(self.encoding),
+ compilerSettings=self.settings)
template.metadata = metadata
template.path = entry.get('realname', entry.get('name'))
- template.source_path = self.path
+ template.source_path = self.name
return template.respond()
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
new file mode 100644
index 000000000..a75329d2a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
@@ -0,0 +1,14 @@
+import logging
+from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import CfgEncryptedGenerator
+
+logger = logging.getLogger(__name__)
+
+class CfgEncryptedCheetahGenerator(CfgCheetahGenerator, CfgEncryptedGenerator):
+ __extensions__ = ['cheetah.crypt', 'crypt.cheetah']
+
+ def handle_event(self, event):
+ CfgEncryptedGenerator.handle_event(self, event)
+
+ def get_data(self, entry, metadata):
+ return CfgCheetahGenerator.get_data(self, entry, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
new file mode 100644
index 000000000..2c926fae7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
@@ -0,0 +1,63 @@
+import logging
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+try:
+ from Bcfg2.Encryption import ssl_decrypt, EVPError
+ have_crypto = True
+except ImportError:
+ have_crypto = False
+
+logger = logging.getLogger(__name__)
+
+def passphrases():
+ section = "encryption"
+ if SETUP.cfp.has_section(section):
+ return dict([(o, SETUP.cfp.get(section, o))
+ for o in SETUP.cfp.options(section)])
+ else:
+ return dict()
+
+def decrypt(crypted):
+ if not have_crypto:
+ msg = "Cfg: M2Crypto is not available: %s" % entry.get("name")
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ for passwd in passphrases().values():
+ try:
+ return ssl_decrypt(crypted, passwd)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
+
+class CfgEncryptedGenerator(CfgGenerator):
+ __extensions__ = ["crypt"]
+
+ def __init__(self, fname, spec, encoding):
+ CfgGenerator.__init__(self, fname, spec, encoding)
+ if not have_crypto:
+ msg = "Cfg: M2Crypto is not available: %s" % entry.get("name")
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def handle_event(self, event):
+ if event.code2str() == 'deleted':
+ return
+ try:
+ crypted = open(self.name).read()
+ except UnicodeDecodeError:
+ crypted = open(self.name, mode='rb').read()
+ except:
+ logger.error("Failed to read %s" % self.name)
+ return
+ # todo: let the user specify a passphrase by name
+ try:
+ self.data = decrypt(crypted)
+ except EVPError:
+ msg = "Failed to decrypt %s" % self.name
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def get_data(self, entry, metadata):
+ if self.data is None:
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to decrypt %s" % self.name)
+ return CfgGenerator.get_data(self, entry, metadata)
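Note on the new CfgEncryptedGenerator above: passphrases() pulls every option from the [encryption] section of the Bcfg2 configuration, and decrypt() simply tries each value until one does not raise EVPError. A minimal stand-alone sketch of the passphrase lookup, using only the standard library (the config path and section contents are illustrative assumptions):

    try:
        from configparser import ConfigParser   # Python 3
    except ImportError:
        from ConfigParser import ConfigParser   # Python 2

    cfp = ConfigParser()
    cfp.read("/etc/bcfg2.conf")   # illustrative path
    if cfp.has_section("encryption"):
        # option name -> passphrase, the same mapping passphrases() returns
        passphrases = dict((opt, cfp.get("encryption", opt))
                           for opt in cfp.options("encryption"))
    else:
        passphrases = {}
    # decrypt() then tries each passphrase in turn and returns the first
    # result that does not raise EVPError.
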
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
new file mode 100644
index 000000000..6605cca7c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
@@ -0,0 +1,26 @@
+import logging
+from Bcfg2.Bcfg2Py3k import StringIO
+from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import decrypt, \
+ CfgEncryptedGenerator
+
+logger = logging.getLogger(__name__)
+
+try:
+ from genshi.template import TemplateLoader
+except ImportError:
+ # CfgGenshiGenerator will raise errors if genshi doesn't exist
+ TemplateLoader = object
+
+
+class EncryptedTemplateLoader(TemplateLoader):
+ def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
+ plaintext = StringIO(decrypt(fileobj.read()))
+ return TemplateLoader._instantiate(self, cls, plaintext, filepath,
+ filename, encoding=encoding)
+
+
+class CfgEncryptedGenshiGenerator(CfgGenshiGenerator):
+ __extensions__ = ['genshi.crypt', 'crypt.genshi']
+ __loader_cls__ = EncryptedTemplateLoader
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
index 2c0a076d7..277a26f97 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
@@ -1,5 +1,7 @@
+import re
import sys
import logging
+import traceback
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Cfg import CfgGenerator
@@ -8,8 +10,10 @@ logger = logging.getLogger(__name__)
try:
import genshi.core
from genshi.template import TemplateLoader, NewTextTemplate
+ from genshi.template.eval import UndefinedError
have_genshi = True
except ImportError:
+ TemplateLoader = None
have_genshi = False
# snipped from TGenshi
@@ -23,14 +27,17 @@ def removecomment(stream):
class CfgGenshiGenerator(CfgGenerator):
__extensions__ = ['genshi']
+ __loader_cls__ = TemplateLoader
+ pyerror_re = re.compile('<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>')
def __init__(self, fname, spec, encoding):
CfgGenerator.__init__(self, fname, spec, encoding)
- self.loader = TemplateLoader()
if not have_genshi:
- msg = "Cfg: Genshi is not available: %s" % entry.get("name")
+ msg = "Cfg: Genshi is not available: %s" % fname
logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ self.loader = self.__loader_cls__()
+ self.template = None
@classmethod
def ignore(cls, event, basename=None):
@@ -44,10 +51,63 @@ class CfgGenshiGenerator(CfgGenerator):
metadata=metadata,
path=self.name).filter(removecomment)
try:
- return stream.render('text', encoding=self.encoding,
- strip_whitespace=False)
- except TypeError:
- return stream.render('text', encoding=self.encoding)
+ try:
+ return stream.render('text', encoding=self.encoding,
+ strip_whitespace=False)
+ except TypeError:
+ return stream.render('text', encoding=self.encoding)
+ except UndefinedError:
+ # a failure in a genshi expression _other_ than %{ python ... %}
+ err = sys.exc_info()[1]
+ stack = traceback.extract_tb(sys.exc_info()[2])
+ for quad in stack:
+ if quad[0] == self.name:
+ logger.error("Cfg: Error rendering %s at %s: %s" %
+ (fname, quad[2], err))
+ break
+ raise
+ except:
+ # a failure in a %{ python ... %} block -- the snippet in
+ # the traceback is just the beginning of the block.
+ err = sys.exc_info()[1]
+ stack = traceback.extract_tb(sys.exc_info()[2])
+ (filename, lineno, func, text) = stack[-1]
+ # this is horrible, and I deeply apologize to whoever gets
+ # to maintain this after I go to the Great Beer Garden in
+ # the Sky. genshi is incredibly opaque about what's being
+ # executed, so the only way I can find to determine which
+ # {% python %} block is being executed -- if there are
+ # multiples -- is to iterate through them and match the
+ # snippet of the first line that's in the traceback with
+ # the first non-empty line of the block.
+ execs = [contents
+ for etype, contents, loc in self.template.stream
+ if etype == self.template.EXEC]
+ contents = None
+ if len(execs) == 1:
+ contents = execs[0]
+ elif len(execs) > 1:
+ match = pyerror_re.match(func)
+ if match:
+ firstline = match.group(0)
+ for pyblock in execs:
+ if pyblock.startswith(firstline):
+ contents = pyblock
+ break
+ # else, no EXEC blocks -- WTF?
+ if contents:
+ # we now have the bogus block, but we need to get the
+ # offending line. To get there, we do (line number
+ # given in the exception) - (firstlineno from the
+ # internal genshi code object of the snippet) + 1 =
+ # (line number of the line with an error within the
+ # block, with all multiple line breaks elided to a
+ # single line break)
+ real_lineno = lineno - contents.code.co_firstlineno
+ src = re.sub(r'\n\n+', '\n', contents.source).splitlines()
+ logger.error("Cfg: Error rendering %s at %s: %s" %
+ (fname, src[real_lineno], err))
+ raise
def handle_event(self, event):
if event.code2str() == 'deleted':
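Note on the CfgGenshiGenerator change above: the rendering path now wrapped in error handling is plain Genshi; the template is generated with metadata and path in its context and rendered as text. A minimal sketch of that path with a stand-in metadata object (requires genshi; the template content and FakeMetadata class are illustrative):

    from genshi.template import NewTextTemplate

    class FakeMetadata(object):
        hostname = "client.example.com"
        profile = "basic"

    # same generate()/render('text') calls as get_data() above
    tmpl = NewTextTemplate("Welcome to ${metadata.hostname} "
                           "(profile ${metadata.profile})\n")
    stream = tmpl.generate(metadata=FakeMetadata(), path="/etc/motd")
    print(stream.render('text'))

When an expression or a python block in such a template fails, the new handlers report the template name and the offending location instead of a bare traceback.
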
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
index 8e962efb4..956ebfe17 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
@@ -9,7 +9,7 @@ class CfgInfoXML(CfgInfo):
def __init__(self, path):
CfgInfo.__init__(self, path)
- self.infoxml = Bcfg2.Server.Plugin.InfoXML(path, noprio=True)
+ self.infoxml = Bcfg2.Server.Plugin.InfoXML(path)
def bind_info_to_entry(self, entry, metadata):
mdata = dict()
@@ -22,3 +22,9 @@ class CfgInfoXML(CfgInfo):
def handle_event(self, event):
self.infoxml.HandleEvent()
+
+ def _set_info(self, entry, info):
+ CfgInfo._set_info(self, entry, info)
+ if '__children__' in info:
+ for child in info['__children__']:
+ entry.append(child)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
index 54c17c6c5..85c13c1ac 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
@@ -7,6 +7,10 @@ logger = logging.getLogger(__name__)
class CfgLegacyInfo(CfgInfo):
__basenames__ = ['info', ':info']
+ def __init__(self, path):
+ CfgInfo.__init__(self, path)
+ self.path = path
+
def bind_info_to_entry(self, entry, metadata):
self._set_info(entry, self.metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index 6c7585993..fe993ab54 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -6,11 +6,11 @@ import sys
import stat
import pkgutil
import logging
-import binascii
import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import u_str
+from Bcfg2.Bcfg2Py3k import u_str, unicode, b64encode
+import Bcfg2.Server.Lint
logger = logging.getLogger(__name__)
@@ -113,7 +113,8 @@ class CfgInfo(CfgBaseFileMatcher):
def _set_info(self, entry, info):
for key, value in list(info.items()):
- entry.attrib.__setitem__(key, value)
+ if not key.startswith("__"):
+ entry.attrib.__setitem__(key, value)
class CfgVerifier(CfgBaseFileMatcher):
@@ -152,7 +153,19 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
global PROCESSORS
if PROCESSORS is None:
PROCESSORS = []
- for submodule in pkgutil.walk_packages(path=__path__):
+ if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=__path__)
+ else:
+ #python 2.4
+ import glob
+ submodules = []
+ for path in __path__:
+ for submodule in glob.glob(os.path.join(path, "*.py")):
+ mod = '.'.join(submodule.split("/")[-1].split('.')[:-1])
+ if mod != '__init__':
+ submodules.append((None, mod, True))
+
+ for submodule in submodules:
module = getattr(__import__("%s.%s" %
(__name__,
submodule[1])).Server.Plugins.Cfg,
@@ -185,6 +198,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
return
elif action == 'changed':
self.entries[event.filename].handle_event(event)
+ return
elif action == 'deleted':
del self.entries[event.filename]
return
@@ -192,6 +206,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
logger.error("Could not process event %s for %s; ignoring" %
(action, event.filename))
+ def get_matching(self, metadata):
+ return [item for item in list(self.entries.values())
+ if (isinstance(item, CfgGenerator) and
+ item.specific.matches(metadata))]
+
def entry_init(self, event, proc):
if proc.__specific__:
Bcfg2.Server.Plugin.EntrySet.entry_init(
@@ -270,10 +289,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
if entry.get('encoding') == 'base64':
- data = binascii.b2a_base64(data)
+ data = b64encode(data)
else:
try:
- data = u_str(data, self.encoding)
+ if not isinstance(data, unicode):
+ data = u_str(data, self.encoding)
except UnicodeDecodeError:
msg = "Failed to decode %s: %s" % (entry.get('name'),
sys.exc_info()[1])
@@ -287,6 +307,10 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
logger.error("You need to specify base64 encoding for %s." %
entry.get('name'))
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ except TypeError:
+ # data is already unicode; newer versions of Cheetah
+ # seem to return unicode
+ pass
if data:
entry.text = data
@@ -298,7 +322,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
generators = [ent for ent in list(self.entries.values())
if (isinstance(ent, CfgGenerator) and
ent.specific.matches(metadata))]
- if not matching:
+ if not generators:
msg = "No base file found for %s" % entry.get('name')
logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
@@ -347,26 +371,26 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
if attr in new_entry]
if badattr:
# check for info files and inform user of their removal
- if os.path.exists(self.path + "/:info"):
- logger.info("Removing :info file and replacing with "
- "info.xml")
- os.remove(self.path + "/:info")
- if os.path.exists(self.path + "/info"):
- logger.info("Removing info file and replacing with "
- "info.xml")
- os.remove(self.path + "/info")
+ for ifile in ['info', ':info']:
+ info = os.path.join(self.path, ifile)
+ if os.path.exists(info):
+ logger.info("Removing %s and replacing with info.xml" %
+ info)
+ os.remove(info)
metadata_updates = {}
metadata_updates.update(self.metadata)
for attr in badattr:
metadata_updates[attr] = new_entry.get(attr)
infoxml = lxml.etree.Element('FileInfo')
infotag = lxml.etree.SubElement(infoxml, 'Info')
- [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
- for attr in metadata_updates]
+ [infotag.attrib.__setitem__(attr, metadata_updates[attr])
+ for attr in metadata_updates]
ofile = open(self.path + "/info.xml", "w")
- ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
+ ofile.write(lxml.etree.tostring(infoxml, xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
ofile.close()
- self.debug_log("Wrote file %s" % (self.path + "/info.xml"),
+ self.debug_log("Wrote file %s" % os.path.join(self.path,
+ "info.xml"),
flag=log)
@@ -385,9 +409,22 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
SETUP = core.setup
if 'validate' not in SETUP:
- SETUP['validate'] = Bcfg2.Options.CFG_VALIDATION
+ SETUP.add_option('validate', Bcfg2.Options.CFG_VALIDATION)
SETUP.reparse()
+ def has_generator(self, entry, metadata):
+ """ return True if the given entry can be generated for the
+ given metadata; False otherwise """
+ if entry.get('name') not in self.entries:
+ return False
+
+ for ent in self.entries[entry.get('name')].entries.values():
+ if ent.__specific__ and not ent.specific.matches(metadata):
+ continue
+ if isinstance(ent, CfgGenerator):
+ return True
+ return False
+
def AcceptChoices(self, entry, metadata):
return self.entries[entry.get('name')].list_accept_choices(entry,
metadata)
@@ -396,3 +433,26 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
return self.entries[new_entry.get('name')].write_update(specific,
new_entry,
log)
+
+class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ warn about usage of .cat and .diff files """
+
+ def Run(self):
+ for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
+ self.check_entry(basename, entry)
+
+
+ @classmethod
+ def Errors(cls):
+ return {"cat-file-used":"warning",
+ "diff-file-used":"warning"}
+
+ def check_entry(self, basename, entry):
+ cfg = self.core.plugins['Cfg']
+ for basename, entry in list(cfg.entries.items()):
+ for fname, processor in entry.entries.items():
+ if self.HandlesFile(fname) and isinstance(processor, CfgFilter):
+ extension = fname.split(".")[-1]
+ self.LintError("%s-file-used" % extension,
+ "%s file used on %s: %s" %
+ (extension, basename, fname))
diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py
index 999e078b9..63c590f0f 100644
--- a/src/lib/Bcfg2/Server/Plugins/DBStats.py
+++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -1,8 +1,8 @@
-import binascii
import difflib
import logging
import lxml.etree
import platform
+import sys
import time
try:
@@ -11,61 +11,47 @@ except ImportError:
pass
import Bcfg2.Server.Plugin
-import Bcfg2.Server.Reports.importscript
+from Bcfg2.Server.Reports.importscript import load_stat
from Bcfg2.Server.Reports.reports.models import Client
-import Bcfg2.Server.Reports.settings
-from Bcfg2.Server.Reports.updatefix import update_database
+from Bcfg2.Bcfg2Py3k import b64decode
+
# for debugging output only
logger = logging.getLogger('Bcfg2.Plugins.DBStats')
-class DBStats(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
+
+class DBStats(Bcfg2.Server.Plugin.ThreadedStatistics,
Bcfg2.Server.Plugin.PullSource):
name = 'DBStats'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullSource.__init__(self)
self.cpath = "%s/Metadata/clients.xml" % datastore
self.core = core
- logger.debug("Searching for new models to add to the statistics database")
- try:
- update_database()
- except Exception:
- inst = sys.exc_info()[1]
- logger.debug(str(inst))
- logger.debug(str(type(inst)))
+ if not self.core.database_available:
+ raise Bcfg2.Server.Plugin.PluginInitError
def handle_statistic(self, metadata, data):
newstats = data.find("Statistics")
newstats.set('time', time.asctime(time.localtime()))
- # ick
- data = lxml.etree.tostring(newstats)
- ndx = lxml.etree.XML(data)
- e = lxml.etree.Element('Node', name=metadata.hostname)
- e.append(ndx)
- container = lxml.etree.Element("ConfigStatistics")
- container.append(e)
- # FIXME need to build a metadata interface to expose a list of clients
start = time.time()
for i in [1, 2, 3]:
try:
- Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clients_xml.xdata,
- container,
- self.core.encoding,
- 0,
- logger,
- True,
- platform.node())
+ load_stat(metadata,
+ newstats,
+ self.core.encoding,
+ 0,
+ logger,
+ True,
+ platform.node())
logger.info("Imported data for %s in %s seconds" \
% (metadata.hostname, time.time() - start))
return
except MultipleObjectsReturned:
e = sys.exc_info()[1]
- logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
- (metadata.hostname, e))
+ logger.error("DBStats: MultipleObjectsReturned while "
+ "handling %s: %s" % (metadata.hostname, e))
logger.error("DBStats: Data is inconsistent")
break
except:
@@ -100,10 +86,10 @@ class DBStats(Bcfg2.Server.Plugin.Plugin,
if entry.reason.is_sensitive:
raise Bcfg2.Server.Plugin.PluginExecutionError
elif len(entry.reason.unpruned) != 0:
- ret.append('\n'.join(entry.reason.unpruned))
+ ret.append('\n'.join(entry.reason.unpruned))
elif entry.reason.current_diff != '':
if entry.reason.is_binary:
- ret.append(binascii.a2b_base64(entry.reason.current_diff))
+ ret.append(b64decode(entry.reason.current_diff))
else:
ret.append('\n'.join(difflib.restore(\
entry.reason.current_diff.split('\n'), 1)))
diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py
index b432474f2..90d9ecbe3 100644
--- a/src/lib/Bcfg2/Server/Plugins/Decisions.py
+++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -14,6 +14,8 @@ class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]
class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
+ basename_is_regex = True
+
def __init__(self, path, fam, encoding):
"""Container for decision specification files.
@@ -23,8 +25,7 @@ class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
- `encoding`: XML character encoding
"""
- pattern = '(white|black)list'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, '(white|black)list', path,
DecisionFile, encoding)
try:
fam.AddMonitor(path, self)
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
index 9b848baae..d3a1ee871 100644
--- a/src/lib/Bcfg2/Server/Plugins/Deps.py
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -7,27 +7,10 @@ import Bcfg2.Server.Plugin
class DNode(Bcfg2.Server.Plugin.INode):
"""DNode provides supports for single predicate types for dependencies."""
- raw = {'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"}
- containers = ['Group']
-
- def __init__(self, data, idict, parent=None):
- self.data = data
- self.contents = {}
- if parent == None:
- self.predicate = lambda x, d: True
- else:
- predicate = parent.predicate
- if data.tag in list(self.raw.keys()):
- self.predicate = eval(self.raw[data.tag] %
- {'name': data.get('name')},
- {'predicate': predicate})
- else:
- raise Exception
- mytype = self.__class__
- self.children = []
+ def _load_children(self, data, idict):
for item in data.getchildren():
if item.tag in self.containers:
- self.children.append(mytype(item, idict, self))
+ self.children.append(self.__class__(item, idict, self))
else:
data = [(child.tag, child.get('name'))
for child in item.getchildren()]
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
index 5beec7be0..632d586e8 100644
--- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -7,23 +7,24 @@ the client """
import os
import sys
import errno
-import binascii
import lxml.etree
import Bcfg2.Options
+import Bcfg2.Server
import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64decode
probecode = """#!/usr/bin/env python
import os
import pwd
import grp
-import binascii
import lxml.etree
+from Bcfg2.Bcfg2Py3k import b64encode
path = "%s"
if not os.path.exists(path):
- print "%%s does not exist" %% path
+ print("%%s does not exist" %% path)
raise SystemExit(1)
stat = os.stat(path)
@@ -32,18 +33,10 @@ data = lxml.etree.Element("ProbedFileData",
owner=pwd.getpwuid(stat[4])[0],
group=grp.getgrgid(stat[5])[0],
perms=oct(stat[0] & 07777))
-data.text = binascii.b2a_base64(open(path).read())
-print lxml.etree.tostring(data)
+data.text = b64encode(open(path).read())
+print(lxml.etree.tostring(data, xml_declaration=False).decode('UTF-8'))
"""
-class FileProbesConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
- """ Config file handler for FileProbes """
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
-
-
class FileProbes(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Probing):
""" This module allows you to probe a client for a file, which is then
@@ -53,14 +46,15 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
the client """
name = 'FileProbes'
- experimental = True
__author__ = 'chris.a.st.pierre@gmail.com'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Probing.__init__(self)
- self.config = FileProbesConfig(os.path.join(self.data, 'config.xml'),
- core.fam)
+ self.config = Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
+ 'config.xml'),
+ fam=core.fam,
+ should_monitor=True)
self.entries = dict()
self.probes = dict()
@@ -75,13 +69,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# do not probe for files that are already in Cfg and
# for which update is false; we can't possibly do
# anything with the data we get from such a probe
- try:
- if (entry.get('update', 'false').lower() == "false" and
- cfg.entries[path].get_pertinent_entries(entry,
- metadata)):
- continue
- except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
- pass
+ if (entry.get('update', 'false').lower() == "false" and
+ not cfg.has_generator(entry, metadata)):
+ continue
self.entries[metadata.hostname][path] = entry
probe = lxml.etree.Element('probe', name=path,
source=self.name,
@@ -102,7 +92,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
(data.get('name'), metadata.hostname))
else:
try:
- self.write_data(lxml.etree.XML(data.text), metadata)
+ self.write_data(lxml.etree.XML(data.text,
+ parser=Bcfg2.Server.XMLParser),
+ metadata)
except lxml.etree.XMLSyntaxError:
# if we didn't get XML back from the probe, assume
# it's an error message
@@ -111,23 +103,24 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
def write_data(self, data, metadata):
"""Write the probed file data to the bcfg2 specification."""
filename = data.get("name")
- contents = binascii.a2b_base64(data.text)
+ contents = b64decode(data.text)
entry = self.entries[metadata.hostname][filename]
cfg = self.core.plugins['Cfg']
specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname)
# we can't use os.path.join() for this because specific
# already has a leading /, which confuses os.path.join()
- fileloc = "%s%s" % (cfg.data, os.path.join(filename, specific))
+ fileloc = os.path.join(cfg.data,
+ os.path.join(filename, specific).lstrip("/"))
create = False
try:
cfg.entries[filename].bind_entry(entry, metadata)
- except Bcfg2.Server.Plugin.PluginExecutionError:
+ except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
create = True
# get current entry data
if entry.text and entry.get("encoding") == "base64":
- entrydata = binascii.a2b_base64(entry.text)
+ entrydata = b64decode(entry.text)
else:
entrydata = entry.text
@@ -135,7 +128,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
self.logger.info("Writing new probed file %s" % fileloc)
self.write_file(fileloc, contents)
self.verify_file(filename, contents, metadata)
- infoxml = os.path.join("%s%s" % (cfg.data, filename), "info.xml")
+ infoxml = os.path.join(cfg.data, filename.lstrip("/"), "info.xml")
self.write_infoxml(infoxml, entry, data)
elif entrydata == contents:
self.debug_log("Existing %s contents match probed contents" %
@@ -194,7 +187,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# get current entry data
if entry.get("encoding") == "base64":
- entrydata = binascii.a2b_base64(entry.text)
+ entrydata = b64decode(entry.text)
else:
entrydata = entry.text
if entrydata == contents:
@@ -206,8 +199,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
if os.path.exists(infoxml):
return
- self.logger.info("Writing info.xml at %s for %s" %
- (infoxml, data.get("name")))
+ self.logger.info("Writing %s for %s" % (infoxml, data.get("name")))
info = \
lxml.etree.Element("Info",
owner=data.get("owner",
@@ -222,8 +214,10 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
root = lxml.etree.Element("FileInfo")
root.append(info)
try:
- open(infoxml, "w").write(lxml.etree.tostring(root,
- pretty_print=True))
+ open(infoxml,
+ "w").write(lxml.etree.tostring(root,
+ xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
except IOError:
err = sys.exc_info()[1]
self.logger.error("Could not write %s: %s" % (fileloc, err))
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
index 58b4d4afb..837f47279 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -1,6 +1,9 @@
+import os
import re
+import sys
import logging
import lxml.etree
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
class PackedDigitRange(object):
@@ -58,7 +61,7 @@ class PatternMap(object):
return self.groups
def process_re(self, name):
- match = self.re.match(name)
+ match = self.re.search(name)
if not match:
return None
ret = list()
@@ -70,17 +73,22 @@ class PatternMap(object):
ret.append(newg)
return ret
+ def __str__(self):
+ return "%s: %s %s" % (self.__class__.__name__, self.pattern,
+ self.groups)
+
-class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
__identifier__ = None
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ def __init__(self, filename, fam=None):
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ should_monitor=True)
self.patterns = []
self.logger = logging.getLogger(self.__class__.__name__)
def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
self.patterns = []
for entry in self.xdata.xpath('//GroupPattern'):
try:
@@ -112,13 +120,42 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Connector):
name = "GroupPatterns"
- experimental = True
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = PatternFile(self.data + '/config.xml',
- core.fam)
+ self.config = PatternFile(os.path.join(self.data, 'config.xml'),
+ fam=core.fam)
def get_additional_groups(self, metadata):
return self.config.process_patterns(metadata.hostname)
+
+
+class GroupPatternsLint(Bcfg2.Server.Lint.ServerPlugin):
+ def Run(self):
+ """ run plugin """
+ cfg = self.core.plugins['GroupPatterns'].config
+ for entry in cfg.xdata.xpath('//GroupPattern'):
+ groups = [g.text for g in entry.findall('Group')]
+ self.check(entry, groups, ptype='NamePattern')
+ self.check(entry, groups, ptype='NameRange')
+
+ @classmethod
+ def Errors(cls):
+ return {"pattern-fails-to-initialize":"error"}
+
+ def check(self, entry, groups, ptype="NamePattern"):
+ if ptype == "NamePattern":
+ pmap = lambda p: PatternMap(p, None, groups)
+ else:
+ pmap = lambda p: PatternMap(None, p, groups)
+
+ for el in entry.findall(ptype):
+ pat = el.text
+ try:
+ pmap(pat)
+ except:
+ err = sys.exc_info()[1]
+ self.LintError("pattern-fails-to-initialize",
+ "Failed to initialize %s %s for %s: %s" %
+ (ptype, pat, entry.get('pattern'), err))
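Note on the GroupPatterns changes above: switching from re.match() to re.search() means a NamePattern now matches anywhere in the client name, not only at its start, and the new lint plugin catches broken patterns simply by instantiating a PatternMap for each one. A small sketch of the same call the lint check makes (assumes the Bcfg2 server package is importable; the pattern, groups, and hostname are examples, and the $1 substitution assumes the plugin's usual backreference syntax):

    from Bcfg2.Server.Plugins.GroupPatterns import PatternMap

    # one NamePattern with two Group children, as they would appear in config.xml
    pmap = PatternMap(r'www(\d+)\.', None, ['webserver', 'frontend$1'])

    # with re.search() this matches even though the pattern is not
    # anchored at the start of the name
    print(pmap.process_re('node-www01.example.com'))
    # expected: ['webserver', 'frontend01']
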
diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
index e9c1c1cff..69b019160 100644
--- a/src/lib/Bcfg2/Server/Plugins/Hostbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
@@ -3,19 +3,24 @@ This file provides the Hostbase plugin.
It manages dns/dhcp/nis host information
"""
+from lxml.etree import Element, SubElement
import os
+import re
+from time import strftime
os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
-from lxml.etree import Element, SubElement
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
-from time import strftime
-from sets import Set
from django.template import Context, loader
from django.db import connection
-import re
# Compatibility imports
from Bcfg2.Bcfg2Py3k import StringIO
+try:
+ set
+except NameError:
+ # deprecated since python 2.6
+ from sets import Set as set
+
class Hostbase(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
@@ -383,7 +388,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
""")
hostbase = cursor.fetchall()
domains = [host[0].split(".", 1)[1] for host in hostbase]
- domains_set = Set(domains)
+ domains_set = set(domains)
domain_data = [(domain, domains.count(domain)) for domain in domains_set]
domain_data.sort()
@@ -393,7 +398,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
ips = cursor.fetchall()
three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
for ip in ips]
- three_octets_set = Set(three_octets)
+ three_octets_set = set(three_octets)
three_octets_data = [(octet, three_octets.count(octet)) \
for octet in three_octets_set]
three_octets_data.sort()
@@ -412,7 +417,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
append_data.append((three_octet, tuple(tosort)))
two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
- two_octets_set = Set(two_octets)
+ two_octets_set = set(two_octets)
two_octets_data = [(octet, two_octets.count(octet))
for octet in two_octets_set]
two_octets_data.sort()
@@ -446,7 +451,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
else:
if appenddata[0] == ip[0]:
simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
+ ips.append((appenddata[2], appenddata[0], set(namelist),
cnamelist, simple, appenddata[1]))
appenddata = ip
simple = True
@@ -455,7 +460,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
if ip[4]:
cnamelist.append(ip[4].split('.', 1)[0])
simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
+ ips.append((appenddata[2], appenddata[0], set(namelist),
cnamelist, simple, appenddata[1]))
context = Context({
'subnet': subnet[0],
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index 29abf5b13..9883085db 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -139,7 +139,7 @@ class LdapConnection(object):
result = self.conn.search_s(
query.base,
SCOPE_MAP[query.scope],
- query.filter,
+ query.filter.replace("\\", "\\\\"),
query.attrs,
)
break
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index 970126b80..a5fa78088 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -2,17 +2,39 @@
This file stores persistent metadata for the Bcfg2 Configuration Repository.
"""
-import copy
-import fcntl
-import lxml.etree
+import re
import os
-import os.path
-import socket
import sys
import time
-
-import Bcfg2.Server.FileMonitor
+import copy
+import fcntl
+import socket
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
+from Bcfg2.Bcfg2Py3k import MutableMapping
+from Bcfg2.version import Bcfg2VersionInfo
+
+try:
+ from django.db import models
+ has_django = True
+except ImportError:
+ has_django = False
+
+
+try:
+ all
+except NameError:
+ # some of the crazy lexical closure stuff below works with all()
+ # but not with this loop inline. i really don't understand
+ # lexical closures some^Wmost days
+ def all(iterable):
+ for element in iterable:
+ if not element:
+ return False
+ return True
def locked(fd):
@@ -24,28 +46,68 @@ def locked(fd):
return False
-class MetadataConsistencyError(Exception):
- """This error gets raised when metadata is internally inconsistent."""
- pass
+if has_django:
+ class MetadataClientModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255, primary_key=True)
+ version = models.CharField(max_length=31, null=True)
+ class ClientVersions(MutableMapping):
+ def __getitem__(self, key):
+ try:
+ return MetadataClientModel.objects.get(hostname=key).version
+ except MetadataClientModel.DoesNotExist:
+ raise KeyError(key)
+
+ def __setitem__(self, key, value):
+ client = MetadataClientModel.objects.get_or_create(hostname=key)[0]
+ client.version = value
+ client.save()
+
+ def __delitem__(self, key):
+ # UserDict didn't require __delitem__, but MutableMapping
+ # does. we don't want deleting a client version record to
+ # delete the client, so we just set the version to None,
+ # which is kinda like deleting it, but not really.
+ try:
+ client = MetadataClientModel.objects.get(hostname=key)
+ except MetadataClientModel.DoesNotExist:
+ raise KeyError(key)
+ client.version = None
+ client.save()
-class MetadataRuntimeError(Exception):
- """This error is raised when the metadata engine
- is called prior to reading enough data.
- """
- pass
+ def __len__(self):
+ return MetadataClientModel.objects.count()
+ def __iter__(self):
+ for client in MetadataClientModel.objects.all():
+ yield client.hostname
-class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ def keys(self):
+ return [c.hostname for c in MetadataClientModel.objects.all()]
+
+ def __contains__(self, key):
+ try:
+ client = MetadataClientModel.objects.get(hostname=key)
+ return True
+ except MetadataClientModel.DoesNotExist:
+ return False
+
+
+class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
"""Handles xml config files and all XInclude statements"""
def __init__(self, metadata, watch_clients, basefile):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- os.path.join(metadata.data,
- basefile),
- metadata.core.fam)
+ # we tell XMLFileBacked _not_ to add a monitor for this file,
+ # because the main Metadata plugin has already added one.
+ # then we immediately set should_monitor to the proper value,
+ # so that XInclude'd files get properly watched
+ fpath = os.path.join(metadata.data, basefile)
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath,
+ fam=metadata.core.fam,
+ should_monitor=False)
+ self.should_monitor = watch_clients
self.metadata = metadata
self.basefile = basefile
- self.should_monitor = watch_clients
self.data = None
self.basedata = None
self.basedir = metadata.data
@@ -56,25 +118,22 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
@property
def xdata(self):
if not self.data:
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" %
+ self.basefile)
return self.data
@property
def base_xdata(self):
if not self.basedata:
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" %
+ self.basefile)
return self.basedata
- def add_monitor(self, fpath, fname):
- """Add a fam monitor for an included file"""
- if self.should_monitor:
- self.metadata.core.fam.AddMonitor(fpath, self.metadata)
- self.extras.append(fname)
-
def load_xml(self):
"""Load changes from XML"""
try:
- xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile))
+ xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile),
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
self.logger.error('Failed to parse %s' % self.basefile)
return
@@ -100,12 +159,14 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
try:
datafile = open(tmpfile, 'w')
except IOError:
- e = sys.exc_info()[1]
- self.logger.error("Failed to write %s: %s" % (tmpfile, e))
- raise MetadataRuntimeError
+ msg = "Failed to write %s: %s" % (tmpfile, sys.exc_info()[1])
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
# prep data
dataroot = xmltree.getroot()
- newcontents = lxml.etree.tostring(dataroot, pretty_print=True)
+ newcontents = lxml.etree.tostring(dataroot, xml_declaration=False,
+ pretty_print=True).decode('UTF-8')
+
fd = datafile.fileno()
while locked(fd) == True:
@@ -114,21 +175,24 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
datafile.write(newcontents)
except:
fcntl.lockf(fd, fcntl.LOCK_UN)
- self.logger.error("Metadata: Failed to write new xml data to %s" %
- tmpfile, exc_info=1)
+ msg = "Metadata: Failed to write new xml data to %s: %s" % \
+ (tmpfile, sys.exc_info()[1])
+ self.logger.error(msg, exc_info=1)
os.unlink(tmpfile)
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
datafile.close()
-
# check if clients.xml is a symlink
if os.path.islink(fname):
fname = os.readlink(fname)
try:
os.rename(tmpfile, fname)
+
except:
- self.logger.error("Metadata: Failed to rename %s" % tmpfile)
- raise MetadataRuntimeError
+ msg = "Metadata: Failed to rename %s: %s" % (tmpfile,
+ sys.exc_info()[1])
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
def find_xml_for_xpath(self, xpath):
"""Find and load xml file containing the xpath query"""
@@ -144,22 +208,26 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
"""Try to find the data in included files"""
for included in self.extras:
try:
- xdata = lxml.etree.parse(os.path.join(self.basedir,
- included))
+ xdata = lxml.etree.parse(included,
+ parser=Bcfg2.Server.XMLParser)
cli = xdata.xpath(xpath)
if len(cli) > 0:
- return {'filename': os.path.join(self.basedir,
- included),
+ return {'filename': included,
'xmltree': xdata,
'xquery': cli}
except lxml.etree.XMLSyntaxError:
- self.logger.error('Failed to parse %s' % (included))
+ self.logger.error('Failed to parse %s' % included)
return {}
+ def add_monitor(self, fpath):
+ self.extras.append(fpath)
+ if self.fam and self.should_monitor:
+ self.fam.AddMonitor(fpath, self.metadata)
+
def HandleEvent(self, event):
"""Handle fam events"""
- filename = event.filename.split('/')[-1]
- if filename in self.extras:
+ filename = os.path.basename(event.filename)
+ if event.filename in self.extras:
if event.code2str() == 'exists':
return False
elif filename != self.basefile:
@@ -172,8 +240,8 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
class ClientMetadata(object):
"""This object contains client metadata."""
- def __init__(self, client, profile, groups, bundles,
- aliases, addresses, categories, uuid, password, query):
+ def __init__(self, client, profile, groups, bundles, aliases, addresses,
+ categories, uuid, password, version, query):
self.hostname = client
self.profile = profile
self.bundles = bundles
@@ -184,6 +252,11 @@ class ClientMetadata(object):
self.uuid = uuid
self.password = password
self.connectors = []
+ self.version = version
+ try:
+ self.version_info = Bcfg2VersionInfo(version)
+ except:
+ self.version_info = None
self.query = query
def inGroup(self, group):
@@ -198,7 +271,8 @@ class ClientMetadata(object):
class MetadataQuery(object):
- def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category):
+ def __init__(self, by_name, get_clients, by_groups, by_profiles,
+ all_groups, all_groups_in_category):
# resolver is set later
self.by_name = by_name
self.names_by_groups = by_groups
@@ -217,74 +291,125 @@ class MetadataQuery(object):
return [self.by_name(name) for name in self.all_clients()]
-class Metadata(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Metadata,
- Bcfg2.Server.Plugin.Statistics):
+class MetadataGroup(tuple):
+ def __new__(cls, name, bundles=None, category=None,
+ is_profile=False, is_public=False, is_private=False):
+ if bundles is None:
+ bundles = set()
+ return tuple.__new__(cls, (bundles, category))
+
+ def __init__(self, name, bundles=None, category=None,
+ is_profile=False, is_public=False, is_private=False):
+ if bundles is None:
+ bundles = set()
+ tuple.__init__(self)
+ self.name = name
+ self.bundles = bundles
+ self.category = category
+ self.is_profile = is_profile
+ self.is_public = is_public
+ self.is_private = is_private
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return "%s %s (bundles=%s, category=%s)" % \
+ (self.__class__.__name__, self.name, self.bundles,
+ self.category)
+
+ def __hash__(self):
+ return hash(self.name)
+
+class Metadata(Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Statistics,
+ Bcfg2.Server.Plugin.DatabaseBacked):
"""This class contains data for bcfg2 server metadata."""
__author__ = 'bcfg-dev@mcs.anl.gov'
name = "Metadata"
sort_order = 500
def __init__(self, core, datastore, watch_clients=True):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Metadata.__init__(self)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- if watch_clients:
- try:
- core.fam.AddMonitor(os.path.join(self.data, "groups.xml"), self)
- core.fam.AddMonitor(os.path.join(self.data, "clients.xml"), self)
- except:
- print("Unable to add file monitor for groups.xml or clients.xml")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- self.clients_xml = XMLMetadataConfig(self, watch_clients, 'clients.xml')
- self.groups_xml = XMLMetadataConfig(self, watch_clients, 'groups.xml')
- self.states = {}
- if watch_clients:
- self.states = {"groups.xml": False,
- "clients.xml": False}
- self.addresses = {}
+ Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
+ self.watch_clients = watch_clients
+ self.states = dict()
+ self.extra = dict()
+ self.handlers = []
+ self._handle_file("groups.xml")
+ if (self._use_db and
+ os.path.exists(os.path.join(self.data, "clients.xml"))):
+ self.logger.warning("Metadata: database enabled but clients.xml"
+ "found, parsing in compatibility mode")
+ self._handle_file("clients.xml")
+ elif not self._use_db:
+ self._handle_file("clients.xml")
+
+ # mapping of clientname -> authtype
self.auth = dict()
- self.clients = {}
- self.aliases = {}
- self.groups = {}
- self.cgroups = {}
- self.public = []
- self.private = []
- self.profiles = []
- self.categories = {}
- self.bad_clients = {}
- self.uuid = {}
+ # list of clients required to have non-global password
self.secure = []
+ # list of floating clients
self.floating = []
+ # mapping of clientname -> password
self.passwords = {}
+ self.addresses = {}
+ self.raddresses = {}
+ # mapping of clientname -> [groups]
+ self.clientgroups = {}
+ # list of clients
+ self.clients = []
+ self.aliases = {}
+ self.raliases = {}
+ # mapping of groupname -> MetadataGroup object
+ self.groups = {}
+ # mappings of predicate -> MetadataGroup object
+ self.group_membership = dict()
+ self.negated_groups = dict()
+ # mapping of hostname -> version string
+ if self._use_db:
+ self.versions = ClientVersions()
+ else:
+ self.versions = dict()
+ self.uuid = {}
self.session_cache = {}
self.default = None
self.pdirty = False
- self.extra = {'groups.xml': [],
- 'clients.xml': []}
self.password = core.password
self.query = MetadataQuery(core.build_metadata,
- lambda: list(self.clients.keys()),
+ lambda: list(self.clients),
self.get_client_names_by_groups,
self.get_client_names_by_profiles,
self.get_all_group_names,
self.get_all_groups_in_category)
@classmethod
- def init_repo(cls, repo, groups, os_selection, clients):
- path = os.path.join(repo, cls.name)
- os.makedirs(path)
- open(os.path.join(repo, "Metadata", "groups.xml"),
- "w").write(groups % os_selection)
- open(os.path.join(repo, "Metadata", "clients.xml"),
- "w").write(clients % socket.getfqdn())
-
- def get_groups(self):
- '''return groups xml tree'''
- groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
- root = groups_tree.getroot()
- return root
+ def init_repo(cls, repo, **kwargs):
+ # must use super here; inheritance works funny with class methods
+ super(Metadata, cls).init_repo(repo)
+
+ for fname in ["clients.xml", "groups.xml"]:
+ aname = re.sub(r'[^A-z0-9_]', '_', fname)
+ if aname in kwargs:
+ open(os.path.join(repo, cls.name, fname),
+ "w").write(kwargs[aname])
+
+ def _handle_file(self, fname):
+ if self.watch_clients:
+ try:
+ self.core.fam.AddMonitor(os.path.join(self.data, fname), self)
+ except:
+ err = sys.exc_info()[1]
+ msg = "Unable to add file monitor for %s: %s" % (fname, err)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+ self.states[fname] = False
+ aname = re.sub(r'[^A-z0-9_]', '_', fname)
+ xmlcfg = XMLMetadataConfig(self, self.watch_clients, fname)
+ setattr(self, aname, xmlcfg)
+ self.handlers.append(xmlcfg.HandleEvent)
+ self.extra[fname] = []
def _search_xdata(self, tag, name, tree, alias=False):
for node in tree.findall("//%s" % tag):
@@ -312,7 +437,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node != None:
self.logger.error("%s \"%s\" already exists" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
element = lxml.etree.SubElement(config.base_xdata.getroot(),
tag, name=name)
if attribs:
@@ -322,70 +447,130 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
def add_group(self, group_name, attribs):
"""Add group to groups.xml."""
- return self._add_xdata(self.groups_xml, "Group", group_name,
- attribs=attribs)
+ if self._use_db:
+ msg = "Metadata does not support adding groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._add_xdata(self.groups_xml, "Group", group_name,
+ attribs=attribs)
def add_bundle(self, bundle_name):
"""Add bundle to groups.xml."""
- return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
+ if self._use_db:
+ msg = "Metadata does not support adding bundles with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
- def add_client(self, client_name, attribs):
+ def add_client(self, client_name, attribs=None):
"""Add client to clients.xml."""
- return self._add_xdata(self.clients_xml, "Client", client_name,
- attribs=attribs, alias=True)
+ if attribs is None:
+ attribs = dict()
+ if self._use_db:
+ client = MetadataClientModel(hostname=client_name)
+ client.save()
+ self.clients = self.list_clients()
+ return client
+ else:
+ return self._add_xdata(self.clients_xml, "Client", client_name,
+ attribs=attribs, alias=True)
def _update_xdata(self, config, tag, name, attribs, alias=False):
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node == None:
self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
self.logger.error("Unexpected error finding %s \"%s\"" %
(tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
for key, val in list(attribs.items()):
xdict['xquery'][0].set(key, val)
config.write_xml(xdict['filename'], xdict['xmltree'])
def update_group(self, group_name, attribs):
"""Update a groups attributes."""
- return self._update_xdata(self.groups_xml, "Group", group_name, attribs)
+ if self._use_db:
+ msg = "Metadata does not support updating groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._update_xdata(self.groups_xml, "Group", group_name,
+ attribs)
def update_client(self, client_name, attribs):
"""Update a clients attributes."""
- return self._update_xdata(self.clients_xml, "Client", client_name,
- attribs, alias=True)
+ if self._use_db:
+ msg = "Metadata does not support updating clients with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._update_xdata(self.clients_xml, "Client", client_name,
+ attribs, alias=True)
+
+ def list_clients(self):
+ """ List all clients in client database """
+ if self._use_db:
+ return set([c.hostname for c in MetadataClientModel.objects.all()])
+ else:
+ return self.clients
def _remove_xdata(self, config, tag, name, alias=False):
node = self._search_xdata(tag, name, config.xdata)
if node == None:
self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
self.logger.error("Unexpected error finding %s \"%s\"" %
(tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict['xquery'][0].getparent().remove(xdict['xquery'][0])
- self.groups_xml.write_xml(xdict['filename'], xdict['xmltree'])
+ config.write_xml(xdict['filename'], xdict['xmltree'])
def remove_group(self, group_name):
"""Remove a group."""
- return self._remove_xdata(self.groups_xml, "Group", group_name)
+ if self._use_db:
+ msg = "Metadata does not support removing groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._remove_xdata(self.groups_xml, "Group", group_name)
def remove_bundle(self, bundle_name):
"""Remove a bundle."""
- return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+ if self._use_db:
+ msg = "Metadata does not support removing bundles with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+
+ def remove_client(self, client_name):
+ """Remove a bundle."""
+ if self._use_db:
+ try:
+ client = MetadataClientModel.objects.get(hostname=client_name)
+ except MetadataClientModel.DoesNotExist:
+ msg = "Client %s does not exist" % client_name
+ self.logger.warning(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ client.delete()
+ self.clients = self.list_clients()
+ else:
+ return self._remove_xdata(self.clients_xml, "Client", client_name)
def _handle_clients_xml_event(self, event):
xdata = self.clients_xml.xdata
- self.clients = {}
+ self.clients = []
+ self.clientgroups = {}
self.aliases = {}
self.raliases = {}
- self.bad_clients = {}
self.secure = []
self.floating = []
self.addresses = {}
@@ -406,12 +591,15 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
'cert+password')
if 'uuid' in client.attrib:
self.uuid[client.get('uuid')] = clname
- if client.get('secure', 'false') == 'true':
+ if client.get('secure', 'false').lower() == 'true':
self.secure.append(clname)
- if client.get('location', 'fixed') == 'floating':
+ if (client.get('location', 'fixed') == 'floating' or
+ client.get('floating', 'false').lower() == 'true'):
self.floating.append(clname)
if 'password' in client.attrib:
self.passwords[clname] = client.get('password')
+ if 'version' in client.attrib:
+ self.versions[clname] = client.get('version')
self.raliases[clname] = set()
for alias in client.findall('Alias'):
@@ -426,115 +614,199 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
if clname not in self.raddresses:
self.raddresses[clname] = set()
self.raddresses[clname].add(alias.get('address'))
- self.clients.update({clname: client.get('profile')})
+ self.clients.append(clname)
+ try:
+ self.clientgroups[clname].append(client.get('profile'))
+ except KeyError:
+ self.clientgroups[clname] = [client.get('profile')]
self.states['clients.xml'] = True
+ if self._use_db:
+ self.clients = self.list_clients()
def _handle_groups_xml_event(self, event):
- xdata = self.groups_xml.xdata
- self.public = []
- self.private = []
- self.profiles = []
self.groups = {}
- grouptmp = {}
- self.categories = {}
- groupseen = list()
- for group in xdata.xpath('//Groups/Group'):
- if group.get('name') not in groupseen:
- groupseen.append(group.get('name'))
+
+ # get_condition and aggregate_conditions must be separate
+ # functions in order to ensure that the scope is right for the
+ # closures they return
+ def get_condition(element):
+ negate = element.get('negate', 'false').lower() == 'true'
+ pname = element.get("name")
+ if element.tag == 'Group':
+ return lambda c, g, _: negate != (pname in g)
+ elif element.tag == 'Client':
+ return lambda c, g, _: negate != (pname == c)
+
+ def aggregate_conditions(conditions):
+ return lambda client, groups, cats: \
+ all(cond(client, groups, cats) for cond in conditions)
+
+ # first, we get a list of all of the groups declared in the
+ # file. we do this in two stages because the old way of
+ # parsing groups.xml didn't support nested groups; in the old
+ # way, only Group tags under a Groups tag counted as
+ # declarative. so we parse those first, and then parse the
+ # other Group tags if they haven't already been declared.
+ # this lets you set options on a group (e.g., public="false")
+ # at the top level and then just use the name elsewhere, which
+ # is the original behavior
+ for grp in self.groups_xml.xdata.xpath("//Groups/Group") + \
+ self.groups_xml.xdata.xpath("//Groups/Group//Group"):
+ if grp.get("name") in self.groups:
+ continue
+ self.groups[grp.get("name")] = \
+ MetadataGroup(grp.get("name"),
+ bundles=[b.get("name")
+ for b in grp.findall("Bundle")],
+ category=grp.get("category"),
+ is_profile=grp.get("profile", "false") == "true",
+ is_public=grp.get("public", "false") == "true",
+ is_private=grp.get("public", "true") == "false")
+ if grp.get('default', 'false') == 'true':
+ self.default = grp.get('name')
+
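+        # A minimal, hypothetical groups.xml declaration and the
+        # MetadataGroup the loop above would build from it:
+        #
+        #   <Groups>
+        #     <Group name="web-server" profile="true" public="true"
+        #            category="role">
+        #       <Bundle name="apache"/>
+        #     </Group>
+        #   </Groups>
+        #
+        # becomes MetadataGroup("web-server", bundles=["apache"],
+        # category="role", is_profile=True, is_public=True).
+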
+ self.group_membership = dict()
+ self.negated_groups = dict()
+ self.options = dict()
+ # confusing loop condition; the XPath query asks for all
+ # elements under a Group tag under a Groups tag; that is
+ # infinitely recursive, so "all" elements really means _all_
+ # elements. We then manually filter out non-Group elements
+ # since there doesn't seem to be a way to get Group elements
+ # of arbitrary depth with particular ultimate ancestors in
+ # XPath. We do the same thing for Client tags.
+ for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \
+ self.groups_xml.xdata.xpath("//Groups/Client//*"):
+ if ((el.tag != 'Group' and el.tag != 'Client') or
+ el.getchildren()):
+ continue
+
+ conditions = []
+ for parent in el.iterancestors():
+ cond = get_condition(parent)
+ if cond:
+ conditions.append(cond)
+
+ gname = el.get("name")
+ if el.get("negate", "false").lower() == "true":
+ self.negated_groups[aggregate_conditions(conditions)] = \
+ self.groups[gname]
else:
- self.logger.error("Metadata: Group %s defined multiply" %
- group.get('name'))
- grouptmp[group.get('name')] = \
- ([item.get('name') for item in group.findall('./Bundle')],
- [item.get('name') for item in group.findall('./Group')])
- grouptmp[group.get('name')][1].append(group.get('name'))
- if group.get('default', 'false') == 'true':
- self.default = group.get('name')
- if group.get('profile', 'false') == 'true':
- self.profiles.append(group.get('name'))
- if group.get('public', 'false') == 'true':
- self.public.append(group.get('name'))
- elif group.get('public', 'true') == 'false':
- self.private.append(group.get('name'))
- if 'category' in group.attrib:
- self.categories[group.get('name')] = group.get('category')
-
- for group in grouptmp:
- # self.groups[group] => (bundles, groups, categories)
- self.groups[group] = (set(), set(), {})
- tocheck = [group]
- group_cat = self.groups[group][2]
- while tocheck:
- now = tocheck.pop()
- self.groups[group][1].add(now)
- if now in grouptmp:
- (bundles, groups) = grouptmp[now]
- for ggg in groups:
- if ggg in self.groups[group][1]:
- continue
- if (ggg not in self.categories or \
- self.categories[ggg] not in self.groups[group][2]):
- self.groups[group][1].add(ggg)
- tocheck.append(ggg)
- if ggg in self.categories:
- group_cat[self.categories[ggg]] = ggg
- elif ggg in self.categories:
- self.logger.info("Group %s: %s cat-suppressed %s" % \
- (group,
- group_cat[self.categories[ggg]],
- ggg))
- [self.groups[group][0].add(bund) for bund in bundles]
+                if gname in self.groups and self.groups[gname].category:
+ category = self.groups[gname].category
+
+ def in_cat(client, groups, categories):
+ if category in categories:
+ # this is debug, not warning, because it
+ # gets called a _lot_ -- every time a
+ # group in a category is processed for
+ # every creation of client metadata. this
+ # message is produced in two other places,
+ # so the user should get warned by one of
+ # those.
+ self.logger.debug("%s: Group %s suppressed by "
+ "category %s; %s already a "
+ "member of %s" %
+ (self.name, gname, category,
+ client, categories[category]))
+ return False
+ return True
+ conditions.append(in_cat)
+
+ self.group_membership[aggregate_conditions(conditions)] = \
+ self.groups[gname]
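+        # Illustrative example of the predicates built above, assuming a
+        # hypothetical nested declaration in groups.xml:
+        #
+        #   <Groups>
+        #     <Group name="web">
+        #       <Group name="apache"/>
+        #     </Group>
+        #   </Groups>
+        #
+        # yields group_membership[pred] = self.groups["apache"], where
+        # pred(client, groups, categories) is true only when "web" is in
+        # groups; a Client ancestor would compare the client name instead,
+        # and negate="true" on the leaf would populate negated_groups.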
self.states['groups.xml'] = True
def HandleEvent(self, event):
"""Handle update events for data files."""
- if self.clients_xml.HandleEvent(event):
- self._handle_clients_xml_event(event)
- elif self.groups_xml.HandleEvent(event):
- self._handle_groups_xml_event(event)
-
- if False not in list(self.states.values()):
- # check that all client groups are real and complete
- real = list(self.groups.keys())
- for client in list(self.clients.keys()):
- if self.clients[client] not in self.profiles:
- self.logger.error("Client %s set as nonexistent or "
- "incomplete group %s" %
- (client, self.clients[client]))
- self.logger.error("Removing client mapping for %s" % client)
- self.bad_clients[client] = self.clients[client]
- del self.clients[client]
- for bclient in list(self.bad_clients.keys()):
- if self.bad_clients[bclient] in self.profiles:
- self.logger.info("Restored profile mapping for client %s" %
- bclient)
- self.clients[bclient] = self.bad_clients[bclient]
- del self.bad_clients[bclient]
-
- def set_profile(self, client, profile, addresspair):
+ for hdlr in self.handlers:
+ aname = re.sub(r'[^A-z0-9_]', '_', os.path.basename(event.filename))
+ if hdlr(event):
+ try:
+ proc = getattr(self, "_handle_%s_event" % aname)
+ except AttributeError:
+ proc = self._handle_default_event
+ proc(event)
+
+ if False not in list(self.states.values()) and self.debug_flag:
+ # check that all groups are real and complete. this is
+ # just logged at a debug level because many groups might
+ # be probed, and we don't want to warn about them.
+ for client, groups in list(self.clientgroups.items()):
+ for group in groups:
+ if group not in self.groups:
+ self.debug_log("Client %s set as nonexistent group %s" %
+ (client, group))
+ for gname, ginfo in list(self.groups.items()):
+ for group in ginfo.groups:
+ if group not in self.groups:
+ self.debug_log("Group %s set as nonexistent group %s" %
+ (gname, group))
+
+
+ def set_profile(self, client, profile, addresspair, force=False):
"""Set group parameter for provided client."""
- self.logger.info("Asserting client %s profile to %s" % (client, profile))
+ self.logger.info("Asserting client %s profile to %s" %
+ (client, profile))
if False in list(self.states.values()):
- raise MetadataRuntimeError
- if profile not in self.public:
- self.logger.error("Failed to set client %s to private group %s" %
- (client, profile))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
+ if not force and profile not in self.groups:
+ msg = "Profile group %s does not exist" % profile
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+        if not force and not self.groups[profile].is_public:
+ msg = "Cannot set client %s to private group %s" % (client, profile)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+
if client in self.clients:
- self.logger.info("Changing %s group from %s to %s" %
- (client, self.clients[client], profile))
+ if self._use_db:
+ msg = "DBMetadata does not support asserting client profiles"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ profiles = [g for g in self.clientgroups[client]
+ if g in self.groups and self.groups[g].is_profile]
+ self.logger.info("Changing %s profile from %s to %s" %
+ (client, profiles, profile))
self.update_client(client, dict(profile=profile))
+ if client in self.clientgroups:
+ for p in profiles:
+ self.clientgroups[client].remove(p)
+ self.clientgroups[client].append(profile)
+ else:
+ self.clientgroups[client] = [profile]
else:
self.logger.info("Creating new client: %s, profile %s" %
(client, profile))
- if addresspair in self.session_cache:
- # we are working with a uuid'd client
- self.add_client(self.session_cache[addresspair][1],
- dict(uuid=client, profile=profile,
- address=addresspair[0]))
+ if self._use_db:
+ self.add_client(client)
else:
- self.add_client(client, dict(profile=profile))
- self.clients[client] = profile
+ if addresspair in self.session_cache:
+ # we are working with a uuid'd client
+ self.add_client(self.session_cache[addresspair][1],
+ dict(uuid=client, profile=profile,
+ address=addresspair[0]))
+ else:
+ self.add_client(client, dict(profile=profile))
+ self.clients.append(client)
+ self.clientgroups[client] = [profile]
+ if not self._use_db:
+ self.clients_xml.write()
+
+ def set_version(self, client, version):
+ """Set group parameter for provided client."""
+ self.logger.info("Setting client %s version to %s" % (client, version))
+ if client in self.clients:
+ self.logger.info("Setting version on client %s to %s" %
+ (client, version))
+ self.update_client(client, dict(version=version))
+ else:
+ msg = "Cannot set version on non-existent client %s" % client
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ self.versions[client] = version
self.clients_xml.write()
def resolve_client(self, addresspair, cleanup_cache=False):
@@ -549,7 +821,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
# _any_ port numbers - perhaps a priority queue could
# be faster?
curtime = time.time()
- for addrpair in self.session_cache.keys():
+ for addrpair in list(self.session_cache.keys()):
if addresspair[0] == addrpair[0]:
(stamp, _) = self.session_cache[addrpair]
if curtime - stamp > cache_ttl:
@@ -565,9 +837,9 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
address = addresspair[0]
if address in self.addresses:
if len(self.addresses[address]) != 1:
- self.logger.error("Address %s has multiple reverse assignments; "
- "a uuid must be used" % (address))
- raise MetadataConsistencyError
+ err = "Address %s has multiple reverse assignments; a uuid must be used" % address
+ self.logger.error(err)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(err)
return self.addresses[address][0]
try:
cname = socket.gethostbyaddr(address)[0].lower()
@@ -575,34 +847,102 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
return self.aliases[cname]
return cname
except socket.herror:
- warning = "address resolution error for %s" % (address)
+ warning = "address resolution error for %s" % address
self.logger.warning(warning)
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(warning)
+
+ def _merge_groups(self, client, groups, categories=None):
+ """ set group membership based on the contents of groups.xml
+ and initial group membership of this client. Returns a tuple
+ of (allgroups, categories)"""
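+        # For example (hypothetical): starting from groups = set(["web"])
+        # with a groups.xml rule that makes members of "web" also members
+        # of "apache", the first pass adds "apache"; the next pass adds
+        # nothing new, len(groups) stops changing, and the loop exits.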
+ numgroups = -1 # force one initial pass
+ if categories is None:
+ categories = dict()
+ while numgroups != len(groups):
+ numgroups = len(groups)
+ for predicate, group in self.group_membership.items():
+ if group.name in groups:
+ continue
+ if predicate(client, groups, categories):
+ groups.add(group.name)
+ if group.category:
+ categories[group.category] = group.name
+ for predicate, group in self.negated_groups.items():
+ if group.name not in groups:
+ continue
+ if predicate(client, groups, categories):
+ groups.remove(group.name)
+ if group.category:
+ del categories[group.category]
+ return (groups, categories)
def get_initial_metadata(self, client):
"""Return the metadata for a given client."""
if False in list(self.states.values()):
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
client = client.lower()
if client in self.aliases:
client = self.aliases[client]
- if client in self.clients:
- profile = self.clients[client]
- (bundles, groups, categories) = self.groups[profile]
- else:
- if self.default == None:
- self.logger.error("Cannot set group for client %s; "
- "no default group set" % client)
- raise MetadataConsistencyError
- self.set_profile(client, self.default, (None, None))
- profile = self.default
- [bundles, groups, categories] = self.groups[self.default]
+
+ groups = set()
+ categories = dict()
+ profile = None
+
+ if client not in self.clients:
+ pgroup = None
+ if client in self.clientgroups:
+ pgroup = self.clientgroups[client][0]
+ elif self.default:
+ pgroup = self.default
+
+ if pgroup:
+ self.set_profile(client, pgroup, (None, None), force=True)
+ groups.add(pgroup)
+                if pgroup in self.groups:
+                    category = self.groups[pgroup].category
+                    if category:
+                        categories[category] = pgroup
+                    if self.groups[pgroup].is_profile:
+                        profile = pgroup
+ else:
+ msg = "Cannot add new client %s; no default group set" % client
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+
+ if client in self.clientgroups:
+ for cgroup in self.clientgroups[client]:
+ if cgroup in groups:
+ continue
+ if cgroup not in self.groups:
+ self.groups[cgroup] = MetadataGroup(cgroup)
+ category = self.groups[cgroup].category
+ if category and category in categories:
+ self.logger.warning("%s: Group %s suppressed by "
+ "category %s; %s already a member "
+ "of %s" %
+ (self.name, cgroup, category,
+ client, categories[category]))
+ continue
+ if category:
+ categories[category] = cgroup
+ groups.add(cgroup)
+ # favor client groups for setting profile
+ if not profile and self.groups[cgroup].is_profile:
+ profile = cgroup
+
+ groups, categories = self._merge_groups(client, groups,
+ categories=categories)
+
+ bundles = set()
+ for group in groups:
+ try:
+ bundles.update(self.groups[group].bundles)
+ except KeyError:
+ self.logger.warning("%s: %s is a member of undefined group %s" %
+ (self.name, client, group))
+
aliases = self.raliases.get(client, set())
addresses = self.raddresses.get(client, set())
- newgroups = set(groups)
- newbundles = set(bundles)
- newcategories = {}
- newcategories.update(categories)
+ version = self.versions.get(client, None)
if client in self.passwords:
password = self.passwords[client]
else:
@@ -613,61 +953,70 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
uuid = uuids[0]
else:
uuid = None
- for group in self.cgroups.get(client, []):
- if group in self.groups:
- nbundles, ngroups, ncategories = self.groups[group]
- else:
- nbundles, ngroups, ncategories = ([], [group], {})
- [newbundles.add(b) for b in nbundles if b not in newbundles]
- [newgroups.add(g) for g in ngroups if g not in newgroups]
- newcategories.update(ncategories)
- return ClientMetadata(client, profile, newgroups, newbundles, aliases,
- addresses, newcategories, uuid, password,
+ if not profile:
+ # one last ditch attempt at setting the profile
+ profiles = [g for g in groups
+ if g in self.groups and self.groups[g].is_profile]
+ if len(profiles) >= 1:
+ profile = profiles[0]
+
+ return ClientMetadata(client, profile, groups, bundles, aliases,
+ addresses, categories, uuid, password, version,
self.query)
def get_all_group_names(self):
all_groups = set()
- [all_groups.update(g[1]) for g in list(self.groups.values())]
+ all_groups.update(self.groups.keys())
+ all_groups.update([g.name for g in self.group_membership.values()])
+ all_groups.update([g.name for g in self.negated_groups.values()])
+ for grp in self.clientgroups.values():
+ all_groups.update(grp)
return all_groups
def get_all_groups_in_category(self, category):
- all_groups = set()
- [all_groups.add(g) for g in self.categories \
- if self.categories[g] == category]
- return all_groups
+ return set([g.name for g in self.groups.values()
+ if g.category == category])
def get_client_names_by_profiles(self, profiles):
- return [client for client, profile in list(self.clients.items()) \
- if profile in profiles]
+ rv = []
+ for client in list(self.clients):
+ mdata = self.get_initial_metadata(client)
+ if mdata.profile in profiles:
+ rv.append(client)
+ return rv
def get_client_names_by_groups(self, groups):
mdata = [self.core.build_metadata(client)
- for client in list(self.clients.keys())]
+ for client in list(self.clients)]
return [md.hostname for md in mdata if md.groups.issuperset(groups)]
+ def get_client_names_by_bundles(self, bundles):
+ mdata = [self.core.build_metadata(client)
+                 for client in list(self.clients)]
+ return [md.hostname for md in mdata if md.bundles.issuperset(bundles)]
+
def merge_additional_groups(self, imd, groups):
for group in groups:
- if (group in self.categories and
- self.categories[group] in imd.categories):
+ if group in imd.groups:
continue
- newbundles, newgroups, _ = self.groups.get(group,
- (list(),
- [group],
- dict()))
- for newbundle in newbundles:
- if newbundle not in imd.bundles:
- imd.bundles.add(newbundle)
- for newgroup in newgroups:
- if newgroup not in imd.groups:
- if (newgroup in self.categories and
- self.categories[newgroup] in imd.categories):
- continue
- if newgroup in self.private:
- self.logger.error("Refusing to add dynamic membership "
- "in private group %s for client %s" %
- (newgroup, imd.hostname))
- continue
- imd.groups.add(newgroup)
+ if group in self.groups and self.groups[group].category:
+ category = self.groups[group].category
+ if self.groups[group].category in imd.categories:
+ self.logger.warning("%s: Group %s suppressed by category "
+ "%s; %s already a member of %s" %
+ (self.name, group, category,
+ imd.hostname,
+ imd.categories[category]))
+ continue
+                imd.categories[category] = group
+ imd.groups.add(group)
+
+ self._merge_groups(imd.hostname, imd.groups,
+ categories=imd.categories)
+
+ for group in imd.groups:
+ if group in self.groups:
+ imd.bundles.update(self.groups[group].bundles)
def merge_additional_data(self, imd, source, data):
if not hasattr(imd, source):
@@ -686,8 +1035,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
(client, address))
return True
else:
- self.logger.error("Got request for non-float client %s from %s" %
- (client, address))
+ self.logger.error("Got request for non-float client %s from %s"
+ % (client, address))
return False
resolved = self.resolve_client(addresspair)
if resolved.lower() == client.lower():
@@ -711,9 +1060,10 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
id_method = 'address'
try:
client = self.resolve_client(address)
- except MetadataConsistencyError:
- self.logger.error("Client %s failed to resolve; metadata problem"
- % address[0])
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ err = sys.exc_info()[1]
+ self.logger.error("Client %s failed to resolve: %s" %
+ (address[0], err))
return False
else:
id_method = 'uuid'
@@ -768,7 +1118,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
"secure mode" % address[0])
return False
# populate the session cache
- if user.decode('utf-8') != 'root':
+ if user != 'root':
self.session_cache[address] = (time.time(), client)
return True
@@ -792,7 +1142,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
def include_group(group):
return not only_client or group in clientmeta.groups
- groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
+ groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"),
+ parser=Bcfg2.Server.XMLParser)
try:
groups_tree.xinclude()
except lxml.etree.XIncludeError:
@@ -810,20 +1161,26 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
del categories[None]
if hosts:
instances = {}
- clients = self.clients
- for client, profile in list(clients.items()):
+ for client in list(self.clients):
if include_client(client):
continue
- if profile in instances:
- instances[profile].append(client)
+ if client in self.clientgroups:
+ groups = self.clientgroups[client]
+ elif self.default:
+ groups = [self.default]
else:
- instances[profile] = [client]
- for profile, clist in list(instances.items()):
+ continue
+ for group in groups:
+ try:
+ instances[group].append(client)
+ except KeyError:
+ instances[group] = [client]
+ for group, clist in list(instances.items()):
clist.sort()
viz_str.append('"%s-instances" [ label="%s", shape="record" ];' %
- (profile, '|'.join(clist)))
+ (group, '|'.join(clist)))
viz_str.append('"%s-instances" -> "group-%s";' %
- (profile, profile))
+ (group, group))
if bundles:
bundles = []
[bundles.append(bund.get('name')) \
@@ -864,3 +1221,35 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
viz_str.append('"%s" [label="%s", shape="record", style="filled", fillcolor="%s"];' %
(category, category, categories[category]))
return "\n".join("\t" + s for s in viz_str)
+
+
+class MetadataLint(Bcfg2.Server.Lint.ServerPlugin):
+ def Run(self):
+ self.nested_clients()
+ self.deprecated_options()
+
+ @classmethod
+ def Errors(cls):
+ return {"nested-client-tags": "warning",
+ "deprecated-clients-options": "warning"}
+
+ def deprecated_options(self):
+ clientdata = self.metadata.clients_xml.xdata
+        for el in clientdata.xpath("//Client"):
+ loc = el.get("location")
+ if loc:
+ if loc == "floating":
+ floating = True
+ else:
+ floating = False
+ self.LintError("deprecated-clients-options",
+ "The location='%s' option is deprecated. "
+ "Please use floating='%s' instead: %s" %
+ (loc, floating, self.RenderXML(el)))
+
+ def nested_clients(self):
+ groupdata = self.metadata.groups_xml.xdata
+ for el in groupdata.xpath("//Client//Client"):
+ self.LintError("nested-client-tags",
+ "Client %s nested within Client tag: %s" %
+ (el.get("name"), self.RenderXML(el)))
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 4dbd57d16..f2b8336e0 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -7,18 +7,23 @@ import glob
import socket
import logging
import lxml.etree
-
+import Bcfg2.Server
import Bcfg2.Server.Plugin
LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen')
line_fmt = '\t%-32s %s'
-class NagiosGenConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
+class NagiosGenConfig(Bcfg2.Server.Plugin.StructFile):
def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+ # create config.xml if missing
+ if not os.path.exists(filename):
+ LOGGER.warning("NagiosGen: %s missing. "
+ "Creating empty one for you." % filename)
+ open(filename, "w").write("<NagiosGen></NagiosGen>")
+
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ should_monitor=True)
class NagiosGen(Bcfg2.Server.Plugin.Plugin,
@@ -51,7 +56,12 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
def createhostconfig(self, entry, metadata):
"""Build host specific configuration file."""
- host_address = socket.gethostbyname(metadata.hostname)
+ try:
+ host_address = socket.gethostbyname(metadata.hostname)
+ except socket.gaierror:
+ LOGGER.error("Failed to find IP address for %s" %
+ metadata.hostname)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
host_groups = [grp for grp in metadata.groups
if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
host_config = ['define host {',
@@ -84,7 +94,8 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
LOGGER.warn("Parsing deprecated NagiosGen/parents.xml. "
"Update to the new-style config with "
"nagiosgen-convert.py.")
- parents = lxml.etree.parse(pfile)
+ parents = lxml.etree.parse(pfile,
+ parser=Bcfg2.Server.XMLParser)
for el in parents.xpath("//Depend[@name='%s']" % metadata.hostname):
if 'parent' in xtra:
xtra['parent'] += "," + el.get("on")
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 5fff20d98..20f9ba877 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -41,7 +41,7 @@ class OhaiCache(object):
# simply return if the client returned nothing
return
self.cache[item] = json.loads(value)
- file("%s/%s.json" % (self.dirname, item), 'w').write(value)
+ open("%s/%s.json" % (self.dirname, item), 'w').write(value)
def __getitem__(self, item):
if item not in self.cache:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index 49e9d417b..685cd5c1d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -2,13 +2,15 @@ import re
import gzip
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
-from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Bcfg2Py3k import cPickle
class AptCollection(Collection):
def get_group(self, group):
- self.logger.warning("Packages: Package groups are not supported by APT")
+ self.logger.warning("Packages: Package groups are not "
+ "supported by APT")
return []
+
class AptSource(Source):
basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
ptype = 'deb'
@@ -22,14 +24,15 @@ class AptSource(Source):
'components': self.components, 'arches': self.arches}]
def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
+ cache = open(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides,
+ self.essentialpkgs), cache, 2)
cache.close()
def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
+ data = open(self.cachefile)
+ (self.pkgnames, self.deps, self.provides,
+ self.essentialpkgs) = cPickle.load(data)
def filter_unknown(self, unknown):
filtered = set([u for u in unknown if u.startswith('choice')])
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index 3ea14ce75..b05a69d4a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -1,6 +1,7 @@
import sys
import copy
import logging
+import lxml
import Bcfg2.Server.Plugin
logger = logging.getLogger(__name__)
@@ -52,13 +53,40 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
@property
def cachekey(self):
- return md5(self.get_config()).hexdigest()
+ return md5(self.sourcelist()).hexdigest()
def get_config(self):
- self.logger.error("Packages: Cannot generate config for host with "
- "multiple source types (%s)" % self.metadata.hostname)
+ self.logger.error("Packages: Cannot generate config for host %s with "
+ "no sources or multiple source types" %
+ self.metadata.hostname)
return ""
+ def sourcelist(self):
+ srcs = []
+ for source in self.sources:
+ # get_urls() loads url_map as a side-effect
+ source.get_urls()
+ for url_map in source.url_map:
+                if url_map['arch'] not in self.metadata.groups:
+ continue
+ reponame = source.get_repo_name(url_map)
+ srcs.append("Name: %s" % reponame)
+ srcs.append(" Type: %s" % source.ptype)
+ if url_map['url']:
+ srcs.append(" URL: %s" % url_map['url'])
+ elif url_map['rawurl']:
+ srcs.append(" RAWURL: %s" % url_map['rawurl'])
+ if source.gpgkeys:
+ srcs.append(" GPG Key(s): %s" % ", ".join(source.gpgkeys))
+ else:
+ srcs.append(" GPG Key(s): None")
+ if len(source.blacklist):
+ srcs.append(" Blacklist: %s" % ", ".join(source.blacklist))
+ if len(source.whitelist):
+ srcs.append(" Whitelist: %s" % ", ".join(source.whitelist))
+ srcs.append("")
+ return "\n".join(srcs)
+
def get_relevant_groups(self):
groups = []
for source in self.sources:
@@ -79,6 +107,14 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
cachefiles.add(source.cachefile)
return list(cachefiles)
+ def get_groups(self, grouplist):
+ """ provided since some backends may be able to query multiple
+ groups at once faster than serially """
+ rv = dict()
+ for group, ptype in grouplist:
+ rv[group] = self.get_group(group, ptype)
+ return rv
+
def get_group(self, group, ptype=None):
for source in self.sources:
pkgs = source.get_group(self.metadata, group, ptype=ptype)
@@ -152,6 +188,28 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
""" do any collection-level data setup tasks """
pass
+ def packages_from_entry(self, entry):
+ """ given a Package or BoundPackage entry, get a list of the
+ package(s) described by it in a format appropriate for passing
+ to complete(). by default, that's just the name; only the Yum
+ backend supports getting versions"""
+ return [entry.get("name")]
+
+ def packages_to_entry(self, pkglist, entry):
+ for pkg in pkglist:
+ lxml.etree.SubElement(entry, 'BoundPackage', name=pkg,
+ version=self.setup.cfp.get("packages",
+ "version",
+ default="auto"),
+ type=self.ptype, origin='Packages')
+
+ def get_new_packages(self, initial, complete):
+ """ compute the difference between the complete package list
+ and the initial package list. this is necessary because the
+ format may be different between the two lists due to
+ packages_{to,from}_entry() """
+ return list(complete.difference(initial))
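+        # e.g., initial = ["foo"], complete = set(["foo", "bar"]) returns
+        # ["bar"]; the Yum backend overrides this because its complete()
+        # results may be (name, arch, epoch, version, release) tuples.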
+
def complete(self, packagelist):
'''Build the transitive closure of all package dependencies
@@ -350,15 +408,7 @@ def factory(metadata, sources, basepath, debug=False):
",".join([s.__name__ for s in sclasses]))
cclass = Collection
elif len(sclasses) == 0:
- # you'd think this should be a warning, but it happens all the
- # freaking time if you have a) machines in your clients.xml
- # that do not have the proper groups set up yet (e.g., if you
- # have multiple Bcfg2 servers and Packages-relevant groups set
- # by probes); and b) templates that query all or multiple
- # machines (e.g., with metadata.query.all_clients())
- if debug:
- logger.error("Packages: No sources found for %s" %
- metadata.hostname)
+ logger.error("Packages: No sources found for %s" % metadata.hostname)
cclass = Collection
else:
cclass = get_collection_class(sclasses.pop().__name__.replace("Source",
@@ -373,4 +423,3 @@ def factory(metadata, sources, basepath, debug=False):
clients[metadata.hostname] = ckey
collections[ckey] = collection
return collection
-
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 99a090739..34c7b42c1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -1,6 +1,6 @@
import gzip
import tarfile
-from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Bcfg2Py3k import cPickle
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
@@ -9,6 +9,7 @@ class PacCollection(Collection):
self.logger.warning("Packages: Package groups are not supported by Pacman")
return []
+
class PacSource(Source):
basegroups = ['arch', 'parabola']
ptype = 'pacman'
@@ -22,13 +23,13 @@ class PacSource(Source):
'components': self.components, 'arches': self.arches}]
def save_state(self):
- cache = file(self.cachefile, 'wb')
+ cache = open(self.cachefile, 'wb')
cPickle.dump((self.pkgnames, self.deps, self.provides),
cache, 2)
cache.close()
def load_state(self):
- data = file(self.cachefile)
+ data = open(self.cachefile)
self.pkgnames, self.deps, self.provides = cPickle.load(data)
def filter_unknown(self, unknown):
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index 7796b9e34..0d565be31 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -4,17 +4,15 @@ import lxml.etree
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
-class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile,
+class PackagesSources(Bcfg2.Server.Plugin.StructFile,
Bcfg2.Server.Plugin.Debuggable):
__identifier__ = None
def __init__(self, filename, cachepath, fam, packages, setup):
Bcfg2.Server.Plugin.Debuggable.__init__(self)
try:
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- filename,
- fam)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ should_monitor=True)
except OSError:
err = sys.exc_info()[1]
msg = "Packages: Failed to read configuration file: %s" % err
@@ -22,7 +20,6 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
msg += " Have you created it?"
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginInitError(msg)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
self.cachepath = cachepath
self.setup = setup
if not os.path.exists(self.cachepath):
@@ -42,18 +39,11 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
source.toggle_debug()
def HandleEvent(self, event=None):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event)
+ Bcfg2.Server.Plugin.XMLFileBacked.HandleEvent(self, event=event)
if event and event.filename != self.name:
- for fname in self.extras:
- fpath = None
- if fname.startswith("/"):
- fpath = os.path.abspath(fname)
- else:
- fpath = \
- os.path.abspath(os.path.join(os.path.dirname(self.name),
- fname))
+ for fpath in self.extras:
if fpath == os.path.abspath(event.filename):
- self.parsed.add(fname)
+ self.parsed.add(fpath)
break
if self.loaded:
@@ -65,7 +55,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
return sorted(list(self.parsed)) == sorted(self.extras)
def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
self.entries = []
for xsource in self.xdata.findall('.//Source'):
source = self.source_from_xml(xsource)
@@ -87,7 +77,8 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
stype.title())
cls = getattr(module, "%sSource" % stype.title())
except (ImportError, AttributeError):
- self.logger.error("Packages: Unknown source type %s" % stype)
+ ex = sys.exc_info()[1]
+ self.logger.error("Packages: Unknown source type %s (%s)" % (stype, ex))
return None
try:
@@ -106,4 +97,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
return "PackagesSources: %s" % repr(self.entries)
def __str__(self):
- return "PackagesSources: %s" % str(self.entries)
+ return "PackagesSources: %s sources" % len(self.entries)
+
+ def __len__(self):
+ return len(self.entries)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index edcdcd9f2..df3706fb1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -1,11 +1,10 @@
import os
import re
import sys
-import base64
import Bcfg2.Server.Plugin
from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
- urlopen, file, cPickle
+ urlopen, cPickle
try:
from hashlib import md5
@@ -51,7 +50,18 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
for key, tag in [('components', 'Component'), ('arches', 'Arch'),
('blacklist', 'Blacklist'),
('whitelist', 'Whitelist')]:
- self.__dict__[key] = [item.text for item in xsource.findall(tag)]
+ setattr(self, key, [item.text for item in xsource.findall(tag)])
+ self.server_options = dict()
+ self.client_options = dict()
+ opts = xsource.findall("Options")
+ for el in opts:
+ repoopts = dict([(k, v)
+ for k, v in el.attrib.items()
+ if k != "clientonly" and k != "serveronly"])
+ if el.get("clientonly", "false").lower() == "false":
+ self.server_options.update(repoopts)
+ if el.get("serveronly", "false").lower() == "false":
+ self.client_options.update(repoopts)
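+        # Hypothetical example:
+        #   <Options serveronly="true" proxy="http://proxy.example.com:3128"/>
+        # ends up only in server_options, while
+        #   <Options gpgcheck="no"/>
+        # (neither clientonly nor serveronly) lands in both server_options
+        # and client_options.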
self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
@@ -137,9 +147,8 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
def get_repo_name(self, url_map):
# try to find a sensible name for a repo
- if 'components' in url_map and url_map['components']:
- # use the first component as the name
- rname = url_map['components'][0]
+ if 'component' in url_map and url_map['component']:
+ rname = url_map['component']
else:
name = None
for repo_re in (self.mrepo_re,
@@ -149,12 +158,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
if match:
name = match.group(1)
break
- if name is None:
- # couldn't figure out the name from the URL or URL map
- # (which probably means its a screwy URL), so we just
- # generate a random one
- name = base64.b64encode(os.urandom(16))[:-2]
- rname = "%s-%s" % (self.groups[0], name)
+ if name and self.groups:
+ rname = "%s-%s" % (self.groups[0], name)
+ elif self.groups:
+ rname = self.groups[0]
+ else:
+ # a global source with no reasonable name. just use
+ # the full url and let the regex below make it even
+ # uglier.
+ rname = url_map['url']
# see yum/__init__.py in the yum source, lines 441-449, for
# the source of this regex. yum doesn't like anything but
# string.ascii_letters, string.digits, and [-_.:]. There
@@ -169,6 +181,9 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
else:
return self.__class__.__name__
+ def __repr__(self):
+ return str(self)
+
def get_urls(self):
return []
urls = property(get_urls)
@@ -182,6 +197,10 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
if a in metadata.groups]
vdict = dict()
for agrp in agroups:
+ if agrp not in self.provides:
+ self.logger.warning("%s provides no packages for %s" %
+ (self, agrp))
+ continue
for key, value in list(self.provides[agrp].items()):
if key not in vdict:
vdict[key] = set(value)
@@ -213,7 +232,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
fname = self.escape_url(url)
try:
data = fetch_url(url)
- file(fname, 'w').write(data)
+ open(fname, 'w').write(data)
except ValueError:
self.logger.error("Packages: Bad url string %s" % url)
raise
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index 53344e200..cba3373c1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -1,17 +1,14 @@
import os
+import re
import sys
-import time
import copy
-import glob
import socket
-import random
import logging
-import threading
import lxml.etree
-from UserDict import DictMixin
-from subprocess import Popen, PIPE, STDOUT
+from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
+from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, URLError, \
+ ConfigParser
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
fetch_url
@@ -96,19 +93,29 @@ class YumCollection(Collection):
if not os.path.exists(self.cachefile):
os.mkdir(self.cachefile)
- self.configdir = os.path.join(self.basepath, "yum")
- if not os.path.exists(self.configdir):
- os.mkdir(self.configdir)
- self.cfgfile = os.path.join(self.configdir,
- "%s-yum.conf" % self.cachekey)
+ self.cfgfile = os.path.join(self.cachefile, "yum.conf")
self.write_config()
if has_pulp and self.has_pulp_sources:
_setup_pulp(self.setup)
+ self._helper = None
+
@property
def helper(self):
- return self.setup.cfp.get("packages:yum", "helper",
- default="/usr/sbin/bcfg2-yum-helper")
+ try:
+ return self.setup.cfp.get("packages:yum", "helper")
+ except:
+ pass
+
+ if not self._helper:
+ # first see if bcfg2-yum-helper is in PATH
+ try:
+ Popen(['bcfg2-yum-helper'],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE).wait()
+ self._helper = 'bcfg2-yum-helper'
+ except OSError:
+ self._helper = "/usr/sbin/bcfg2-yum-helper"
+ return self._helper
@property
def use_yum(self):
@@ -129,11 +136,21 @@ class YumCollection(Collection):
yumconf = self.get_config(raw=True)
yumconf.add_section("main")
- mainopts = dict(cachedir=self.cachefile,
+ # we set installroot to the cache directory so
+ # bcfg2-yum-helper works with an empty rpmdb. otherwise
+ # the rpmdb is so hopelessly intertwined with yum that we
+ # have to totally reinvent the dependency resolver.
+ mainopts = dict(cachedir='/',
+ installroot=self.cachefile,
keepcache="0",
- sslverify="0",
debuglevel="0",
+ sslverify="0",
reposdir="/dev/null")
+ if self.setup['debug']:
+ mainopts['debuglevel'] = "5"
+ elif self.setup['verbose']:
+ mainopts['debuglevel'] = "2"
+
try:
for opt in self.setup.cfp.options("packages:yum"):
if opt not in self.option_blacklist:
@@ -162,7 +179,7 @@ class YumCollection(Collection):
config.add_section(reponame)
added = True
except ConfigParser.DuplicateSectionError:
- match = re.match("-(\d)", reponame)
+ match = re.search("-(\d+)", reponame)
if match:
rid = int(match.group(1)) + 1
else:
@@ -186,6 +203,13 @@ class YumCollection(Collection):
config.set(reponame, "includepkgs",
" ".join(source.whitelist))
+ if raw:
+ opts = source.server_options
+ else:
+ opts = source.client_options
+ for opt, val in opts.items():
+ config.set(reponame, opt, val)
+
if raw:
return config
else:
@@ -346,6 +370,25 @@ class YumCollection(Collection):
# for API completeness
return self.call_helper("get_provides", package)
+ def get_groups(self, grouplist):
+ if not self.use_yum:
+ self.logger.warning("Packages: Package groups are not supported by "
+ "Bcfg2's internal Yum dependency generator")
+ return []
+
+ if not grouplist:
+ return dict()
+
+ gdicts = []
+ for group, ptype in grouplist:
+ if group.startswith("@"):
+ group = group[1:]
+ if not ptype:
+ ptype = "default"
+ gdicts.append(dict(group=group, type=ptype))
+
+ return self.call_helper("get_groups", gdicts)
+
def get_group(self, group, ptype="default"):
if not self.use_yum:
self.logger.warning("Packages: Package groups are not supported by "
@@ -355,32 +398,106 @@ class YumCollection(Collection):
if group.startswith("@"):
group = group[1:]
- pkgs = self.call_helper("get_group", dict(group=group, type=ptype))
- return pkgs
+ return self.call_helper("get_group", dict(group=group, type=ptype))
+
+ def packages_from_entry(self, entry):
+ rv = set()
+ name = entry.get("name")
+
+ def _tag_to_pkg(tag):
+ rv = (name, tag.get("arch"), tag.get("epoch"),
+ tag.get("version"), tag.get("release"))
+ # if a package requires no specific version, we just use
+ # the name, not the tuple. this limits the amount of JSON
+ # encoding/decoding that has to be done to pass the
+ # package list to bcfg2-yum-helper.
+ if rv[1:] == (None, None, None, None):
+ return name
+ else:
+ return rv
+
+ for inst in entry.getchildren():
+ if inst.tag != "Instance":
+ continue
+ rv.add(_tag_to_pkg(inst))
+ if not rv:
+ rv.add(_tag_to_pkg(entry))
+ return list(rv)
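+        # Illustrative (hypothetical entry): a BoundPackage name="kernel"
+        # with <Instance arch="x86_64" version="2.6.32" release="220.el6"/>
+        # yields [("kernel", "x86_64", None, "2.6.32", "220.el6")], while a
+        # bare <Package name="vim-enhanced"/> yields just ["vim-enhanced"].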
+
+ def packages_to_entry(self, pkglist, entry):
+ def _get_entry_attrs(pkgtup):
+ attrs = dict(version=self.setup.cfp.get("packages",
+ "version",
+ default="auto"))
+ if attrs['version'] == 'any':
+ return attrs
+
+ if pkgtup[1]:
+ attrs['arch'] = pkgtup[1]
+ if pkgtup[2]:
+ attrs['epoch'] = pkgtup[2]
+ if pkgtup[3]:
+ attrs['version'] = pkgtup[3]
+ if pkgtup[4]:
+ attrs['release'] = pkgtup[4]
+ return attrs
+
+ packages = dict()
+ for pkg in pkglist:
+ try:
+ packages[pkg[0]].append(pkg)
+ except KeyError:
+ packages[pkg[0]] = [pkg]
+ for name, instances in packages.items():
+ pkgattrs = dict(type=self.ptype,
+ origin='Packages',
+ name=name)
+ if len(instances) > 1:
+ pkg_el = lxml.etree.SubElement(entry, 'BoundPackage',
+ **pkgattrs)
+ for inst in instances:
+ lxml.etree.SubElement(pkg_el, "Instance",
+ _get_entry_attrs(inst))
+ else:
+ attrs = _get_entry_attrs(instances[0])
+ attrs.update(pkgattrs)
+ lxml.etree.SubElement(entry, 'BoundPackage', **attrs)
+
+ def get_new_packages(self, initial, complete):
+ initial_names = []
+ for pkg in initial:
+ if isinstance(pkg, tuple):
+ initial_names.append(pkg[0])
+ else:
+ initial_names.append(pkg)
+ new = []
+ for pkg in complete:
+ if pkg[0] not in initial_names:
+ new.append(pkg)
+ return new
def complete(self, packagelist):
if not self.use_yum:
return Collection.complete(self, packagelist)
- packages = set()
- unknown = set(packagelist)
-
- if unknown:
+ if packagelist:
result = \
self.call_helper("complete",
- dict(packages=list(unknown),
+ dict(packages=list(packagelist),
groups=list(self.get_relevant_groups())))
- if result and "packages" in result and "unknown" in result:
- # we stringify every package because it gets returned
- # in unicode; set.update() doesn't work if some
- # elements are unicode and other are strings. (I.e.,
- # u'foo' and 'foo' get treated as unique elements.)
- packages.update([str(p) for p in result['packages']])
- unknown = set([str(p) for p in result['unknown']])
-
+ if not result:
+ # some sort of error, reported by call_helper()
+ return set(), packagelist
+ # json doesn't understand sets or tuples, so we get back a
+ # lists of lists (packages) and a list of unicode strings
+ # (unknown). turn those into a set of tuples and a set of
+ # strings, respectively.
+ unknown = set([str(u) for u in result['unknown']])
+ packages = set([tuple(p) for p in result['packages']])
self.filter_unknown(unknown)
-
- return packages, unknown
+ return packages, unknown
+ else:
+ return set(), set()
def call_helper(self, command, input=None):
""" Make a call to bcfg2-yum-helper. The yum libs have
@@ -388,16 +505,12 @@ class YumCollection(Collection):
         around that in long-running processes is to have a short-lived
helper. No, seriously -- check out the yum-updatesd code.
It's pure madness. """
- # it'd be nice if we could change this to be more verbose if
- # -v was given to bcfg2-server, but Collection objects don't
- # get the 'setup' variable, so we don't know how verbose
- # bcfg2-server is. It'd also be nice if we could tell yum to
- # log to syslog. So would a unicorn.
cmd = [self.helper, "-c", self.cfgfile]
- if self.debug_flag:
+ verbose = self.debug_flag or self.setup['verbose']
+ if verbose:
cmd.append("-v")
cmd.append(command)
- self.debug_log("Packages: running %s" % " ".join(cmd))
+ self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose)
try:
helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
@@ -415,9 +528,9 @@ class YumCollection(Collection):
if rv:
self.logger.error("Packages: error running bcfg2-yum-helper "
"(returned %d): %s" % (rv, stderr))
- elif self.debug_flag:
+ else:
self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- stderr)
+ stderr, flag=verbose)
try:
return json.loads(stdout)
except ValueError:
@@ -500,15 +613,14 @@ class YumSource(Source):
def save_state(self):
if not self.use_yum:
- cache = file(self.cachefile, 'wb')
+ cache = open(self.cachefile, 'wb')
cPickle.dump((self.packages, self.deps, self.provides,
self.filemap, self.url_map), cache, 2)
cache.close()
-
def load_state(self):
if not self.use_yum:
- data = file(self.cachefile)
+ data = open(self.cachefile)
(self.packages, self.deps, self.provides,
self.filemap, self.url_map) = cPickle.load(data)
@@ -520,7 +632,7 @@ class YumSource(Source):
usettings = [{'version':self.version, 'component':comp,
'arch':arch}
for comp in self.components]
- else: # rawurl given
+ else: # rawurl given
usettings = [{'version':self.version, 'component':None,
'arch':arch}]
@@ -546,6 +658,11 @@ class YumSource(Source):
except ValueError:
self.logger.error("Packages: Bad url string %s" % rmdurl)
return []
+ except URLError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to fetch url %s. %s" %
+ (rmdurl, err))
+ return []
except HTTPError:
err = sys.exc_info()[1]
self.logger.error("Packages: Failed to fetch url %s. code=%s" %
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index d789a6d39..d3095300a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -11,14 +11,16 @@ from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
from Bcfg2.Server.Plugins.Packages import Collection
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+yum_config_default = "/etc/yum.repos.d/bcfg2.repo"
+apt_config_default = "/etc/apt/sources.d/bcfg2"
+
class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.Connector,
- Bcfg2.Server.Plugin.GoalValidator):
+ Bcfg2.Server.Plugin.ClientRunHooks):
name = 'Packages'
conflicts = ['Pkgmgr']
- experimental = True
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
def __init__(self, core, datastore):
@@ -26,11 +28,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
self.sentinels = set()
- self.cachepath = os.path.join(self.data, 'cache')
- self.keypath = os.path.join(self.data, 'keys')
+ self.cachepath = \
+ self.core.setup.cfp.get("packages", "cache",
+ default=os.path.join(self.data, 'cache'))
+ self.keypath = \
+ self.core.setup.cfp.get("packages", "keycache",
+ default=os.path.join(self.data, 'keys'))
if not os.path.exists(self.keypath):
# create key directory if needed
os.makedirs(self.keypath)
@@ -40,11 +46,16 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self.core.setup)
def toggle_debug(self):
- Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
+ rv = Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
self.sources.toggle_debug()
+ return rv
@property
def disableResolver(self):
+ if self.disableMetaData:
+ # disabling metadata without disabling the resolver Breaks
+ # Things
+ return True
try:
return not self.core.setup.cfp.getboolean("packages", "resolver")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
@@ -87,16 +98,18 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if entry.tag == 'Package':
collection = self._get_collection(metadata)
entry.set('version', self.core.setup.cfp.get("packages",
- "version",
- default="auto"))
+ "version",
+ default="auto"))
entry.set('type', collection.ptype)
elif entry.tag == 'Path':
- if (entry.get("name") == self.core.setup.cfp.get("packages",
- "yum_config",
- default="") or
- entry.get("name") == self.core.setup.cfp.get("packages",
- "apt_config",
- default="")):
+ if (entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "yum_config",
+ default=yum_config_default) or
+ entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "apt_config",
+ default=apt_config_default)):
self.create_config(entry, metadata)
def HandlesEntry(self, entry, metadata):
@@ -110,12 +123,14 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
return True
elif entry.tag == 'Path':
# managed entries for yum/apt configs
- if (entry.get("name") == self.core.setup.cfp.get("packages",
- "yum_config",
- default="") or
- entry.get("name") == self.core.setup.cfp.get("packages",
- "apt_config",
- default="")):
+ if (entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "yum_config",
+ default=yum_config_default) or
+ entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "apt_config",
+ default=apt_config_default)):
return True
return False
@@ -151,26 +166,24 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
# essential pkgs are those marked as such by the distribution
essential = collection.get_essential()
to_remove = []
+ groups = []
for struct in structures:
for pkg in struct.xpath('//Package | //BoundPackage'):
if pkg.get("name"):
- initial.add(pkg.get("name"))
+ initial.update(collection.packages_from_entry(pkg))
elif pkg.get("group"):
- try:
- if pkg.get("type"):
- gpkgs = collection.get_group(pkg.get("group"),
- ptype=pkg.get("type"))
- else:
- gpkgs = collection.get_group(pkg.get("group"))
- base.update(gpkgs)
- except TypeError:
- raise
- self.logger.error("Could not resolve group %s" %
- pkg.get("group"))
+ groups.append((pkg.get("group"),
+ pkg.get("type")))
to_remove.append(pkg)
else:
self.logger.error("Packages: Malformed Package: %s" %
- lxml.etree.tostring(pkg))
+ lxml.etree.tostring(pkg,
+ xml_declaration=False).decode('UTF-8'))
+
+ gpkgs = collection.get_groups(groups)
+ for group, pkgs in gpkgs.items():
+ base.update(pkgs)
+
base.update(initial | essential)
for el in to_remove:
el.getparent().remove(el)
@@ -179,16 +192,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if unknown:
self.logger.info("Packages: Got %d unknown entries" % len(unknown))
self.logger.info("Packages: %s" % list(unknown))
- newpkgs = list(packages.difference(initial))
+ newpkgs = collection.get_new_packages(initial, packages)
self.debug_log("Packages: %d initial, %d complete, %d new" %
(len(initial), len(packages), len(newpkgs)))
newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
- version=self.core.setup.cfp.get("packages",
- "version",
- default="auto"),
- type=collection.ptype, origin='Packages')
+ collection.packages_to_entry(newpkgs, independent)
def Refresh(self):
'''Packages.Refresh() => True|False\nReload configuration
@@ -271,10 +279,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
collection = self._get_collection(metadata)
return dict(sources=collection.get_additional_data())
- def validate_goals(self, metadata, _):
- """ we abuse the GoalValidator plugin since validate_goals()
- is the very last thing called during a client config run. so
- we use this to clear the collection cache for this client,
- which must persist only the duration of a client run """
+ def end_client_run(self, metadata):
+ """ clear the collection cache for this client, which must
+ persist only the duration of a client run"""
if metadata.hostname in Collection.clients:
del Collection.clients[metadata.hostname]
+
+ def end_statistics(self, metadata):
+ self.end_client_run(metadata)
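
# Illustrative sketch (not part of the patch): the ClientRunHooks switch above
# replaces the old GoalValidator trick with an explicit end-of-run hook that
# drops the per-client Collection cache.  Names below are simplified
# stand-ins, not the real plugin API.
class PerClientCache(object):
    def __init__(self):
        self.clients = {}                     # hostname -> cached collection

    def get(self, hostname, factory):
        if hostname not in self.clients:
            self.clients[hostname] = factory()
        return self.clients[hostname]

    def end_client_run(self, hostname):
        # cache entries must live for at most one client run
        self.clients.pop(hostname, None)
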
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
index e9254cdcc..7dac907e1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -1,12 +1,17 @@
'''This module implements a package management scheme for all images'''
-import logging
+import os
import re
+import glob
+import logging
+import lxml.etree
import Bcfg2.Server.Plugin
-import lxml
+import Bcfg2.Server.Lint
+
try:
set
except NameError:
+ # deprecated since python 2.6
from sets import Set as set
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
@@ -24,12 +29,14 @@ class FuzzyDict(dict):
print("got non-string key %s" % str(key))
return dict.__getitem__(self, key)
- def has_key(self, key):
+ def __contains__(self, key):
if isinstance(key, str):
mdata = self.fuzzy.match(key)
- if self.fuzzy.match(key):
- return dict.has_key(self, mdata.groupdict()['name'])
- return dict.has_key(self, key)
+ if mdata:
+ return dict.__contains__(self, mdata.groupdict()['name'])
+ else:
+ print("got non-string key %s" % str(key))
+ return dict.__contains__(self, key)
def get(self, key, default=None):
try:
@@ -167,3 +174,40 @@ class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
def HandleEntry(self, entry, metadata):
self.BindEntry(entry, metadata)
+
+
+class PkgmgrLint(Bcfg2.Server.Lint.ServerlessPlugin):
+ """ find duplicate Pkgmgr entries with the same priority """
+ def Run(self):
+ pset = set()
+ for pfile in glob.glob(os.path.join(self.config['repo'], 'Pkgmgr',
+ '*.xml')):
+ if self.HandlesFile(pfile):
+ xdata = lxml.etree.parse(pfile).getroot()
+ # get priority, type, group
+ priority = xdata.get('priority')
+ ptype = xdata.get('type')
+ for pkg in xdata.xpath("//Package"):
+ if pkg.getparent().tag == 'Group':
+ grp = pkg.getparent().get('name')
+ if (type(grp) is not str and
+ grp.getparent().tag == 'Group'):
+ pgrp = grp.getparent().get('name')
+ else:
+ pgrp = 'none'
+ else:
+ grp = 'none'
+ pgrp = 'none'
+ ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
+ # check if package is already listed with same
+ # priority, type, grp
+ if ptuple in pset:
+ self.LintError("duplicate-package",
+ "Duplicate Package %s, priority:%s, type:%s" %
+ (pkg.get('name'), priority, ptype))
+ else:
+ pset.add(ptuple)
+
+ @classmethod
+ def Errors(cls):
+ return {"duplicate-packages":"error"}
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index af908eee8..7f300ebe0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -1,7 +1,17 @@
+import re
+import os
+import sys
import time
-import lxml.etree
import operator
-import re
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+
+try:
+ from django.db import models
+ has_django = True
+except ImportError:
+ has_django = False
try:
import json
@@ -14,23 +24,36 @@ except ImportError:
has_json = False
try:
- import syck
- has_syck = True
+ import syck as yaml
+ has_yaml = True
+    yaml_error = yaml.error
except ImportError:
- has_syck = False
try:
import yaml
+ yaml_error = yaml.YAMLError
has_yaml = True
except ImportError:
has_yaml = False
import Bcfg2.Server.Plugin
-specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)")
-probe_matcher = re.compile("(.*/)?(?P<basename>\S+)")
+if has_django:
+ class ProbesDataModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255)
+ probe = models.CharField(max_length=255)
+ timestamp = models.DateTimeField(auto_now=True)
+ data = models.TextField(null=True)
+
+ class ProbesGroupsModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255)
+ group = models.CharField(max_length=255)
+
class ClientProbeDataSet(dict):
- """ dict of probe => [probe data] that records a for each host """
+ """ dict of probe => [probe data] that records a timestamp for
+ each host """
def __init__(self, *args, **kwargs):
if "timestamp" in kwargs and kwargs['timestamp'] is not None:
self.timestamp = kwargs.pop("timestamp")
@@ -39,61 +62,31 @@ class ClientProbeDataSet(dict):
dict.__init__(self, *args, **kwargs)
-class ProbeData(object):
- """ a ProbeData object emulates a str object, but also has .xdata
- and .json properties to provide convenient ways to use ProbeData
- objects as XML or JSON data """
+class ProbeData(str):
+ """ a ProbeData object emulates a str object, but also has .xdata,
+ .json, and .yaml properties to provide convenient ways to use
+ ProbeData objects as XML, JSON, or YAML data """
+ def __new__(cls, data):
+ return str.__new__(cls, data)
+
def __init__(self, data):
- self.data = data
+ str.__init__(self)
self._xdata = None
self._json = None
self._yaml = None
- def __str__(self):
- return str(self.data)
-
- def __repr__(self):
- return repr(self.data)
-
- def __getattr__(self, name):
- """ make ProbeData act like a str object """
- return getattr(self.data, name)
-
- def __complex__(self):
- return complex(self.data)
-
- def __int__(self):
- return int(self.data)
-
- def __long__(self):
- return long(self.data)
-
- def __float__(self):
- return float(self.data)
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def __ne__(self, other):
- return str(self) != str(other)
-
- def __gt__(self, other):
- return str(self) > str(other)
-
- def __lt__(self, other):
- return str(self) < str(other)
-
- def __ge__(self, other):
- return self > other or self == other
-
- def __le__(self, other):
- return self < other or self == other
-
+ @property
+ def data(self):
+ """ provide backwards compatibility with broken ProbeData
+ object in bcfg2 1.2.0 thru 1.2.2 """
+ return str(self)
+
@property
def xdata(self):
if self._xdata is None:
try:
- self._xdata = lxml.etree.XML(self.data)
+ self._xdata = lxml.etree.XML(self.data,
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
pass
return self._xdata
@@ -109,44 +102,30 @@ class ProbeData(object):
@property
def yaml(self):
- if self._yaml is None:
- if has_yaml:
- try:
- self._yaml = yaml.load(self.data)
- except yaml.YAMLError:
- pass
- elif has_syck:
- try:
- self._yaml = syck.load(self.data)
- except syck.error:
- pass
+ if self._yaml is None and has_yaml:
+ try:
+ self._yaml = yaml.load(self.data)
+ except yaml_error:
+ pass
return self._yaml
class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$")
+ probename = re.compile("(.*/)?(?P<basename>\S+?)(\.(?P<mode>(?:G\d\d)|H)_\S+)?$")
+ bangline = re.compile('^#!\s*(?P<interpreter>.*)$')
+ basename_is_regex = True
def __init__(self, path, fam, encoding, plugin_name):
- fpattern = '[0-9A-Za-z_\-]+'
self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, '[0-9A-Za-z_\-]+', path,
Bcfg2.Server.Plugin.SpecificData,
encoding)
fam.AddMonitor(path, self)
- self.bangline = re.compile('^#!(?P<interpreter>.*)$')
def HandleEvent(self, event):
- if event.filename != self.path:
- if (event.code2str == 'changed' and
- event.filename.endswith("probed.xml") and
- event.filename not in self.entries):
- # for some reason, probed.xml is particularly prone to
- # getting changed events before created events,
- # because gamin is the worst ever. anyhow, we
- # specifically handle it here to avoid a warning on
- # every single server startup.
- self.entry_init(event)
- return
+ if (event.filename != self.path and
+ not event.filename.endswith("probed.xml")):
return self.handle_event(event)
def get_probe_data(self, metadata):
@@ -155,9 +134,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
candidates = self.get_matching(metadata)
candidates.sort(key=operator.attrgetter('specific'))
for entry in candidates:
- rem = specific_probe_matcher.match(entry.name)
- if not rem:
- rem = probe_matcher.match(entry.name)
+ rem = self.probename.match(entry.name)
pname = rem.group('basename')
if pname not in build:
build[pname] = entry
@@ -176,30 +153,37 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
return ret
-class Probes(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Probing,
- Bcfg2.Server.Plugin.Connector):
+class Probes(Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector,
+ Bcfg2.Server.Plugin.DatabaseBacked):
"""A plugin to gather information from a client machine."""
name = 'Probes'
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
try:
self.probes = ProbeSet(self.data, core.fam, core.encoding,
self.name)
except:
- raise Bcfg2.Server.Plugin.PluginInitError
+ err = sys.exc_info()[1]
+ raise Bcfg2.Server.Plugin.PluginInitError(err)
self.probedata = dict()
self.cgroups = dict()
self.load_data()
- def write_data(self):
+ def write_data(self, client):
"""Write probe data out for use with bcfg2-info."""
+ if self._use_db:
+ return self._write_data_db(client)
+ else:
+ return self._write_data_xml(client)
+
+ def _write_data_xml(self, _):
top = lxml.etree.Element("Probed")
for client, probed in sorted(self.probedata.items()):
cx = lxml.etree.SubElement(top, 'Client', name=client,
@@ -209,20 +193,47 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
value=str(self.probedata[client][probe]))
for group in sorted(self.cgroups[client]):
lxml.etree.SubElement(cx, "Group", name=group)
- data = lxml.etree.tostring(top, encoding='UTF-8',
- xml_declaration=True,
- pretty_print='true')
try:
- datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w')
+ datafile = open(os.path.join(self.data, 'probed.xml'), 'w')
+ datafile.write(lxml.etree.tostring(top, xml_declaration=False,
+ pretty_print='true').decode('UTF-8'))
except IOError:
- self.logger.error("Failed to write probed.xml")
- datafile.write(data.decode('utf-8'))
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to write probed.xml: %s" % err)
+
+ def _write_data_db(self, client):
+ for probe, data in self.probedata[client.hostname].items():
+ pdata = \
+ ProbesDataModel.objects.get_or_create(hostname=client.hostname,
+ probe=probe)[0]
+ if pdata.data != data:
+ pdata.data = data
+ pdata.save()
+ ProbesDataModel.objects.filter(hostname=client.hostname).exclude(probe__in=self.probedata[client.hostname]).delete()
+
+ for group in self.cgroups[client.hostname]:
+ try:
+ ProbesGroupsModel.objects.get(hostname=client.hostname,
+ group=group)
+ except ProbesGroupsModel.DoesNotExist:
+ grp = ProbesGroupsModel(hostname=client.hostname,
+ group=group)
+ grp.save()
+ ProbesGroupsModel.objects.filter(hostname=client.hostname).exclude(group__in=self.cgroups[client.hostname]).delete()
def load_data(self):
+ if self._use_db:
+ return self._load_data_db()
+ else:
+ return self._load_data_xml()
+
+ def _load_data_xml(self):
try:
- data = lxml.etree.parse(self.data + '/probed.xml').getroot()
+ data = lxml.etree.parse(os.path.join(self.data, 'probed.xml'),
+ parser=Bcfg2.Server.XMLParser).getroot()
except:
- self.logger.error("Failed to read file probed.xml")
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read file probed.xml: %s" % err)
return
self.probedata = {}
self.cgroups = {}
@@ -231,12 +242,25 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
ClientProbeDataSet(timestamp=client.get("timestamp"))
self.cgroups[client.get('name')] = []
for pdata in client:
- if (pdata.tag == 'Probe'):
+ if pdata.tag == 'Probe':
self.probedata[client.get('name')][pdata.get('name')] = \
- ProbeData(pdata.get('value'))
- elif (pdata.tag == 'Group'):
+ ProbeData(pdata.get("value"))
+ elif pdata.tag == 'Group':
self.cgroups[client.get('name')].append(pdata.get('name'))
+ def _load_data_db(self):
+ self.probedata = {}
+ self.cgroups = {}
+ for pdata in ProbesDataModel.objects.all():
+ if pdata.hostname not in self.probedata:
+ self.probedata[pdata.hostname] = \
+ ClientProbeDataSet(timestamp=time.mktime(pdata.timestamp.timetuple()))
+ self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data)
+ for pgroup in ProbesGroupsModel.objects.all():
+ if pgroup.hostname not in self.cgroups:
+ self.cgroups[pgroup.hostname] = []
+ self.cgroups[pgroup.hostname].append(pgroup.group)
+
def GetProbes(self, meta, force=False):
"""Return a set of probes for execution on client."""
return self.probes.get_probe_data(meta)
@@ -246,25 +270,24 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
self.probedata[client.hostname] = ClientProbeDataSet()
for data in datalist:
self.ReceiveDataItem(client, data)
- self.write_data()
+ self.write_data(client)
def ReceiveDataItem(self, client, data):
"""Receive probe results pertaining to client."""
if client.hostname not in self.cgroups:
self.cgroups[client.hostname] = []
+ if client.hostname not in self.probedata:
+ self.probedata[client.hostname] = ClientProbeDataSet()
if data.text == None:
- self.logger.error("Got null response to probe %s from %s" % \
- (data.get('name'), client.hostname))
- try:
- self.probedata[client.hostname].update({data.get('name'):
+ self.logger.info("Got null response to probe %s from %s" %
+ (data.get('name'), client.hostname))
+ self.probedata[client.hostname].update({data.get('name'):
ProbeData('')})
- except KeyError:
- self.probedata[client.hostname] = \
- ClientProbeDataSet([(data.get('name'), ProbeData(''))])
return
dlines = data.text.split('\n')
- self.logger.debug("%s:probe:%s:%s" % (client.hostname,
- data.get('name'), [line.strip() for line in dlines]))
+ self.logger.debug("%s:probe:%s:%s" %
+ (client.hostname, data.get('name'),
+ [line.strip() for line in dlines]))
for line in dlines[:]:
if line.split(':')[0] == 'group':
newgroup = line.split(':')[1].strip()
@@ -272,11 +295,7 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
self.cgroups[client.hostname].append(newgroup)
dlines.remove(line)
dobj = ProbeData("\n".join(dlines))
- try:
- self.probedata[client.hostname].update({data.get('name'): dobj})
- except KeyError:
- self.probedata[client.hostname] = \
- ClientProbeDataSet([(data.get('name'), dobj)])
+ self.probedata[client.hostname].update({data.get('name'): dobj})
def get_additional_groups(self, meta):
return self.cgroups.get(meta.hostname, list())
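
# Illustrative sketch (not part of the patch): ProbeData above subclasses str
# so existing callers keep treating probe output as plain text, while lazy
# properties parse the same text on demand.  Minimal standalone version with
# a JSON property only:
import json

class LazyProbeText(str):
    def __new__(cls, data):
        return str.__new__(cls, data)

    def __init__(self, data):
        str.__init__(self)
        self._json = None

    @property
    def json(self):
        if self._json is None:
            try:
                self._json = json.loads(self)
            except ValueError:
                pass                      # not JSON; property stays None
        return self._json

# text = LazyProbeText('{"role": "webserver"}')
# text.startswith("{")   -> True  (still behaves like a str)
# text.json["role"]      -> "webserver"
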
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index 680881858..78019933a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -5,26 +5,53 @@ import copy
import logging
import lxml.etree
import Bcfg2.Server.Plugin
+try:
+ from Bcfg2.Encryption import ssl_decrypt, EVPError
+ have_crypto = True
+except ImportError:
+ have_crypto = False
+
+logger = logging.getLogger(__name__)
+
+SETUP = None
+
+def passphrases():
+ section = "encryption"
+ if SETUP.cfp.has_section(section):
+ return dict([(o, SETUP.cfp.get(section, o))
+ for o in SETUP.cfp.options(section)])
+ else:
+ return dict()
-logger = logging.getLogger('Bcfg2.Plugins.Properties')
class PropertyFile(Bcfg2.Server.Plugin.StructFile):
"""Class for properties files."""
def write(self):
""" Write the data in this data structure back to the property
file """
- if self.validate_data():
- try:
- open(self.name,
- "wb").write(lxml.etree.tostring(self.xdata,
- pretty_print=True))
- return True
- except IOError:
- err = sys.exc_info()[1]
- logger.error("Failed to write %s: %s" % (self.name, err))
- return False
- else:
- return False
+ if not SETUP.cfp.getboolean("properties", "writes_enabled",
+ default=True):
+ msg = "Properties files write-back is disabled in the configuration"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ try:
+ self.validate_data()
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ msg = "Cannot write %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ try:
+ open(self.name,
+ "wb").write(lxml.etree.tostring(self.xdata,
+ xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ msg = "Failed to write %s: %s" % (self.name, err)
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def validate_data(self):
""" ensure that the data in this object validates against the
@@ -34,19 +61,51 @@ class PropertyFile(Bcfg2.Server.Plugin.StructFile):
try:
schema = lxml.etree.XMLSchema(file=schemafile)
except:
- logger.error("Failed to process schema for %s" % self.name)
- return False
+ err = sys.exc_info()[1]
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to process schema for %s: %s" % (self.name, err))
else:
# no schema exists
return True
if not schema.validate(self.xdata):
- logger.error("Data for %s fails to validate; run bcfg2-lint for "
- "more details" % self.name)
- return False
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Data for %s fails to validate; run bcfg2-lint for more details" % self.name)
else:
return True
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+ if self.xdata.get("encryption", "false").lower() != "false":
+ if not have_crypto:
+ msg = "Properties: M2Crypto is not available: %s" % self.name
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ for el in self.xdata.xpath("*[@encrypted]"):
+ try:
+ el.text = self._decrypt(el)
+ except EVPError:
+ msg = "Failed to decrypt %s element in %s" % (el.tag,
+ self.name)
+ logger.error(msg)
+                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def _decrypt(self, element):
+ if not element.text.strip():
+ return
+ passes = passphrases()
+ try:
+ passphrase = passes[element.get("encrypted")]
+ try:
+ return ssl_decrypt(element.text, passphrase)
+ except EVPError:
+ # error is raised below
+ pass
+ except KeyError:
+ for passwd in passes.values():
+ try:
+ return ssl_decrypt(element.text, passwd)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
__child__ = PropertyFile
@@ -62,6 +121,7 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
name = 'Properties'
def __init__(self, core, datastore):
+ global SETUP
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
try:
@@ -72,5 +132,16 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
(e.strerror, e.filename))
raise Bcfg2.Server.Plugin.PluginInitError
- def get_additional_data(self, _):
- return copy.copy(self.store.entries)
+ SETUP = core.setup
+
+ def get_additional_data(self, metadata):
+ autowatch = self.core.setup.cfp.getboolean("properties", "automatch",
+ default=False)
+ rv = dict()
+ for fname, pfile in self.store.entries.items():
+ if (autowatch or
+ pfile.xdata.get("automatch", "false").lower() == "true"):
+ rv[fname] = pfile.XMLMatch(metadata)
+ else:
+ rv[fname] = copy.copy(pfile)
+ return rv
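
# Illustrative sketch (not part of the patch): the _decrypt() logic above uses
# the passphrase named by the "encrypted" attribute when it is configured, and
# only falls back to trying every configured passphrase when that name is
# unknown.  DecryptError/decrypt below are stand-ins for EVPError/ssl_decrypt.
class DecryptError(Exception):
    pass

def decrypt_with_fallback(ciphertext, wanted, passphrases, decrypt):
    """passphrases: dict of name -> passphrase; decrypt() raises DecryptError."""
    if wanted in passphrases:
        try:
            return decrypt(ciphertext, passphrases[wanted])
        except DecryptError:
            pass                 # a named passphrase that fails is fatal below
    else:
        for passwd in passphrases.values():
            try:
                return decrypt(ciphertext, passwd)
            except DecryptError:
                pass
    raise DecryptError("Failed to decrypt")
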
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
new file mode 100644
index 000000000..46182e9a2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -0,0 +1,117 @@
+import os
+import sys
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+from subprocess import Popen, PIPE
+
+try:
+ from syck import load as yaml_load, error as yaml_error
+except ImportError:
+ try:
+ from yaml import load as yaml_load, YAMLError as yaml_error
+ except ImportError:
+ raise ImportError("No yaml library could be found")
+
+class PuppetENCFile(Bcfg2.Server.Plugin.FileBacked):
+ def HandleEvent(self, event=None):
+ return
+
+
+class PuppetENC(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector,
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.DirectoryBacked):
+ """ A plugin to run Puppet external node classifiers
+ (http://docs.puppetlabs.com/guides/external_nodes.html) """
+ name = 'PuppetENC'
+ experimental = True
+ __child__ = PuppetENCFile
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+ self.cache = dict()
+
+ def _run_encs(self, metadata):
+ cache = dict(groups=[], params=dict())
+ for enc in self.entries.keys():
+ epath = os.path.join(self.data, enc)
+ self.debug_log("PuppetENC: Running ENC %s for %s" %
+ (enc, metadata.hostname))
+ proc = Popen([epath, metadata.hostname], stdin=PIPE, stdout=PIPE,
+ stderr=PIPE)
+ (out, err) = proc.communicate()
+ rv = proc.wait()
+ if rv != 0:
+ msg = "PuppetENC: Error running ENC %s for %s (%s): %s" % \
+ (enc, metadata.hostname, rv)
+ self.logger.error("%s: %s" % (msg, err))
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ if err:
+ self.debug_log("ENC Error: %s" % err)
+
+ try:
+ yaml = yaml_load(out)
+ self.debug_log("Loaded data from %s for %s: %s" %
+ (enc, metadata.hostname, yaml))
+ except yaml_error:
+ err = sys.exc_info()[1]
+ msg = "Error decoding YAML from %s for %s: %s" % \
+ (enc, metadata.hostname, err)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ groups = []
+ if "classes" in yaml:
+ # stock Puppet ENC output format
+ groups = yaml['classes']
+ elif "groups" in yaml:
+ # more Bcfg2-ish output format
+ groups = yaml['groups']
+ if groups:
+ if isinstance(groups, list):
+ self.debug_log("ENC %s adding groups to %s: %s" %
+ (enc, metadata.hostname, groups))
+ cache['groups'].extend(groups)
+ else:
+ self.debug_log("ENC %s adding groups to %s: %s" %
+ (enc, metadata.hostname, groups.keys()))
+ for group, params in groups.items():
+ cache['groups'].append(group)
+ if params:
+ cache['params'].update(params)
+ if "parameters" in yaml and yaml['parameters']:
+ cache['params'].update(yaml['parameters'])
+ if "environment" in yaml:
+ self.logger.info("Ignoring unsupported environment section of "
+ "ENC %s for %s" % (enc, metadata.hostname))
+
+ self.cache[metadata.hostname] = cache
+
+ def get_additional_groups(self, metadata):
+ if metadata.hostname not in self.cache:
+ self._run_encs(metadata)
+ return self.cache[metadata.hostname]['groups']
+
+ def get_additional_data(self, metadata):
+ if metadata.hostname not in self.cache:
+ self._run_encs(metadata)
+ return self.cache[metadata.hostname]['params']
+
+ def end_client_run(self, metadata):
+ """ clear the entire cache at the end of each client run. this
+ guarantees that each client will run all ENCs at or near the
+ start of each run; we have to clear the entire cache instead
+ of just the cache for this client because a client that builds
+ templates that use metadata for other clients will populate
+ the cache for those clients, which we don't want. This makes
+ the caching less than stellar, but it does prevent multiple
+ runs of ENCs for a single host a) for groups and data
+ separately; and b) when a single client's metadata is
+ generated multiple times by separate templates """
+ self.cache = dict()
+
+ def end_statistics(self, metadata):
+        self.end_client_run(metadata)
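
# Illustrative sketch (not part of the patch): a stock Puppet ENC prints YAML
# like the sample below; PuppetENC turns "classes" (or a Bcfg2-style "groups"
# key) into groups and "parameters" into connector data.  Uses PyYAML's
# safe_load; the sample host data is made up.
import yaml

SAMPLE_ENC_OUTPUT = """
classes:
  - webserver
  - ntp::client
parameters:
  ntp_server: ntp1.example.com
"""

def parse_enc_output(text):
    data = yaml.safe_load(text)
    # simplification: if "classes" is a dict of class -> params, this keeps
    # only the class names, whereas the plugin also merges the params
    groups = list(data.get("classes") or data.get("groups") or [])
    params = dict(data.get("parameters") or {})
    return groups, params

# parse_enc_output(SAMPLE_ENC_OUTPUT)
# -> (['webserver', 'ntp::client'], {'ntp_server': 'ntp1.example.com'})
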
diff --git a/src/lib/Bcfg2/Server/Plugins/SEModules.py b/src/lib/Bcfg2/Server/Plugins/SEModules.py
new file mode 100644
index 000000000..62b3fb10a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/SEModules.py
@@ -0,0 +1,45 @@
+import os
+import logging
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64encode
+
+logger = logging.getLogger(__name__)
+
+class SEModuleData(Bcfg2.Server.Plugin.SpecificData):
+ def bind_entry(self, entry, _):
+ entry.set('encoding', 'base64')
+ entry.text = b64encode(self.data)
+
+
+class SEModules(Bcfg2.Server.Plugin.GroupSpool):
+ """ Handle SELinux 'module' entries """
+ name = 'SEModules'
+ __author__ = 'chris.a.st.pierre@gmail.com'
+ es_child_cls = SEModuleData
+ entry_type = 'SELinux'
+ experimental = True
+
+ def _get_module_name(self, entry):
+ """ GroupSpool stores entries as /foo.pp, but we want people
+ to be able to specify module entries as name='foo' or
+ name='foo.pp', so we put this abstraction in between """
+ if entry.get("name").endswith(".pp"):
+ name = entry.get("name")
+ else:
+ name = entry.get("name") + ".pp"
+ return "/" + name
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag in self.Entries and entry.get('type') == 'module':
+ return self._get_module_name(entry) in self.Entries[entry.tag]
+ return Bcfg2.Server.Plugin.GroupSpool.HandlesEntry(self, entry,
+ metadata)
+
+ def HandleEntry(self, entry, metadata):
+ entry.set("name", self._get_module_name(entry))
+ return self.Entries[entry.tag][entry.get("name")](entry, metadata)
+
+ def add_entry(self, event):
+ self.filename_pattern = \
+ os.path.basename(os.path.dirname(self.event_path(event)))
+ Bcfg2.Server.Plugin.GroupSpool.add_entry(self, event)
diff --git a/src/lib/Bcfg2/Server/Plugins/SGenshi.py b/src/lib/Bcfg2/Server/Plugins/SGenshi.py
deleted file mode 100644
index 0ba08125e..000000000
--- a/src/lib/Bcfg2/Server/Plugins/SGenshi.py
+++ /dev/null
@@ -1,97 +0,0 @@
-'''This module implements a templating generator based on Genshi'''
-
-import genshi.input
-import genshi.template
-import lxml.etree
-import logging
-import copy
-import sys
-import os.path
-
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Plugins.TGenshi
-
-logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
-
-
-class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
- Bcfg2.Server.Plugin.StructFile):
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
- specific, encoding)
- Bcfg2.Server.Plugin.StructFile.__init__(self, name)
-
- def get_xml_value(self, metadata):
- if not hasattr(self, 'template'):
- logger.error("No parsed template information for %s" % (self.name))
- raise Bcfg2.Server.Plugin.PluginExecutionError
- try:
- stream = self.template.generate(metadata=metadata).filter( \
- Bcfg2.Server.Plugins.TGenshi.removecomment)
- data = lxml.etree.XML(stream.render('xml', strip_whitespace=False))
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata, data):
- bundle.append(copy.deepcopy(item))
- return bundle
- except LookupError:
- lerror = sys.exc_info()[1]
- logger.error('Genshi lookup error: %s' % lerror)
- except genshi.template.TemplateError:
- terror = sys.exc_info()[1]
- logger.error('Genshi template error: %s' % terror)
- raise
- except genshi.input.ParseError:
- perror = sys.exc_info()[1]
- logger.error('Genshi parse error: %s' % perror)
- raise
-
- def Match(self, metadata, xdata):
- """Return matching fragments of parsed template."""
- rv = []
- for child in xdata.getchildren():
- rv.extend(self._match(child, metadata))
- logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
- return rv
-
-class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
-
- def __init__(self, path, fam, encoding):
- fpattern = '\S+\.xml'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- SGenshiTemplateFile, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- '''passthrough event handler for old calling convention'''
- if event.filename != self.path:
- return self.handle_event(event)
-
- def BuildStructures(self, metadata):
- """Build SGenshi structures."""
- ret = []
- for entry in self.get_matching(metadata):
- try:
- ret.append(entry.get_xml_value(metadata))
- except:
- logger.error("SGenshi: Failed to template file %s" % entry.name)
- return ret
-
-
-class SGenshi(SGenshiEntrySet,
- Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure):
- """The SGenshi plugin provides templated structures."""
- name = 'SGenshi'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- try:
- SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding)
- except:
- logger.error("Failed to load %s repository; disabling %s" \
- % (self.name, self.name))
- raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index a1a29727f..cbe8d0d9b 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -1,20 +1,16 @@
"""This module manages ssh key files for bcfg2"""
-import binascii
import re
import os
+import sys
import socket
import shutil
-import sys
+import logging
import tempfile
from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import u_str
+from Bcfg2.Bcfg2Py3k import u_str, reduce, b64encode
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
-import logging
logger = logging.getLogger(__name__)
class KeyData(Bcfg2.Server.Plugin.SpecificData):
@@ -31,7 +27,7 @@ class KeyData(Bcfg2.Server.Plugin.SpecificData):
def bind_entry(self, entry, metadata):
entry.set('type', 'file')
if entry.get('encoding') == 'base64':
- entry.text = binascii.b2a_base64(self.data)
+ entry.text = b64encode(self.data)
else:
try:
entry.text = u_str(self.data, self.encoding)
diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
index 0072dc62d..9d1c51a08 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
@@ -3,12 +3,15 @@ import Bcfg2.Options
import lxml.etree
import posixpath
import tempfile
-import pipes
import os
from subprocess import Popen, PIPE, STDOUT
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
"""
@@ -22,6 +25,10 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
cert_specs = {}
CAs = {}
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.GroupSpool.__init__(self, core, datastore)
+ self.infoxml = dict()
+
def HandleEvent(self, event=None):
"""
Updates which files this plugin handles based upon filesystem events.
@@ -37,19 +44,21 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
else:
ident = self.handles[event.requestID][:-1]
- fname = "".join([ident, '/', event.filename])
+ fname = os.path.join(ident, event.filename)
if event.filename.endswith('.xml'):
if action in ['exists', 'created', 'changed']:
if event.filename.endswith('key.xml'):
- key_spec = dict(list(lxml.etree.parse(epath).find('Key').items()))
+ key_spec = dict(list(lxml.etree.parse(epath,
+ parser=Bcfg2.Server.XMLParser).find('Key').items()))
self.key_specs[ident] = {
'bits': key_spec.get('bits', 2048),
'type': key_spec.get('type', 'rsa')
}
self.Entries['Path'][ident] = self.get_key
elif event.filename.endswith('cert.xml'):
- cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items()))
+ cert_spec = dict(list(lxml.etree.parse(epath,
+ parser=Bcfg2.Server.XMLParser).find('Cert').items()))
ca = cert_spec.get('ca', 'default')
self.cert_specs[ident] = {
'ca': ca,
@@ -67,6 +76,9 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
cp.read(self.core.cfile)
self.CAs[ca] = dict(cp.items('sslca_' + ca))
self.Entries['Path'][ident] = self.get_cert
+ elif event.filename.endswith("info.xml"):
+ self.infoxml[ident] = Bcfg2.Server.Plugin.InfoXML(epath)
+ self.infoxml[ident].HandleEvent(event)
if action == 'deleted':
if ident in self.Entries['Path']:
del self.Entries['Path'][ident]
@@ -90,28 +102,27 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
either grabs a prexisting key hostfile, or triggers the generation
of a new key if one doesn't exist.
"""
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
# check if we already have a hostfile, or need to generate a new key
# TODO: verify key fits the specs
path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path),
+ metadata.hostname))
if filename not in list(self.entries.keys()):
key = self.build_key(filename, entry, metadata)
open(self.data + filename, 'w').write(key)
entry.text = key
- self.entries[filename] = self.__child__("%s%s" % (self.data,
- filename))
+ self.entries[filename] = self.__child__(self.data + filename)
self.entries[filename].HandleEvent()
else:
entry.text = self.entries[filename].data
+ entry.set("type", "file")
+ if path in self.infoxml:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata,
+ infoxml=self.infoxml[path])
+ else:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata)
+
def build_key(self, filename, entry, metadata):
"""
generates a new key according the the specification
@@ -130,56 +141,61 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
either grabs a prexisting cert hostfile, or triggers the generation
of a new cert if one doesn't exist.
"""
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path),
+ metadata.hostname))
# first - ensure we have a key to work with
key = self.cert_specs[entry.get('name')].get('key')
- key_filename = "".join([key, '/', key.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ key_filename = os.path.join(key, "%s.H_%s" % (os.path.basename(key),
+ metadata.hostname))
if key_filename not in self.entries:
e = lxml.etree.Element('Path')
- e.attrib['name'] = key
+ e.set('name', key)
self.core.Bind(e, metadata)
# check if we have a valid hostfile
- if filename in list(self.entries.keys()) and self.verify_cert(filename,
- key_filename,
- entry):
+ if (filename in list(self.entries.keys()) and
+ self.verify_cert(filename, key_filename, entry)):
entry.text = self.entries[filename].data
else:
cert = self.build_cert(key_filename, entry, metadata)
open(self.data + filename, 'w').write(cert)
- self.entries[filename] = self.__child__("%s%s" % (self.data,
- filename))
+ self.entries[filename] = self.__child__(self.data + filename)
self.entries[filename].HandleEvent()
entry.text = cert
+ entry.set("type", "file")
+ if path in self.infoxml:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata,
+ infoxml=self.infoxml[path])
+ else:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata)
+
def verify_cert(self, filename, key_filename, entry):
- if self.verify_cert_against_ca(filename, entry):
- if self.verify_cert_against_key(filename, key_filename):
- return True
- return False
+ do_verify = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('verify_certs', True)
+ if do_verify:
+ return (self.verify_cert_against_ca(filename, entry) and
+ self.verify_cert_against_key(filename, key_filename))
+ return True
def verify_cert_against_ca(self, filename, entry):
"""
check that a certificate validates against the ca cert,
and that it has not expired.
"""
- chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
+ chaincert = \
+ self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
cert = self.data + filename
- res = Popen(["openssl", "verify", "-CAfile", chaincert, cert],
+ res = Popen(["openssl", "verify", "-untrusted", chaincert, "-purpose",
+ "sslserver", cert],
stdout=PIPE, stderr=STDOUT).stdout.read()
if res == cert + ": OK\n":
+ self.debug_log("SSLCA: %s verified successfully against CA" %
+ entry.get("name"))
return True
+ self.logger.warning("SSLCA: %s failed verification against CA: %s" %
+ (entry.get("name"), res))
return False
def verify_cert_against_key(self, filename, key_filename):
@@ -188,14 +204,20 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
"""
cert = self.data + filename
key = self.data + key_filename
- cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" %
- pipes.quote(cert))
- cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
- cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" %
- pipes.quote(key))
- key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
+ cert_md5 = \
+ md5(Popen(["openssl", "x509", "-noout", "-modulus", "-in", cert],
+ stdout=PIPE,
+ stderr=STDOUT).stdout.read().strip()).hexdigest()
+ key_md5 = \
+ md5(Popen(["openssl", "rsa", "-noout", "-modulus", "-in", key],
+ stdout=PIPE,
+ stderr=STDOUT).stdout.read().strip()).hexdigest()
if cert_md5 == key_md5:
+ self.debug_log("SSLCA: %s verified successfully against key %s" %
+ (filename, key_filename))
return True
+ self.logger.warning("SSLCA: %s failed verification against key %s" %
+ (filename, key_filename))
return False
def build_cert(self, key_filename, entry, metadata):
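
# Illustrative sketch (not part of the patch): the shell pipeline that was
# removed above ("openssl ... | openssl md5") is replaced by hashing the
# modulus output in Python, avoiding the shell entirely.  File paths in the
# usage note are placeholders.
from hashlib import md5
from subprocess import Popen, PIPE, STDOUT

def modulus_digest(args):
    out = Popen(args, stdout=PIPE, stderr=STDOUT).stdout.read()
    return md5(out.strip()).hexdigest()

def cert_matches_key(certfile, keyfile):
    cert_md5 = modulus_digest(["openssl", "x509", "-noout", "-modulus",
                               "-in", certfile])
    key_md5 = modulus_digest(["openssl", "rsa", "-noout", "-modulus",
                              "-in", keyfile])
    return cert_md5 == key_md5

# cert_matches_key("/etc/pki/tls/certs/host.crt",
#                  "/etc/pki/tls/private/host.key")
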
diff --git a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
new file mode 100644
index 000000000..aad92b7c7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
@@ -0,0 +1,32 @@
+import Bcfg2.Server.Plugin
+
+class ServiceCompat(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator):
+ """ Use old-style service modes for older clients """
+ name = 'ServiceCompat'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ mode_map = {('true', 'true'): 'default',
+ ('interactive', 'true'): 'interactive_only',
+ ('false', 'false'): 'manual'}
+
+ def validate_structures(self, metadata, structures):
+ """ Apply defaults """
+ if metadata.version_info and metadata.version_info > (1, 3, 0, '', 0):
+ # do not care about a client that is _any_ 1.3.0 release
+ # (including prereleases and RCs)
+ return
+
+ for struct in structures:
+ for entry in struct.xpath("//BoundService|//Service"):
+ mode_key = (entry.get("restart", "true").lower(),
+ entry.get("install", "true").lower())
+ try:
+ mode = self.mode_map[mode_key]
+ except KeyError:
+ self.logger.info("Could not map restart and install "
+ "settings of %s:%s to an old-style "
+ "Service mode for %s; using 'manual'" %
+ (entry.tag, entry.get("name"),
+ metadata.hostname))
+ mode = "manual"
+ entry.set("mode", mode)
diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
index aeb3b9f74..e62638b4f 100644
--- a/src/lib/Bcfg2/Server/Plugins/Snapshots.py
+++ b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
@@ -1,9 +1,5 @@
-#import lxml.etree
import logging
-import binascii
import difflib
-#import sqlalchemy
-#import sqlalchemy.orm
import Bcfg2.Server.Plugin
import Bcfg2.Server.Snapshots
import Bcfg2.Logger
@@ -13,7 +9,7 @@ import time
import threading
# Compatibility import
-from Bcfg2.Bcfg2Py3k import Queue
+from Bcfg2.Bcfg2Py3k import Queue, u_str, b64decode
logger = logging.getLogger('Snapshots')
@@ -28,14 +24,6 @@ datafields = {
}
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
-
-
def build_snap_ent(entry):
basefields = []
if entry.tag in ['Package', 'Service']:
@@ -52,13 +40,12 @@ def build_snap_ent(entry):
if entry.get('encoding', 'ascii') == 'ascii':
desired['contents'] = u_str(entry.text)
else:
- desired['contents'] = u_str(binascii.a2b_base64(entry.text))
+ desired['contents'] = u_str(b64decode(entry.text))
if 'current_bfile' in entry.attrib:
- state['contents'] = u_str(binascii.a2b_base64( \
- entry.get('current_bfile')))
+ state['contents'] = u_str(b64decode(entry.get('current_bfile')))
elif 'current_bdiff' in entry.attrib:
- diff = binascii.a2b_base64(entry.get('current_bdiff'))
+ diff = b64decode(entry.get('current_bdiff'))
state['contents'] = u_str( \
'\n'.join(difflib.restore(diff.split('\n'), 1)))
@@ -69,14 +56,12 @@ def build_snap_ent(entry):
return [desired, state]
-class Snapshots(Bcfg2.Server.Plugin.Statistics,
- Bcfg2.Server.Plugin.Plugin):
+class Snapshots(Bcfg2.Server.Plugin.Statistics):
name = 'Snapshots'
experimental = True
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
+ Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
self.work_queue = Queue()
self.loader = threading.Thread(target=self.load_snapshot)
diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py
index 265ef95a8..984efb76c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Statistics.py
+++ b/src/lib/Bcfg2/Server/Plugins/Statistics.py
@@ -1,15 +1,14 @@
'''This file manages the statistics collected by the BCFG2 Server'''
-import binascii
import copy
import difflib
import logging
-from lxml.etree import XML, SubElement, Element, XMLSyntaxError
import lxml.etree
import os
+import sys
from time import asctime, localtime, time, strptime, mktime
import threading
-
+from Bcfg2.Bcfg2Py3k import b64decode
import Bcfg2.Server.Plugin
@@ -19,7 +18,7 @@ class StatisticsStore(object):
def __init__(self, filename):
self.filename = filename
- self.element = Element('Dummy')
+ self.element = lxml.etree.Element('Dummy')
self.dirty = 0
self.lastwrite = 0
self.logger = logging.getLogger('Bcfg2.Server.Statistics')
@@ -35,7 +34,8 @@ class StatisticsStore(object):
ioerr = sys.exc_info()[1]
self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
else:
- fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True))
+ fout.write(lxml.etree.tostring(self.element,
+ xml_declaration=False).decode('UTF-8'))
fout.close()
os.rename(self.filename + '.new', self.filename)
self.dirty = 0
@@ -47,11 +47,11 @@ class StatisticsStore(object):
fin = open(self.filename, 'r')
data = fin.read()
fin.close()
- self.element = XML(data)
+ self.element = lxml.etree.XML(data)
self.dirty = 0
- except (IOError, XMLSyntaxError):
+ except (IOError, lxml.etree.XMLSyntaxError):
self.logger.error("Creating new statistics file %s"%(self.filename))
- self.element = Element('ConfigStatistics')
+ self.element = lxml.etree.Element('ConfigStatistics')
self.WriteBack()
self.dirty = 0
@@ -77,7 +77,7 @@ class StatisticsStore(object):
nummatch = len(nodes)
if nummatch == 0:
# Create an entry for this node
- node = SubElement(self.element, 'Node', name=client)
+ node = lxml.etree.SubElement(self.element, 'Node', name=client)
elif nummatch == 1 and not node_dirty:
# Delete old instance
node = nodes[0]
@@ -112,13 +112,11 @@ class StatisticsStore(object):
return (now-utime) > secondsPerDay
-class Statistics(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
+class Statistics(Bcfg2.Server.Plugin.ThreadedStatistics,
Bcfg2.Server.Plugin.PullSource):
name = 'Statistics'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullSource.__init__(self)
fpath = "%s/etc/statistics.xml" % datastore
@@ -151,9 +149,9 @@ class Statistics(Bcfg2.Server.Plugin.Plugin,
if cfentry.get('sensitive') in ['true', 'True']:
raise Bcfg2.Server.Plugin.PluginExecutionError
elif 'current_bfile' in cfentry.attrib:
- contents = binascii.a2b_base64(cfentry.get('current_bfile'))
+ contents = b64decode(cfentry.get('current_bfile'))
elif 'current_bdiff' in cfentry.attrib:
- diff = binascii.a2b_base64(cfentry.get('current_bdiff'))
+ diff = b64decode(cfentry.get('current_bdiff'))
contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
else:
contents = None
diff --git a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
deleted file mode 100644
index f4232ad5c..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This generator provides service mappings."""
-
-import Bcfg2.Server.Plugin
-
-
-class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
- """This is a generator that handles service assignments."""
- name = 'Svcmgr'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
index 8879fdef1..2bf475363 100644
--- a/src/lib/Bcfg2/Server/Plugins/TCheetah.py
+++ b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
@@ -1,13 +1,11 @@
'''This module implements a templating generator based on Cheetah'''
-import binascii
import logging
import sys
import traceback
import Bcfg2.Server.Plugin
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
+
+from Bcfg2.Bcfg2Py3k import unicode, b64encode
logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
@@ -60,7 +58,7 @@ class TemplateFile:
else:
if entry.get('encoding') == 'base64':
# take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(self.template)
+ entry.text = b64encode(self.template)
else:
entry.text = unicode(str(self.template), self.encoding)
except:
@@ -78,3 +76,4 @@ class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
__author__ = 'bcfg-dev@mcs.anl.gov'
filename_pattern = 'template'
es_child_cls = TemplateFile
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
index c4dd40614..c7335a0c0 100644
--- a/src/lib/Bcfg2/Server/Plugins/TGenshi.py
+++ b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
@@ -1,12 +1,10 @@
"""This module implements a templating generator based on Genshi."""
-import binascii
import logging
import sys
import Bcfg2.Server.Plugin
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
+
+from Bcfg2.Bcfg2Py3k import unicode, b64encode
logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
@@ -18,7 +16,7 @@ try:
TextTemplate, MarkupTemplate, TemplateError
except ImportError:
logger.error("TGenshi: Failed to import Genshi. Is it installed?")
- raise Bcfg2.Server.Plugin.PluginInitError
+ raise
try:
from genshi.template import NewTextTemplate
have_ntt = True
@@ -33,7 +31,7 @@ def removecomment(stream):
yield kind, data, pos
-class TemplateFile:
+class TemplateFile(object):
"""Template file creates Genshi template structures for the loaded file."""
def __init__(self, name, specific, encoding):
@@ -99,7 +97,7 @@ class TemplateFile:
else:
if entry.get('encoding') == 'base64':
# take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(textdata)
+ entry.text = b64encode(textdata)
else:
entry.text = unicode(textdata, self.encoding)
else:
@@ -123,6 +121,10 @@ class TemplateFile:
raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err)
+class TemplateEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ basename_is_regex = True
+
+
class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
"""
The TGenshi generator implements a templating
@@ -132,4 +134,6 @@ class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
name = 'TGenshi'
__author__ = 'jeff@ocjtech.us'
filename_pattern = 'template\.(txt|newtxt|xml)'
+ es_cls = TemplateEntrySet
es_child_cls = TemplateFile
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index 2c0ee03e0..6d92bb530 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -1,26 +1,23 @@
import re
import imp
import sys
+import os
+import glob
import logging
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
logger = logging.getLogger(__name__)
-class HelperModule(Bcfg2.Server.Plugin.SpecificData):
- _module_name_re = re.compile(r'([^/]+?)\.py')
-
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
- encoding)
- match = self._module_name_re.search(self.name)
- if match:
- self._module_name = match.group(1)
- else:
- self._module_name = name
+module_pattern = r'(?P<filename>(?P<module>[^\/]+)\.py)$'
+module_re = re.compile(module_pattern)
+
+class HelperModule(Bcfg2.Server.Plugin.FileBacked):
+ def __init__(self, name, fam=None):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ self._module_name = module_re.search(self.name).group('module')
self._attrs = []
- def handle_event(self, event):
- Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ def Index(self):
try:
module = imp.load_source(self._module_name, self.name)
except:
@@ -34,32 +31,29 @@ class HelperModule(Bcfg2.Server.Plugin.SpecificData):
self.name)
return
+ newattrs = []
for sym in module.__export__:
if sym not in self._attrs and hasattr(self, sym):
logger.warning("TemplateHelper: %s: %s is a reserved keyword, "
"skipping export" % (self.name, sym))
- setattr(self, sym, getattr(module, sym))
+ continue
+ try:
+ setattr(self, sym, getattr(module, sym))
+ newattrs.append(sym)
+ except AttributeError:
+ logger.warning("TemplateHelper: %s: %s exports %s, but has no "
+ "such attribute" % (self.name, sym))
# remove old exports
- for sym in set(self._attrs) - set(module.__export__):
+ for sym in set(self._attrs) - set(newattrs):
delattr(self, sym)
- self._attrs = module.__export__
+ self._attrs = newattrs
-class HelperSet(Bcfg2.Server.Plugin.EntrySet):
+class HelperSet(Bcfg2.Server.Plugin.DirectoryBacked):
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\.py[co])$")
-
- def __init__(self, path, fam, encoding, plugin_name):
- fpattern = '[0-9A-Za-z_\-]+\.py'
- self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- HelperModule, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- if (event.filename != self.path and
- not self.ignore.match(event.filename)):
- return self.handle_event(event)
+ patterns = module_re
+ __child__ = HelperModule
class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
@@ -71,13 +65,69 @@ class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.helpers = HelperSet(self.data, core.fam)
+
+ def get_additional_data(self, _):
+ return dict([(h._module_name, h)
+ for h in self.helpers.entries.values()])
+
+
+class TemplateHelperLint(Bcfg2.Server.Lint.ServerlessPlugin):
+ """ find duplicate Pkgmgr entries with the same priority """
+ def __init__(self, *args, **kwargs):
+ Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
+ hm = HelperModule("foo.py")
+ self.reserved_keywords = dir(hm)
+
+ def Run(self):
+ for fname in os.listdir(os.path.join(self.config['repo'],
+ "TemplateHelper")):
+ helper = os.path.join(self.config['repo'], "TemplateHelper",
+ fname)
+ if not module_re.search(helper) or not self.HandlesFile(helper):
+ continue
+ self.check_helper(helper)
+
+ def check_helper(self, helper):
+        module_name = module_re.search(helper).group('module')
try:
- self.helpers = HelperSet(self.data, core.fam, core.encoding,
- self.name)
+ module = imp.load_source(module_name, helper)
except:
- raise Bcfg2.Server.Plugin.PluginInitError
+ err = sys.exc_info()[1]
+ self.LintError("templatehelper-import-error",
+ "Failed to import %s: %s" %
+ (helper, err))
+ return
- def get_additional_data(self, metadata):
- return dict([(h._module_name, h)
- for h in list(self.helpers.entries.values())])
+ if not hasattr(module, "__export__"):
+ self.LintError("templatehelper-no-export",
+ "%s has no __export__ list" % helper)
+ return
+ elif not isinstance(module.__export__, list):
+ self.LintError("templatehelper-nonlist-export",
+ "__export__ is not a list in %s" % helper)
+ return
+
+ for sym in module.__export__:
+ if not hasattr(module, sym):
+ self.LintError("templatehelper-nonexistent-export",
+ "%s: exported symbol %s does not exist" %
+ (helper, sym))
+ elif sym in self.reserved_keywords:
+ self.LintError("templatehelper-reserved-export",
+ "%s: exported symbol %s is reserved" %
+ (helper, sym))
+ elif sym.startswith("_"):
+ self.LintError("templatehelper-underscore-export",
+ "%s: exported symbol %s starts with underscore" %
+ (helper, sym))
+
+ @classmethod
+ def Errors(cls):
+ return {"templatehelper-import-error":"error",
+ "templatehelper-no-export":"error",
+ "templatehelper-nonlist-export":"error",
+ "templatehelper-nonexistent-export":"error",
+ "templatehelper-reserved-export":"error",
+ "templatehelper-underscore-export":"warning"}
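The reworked HelperModule above keys everything off a module-level __export__ list, and the new TemplateHelperLint plugin enforces the same contract. A minimal sketch of a helper module that would satisfy those checks follows; the file name (macros.py) and the exported names are illustrative assumptions, not part of this change:

    # TemplateHelper/macros.py -- hypothetical example helper
    __export__ = ["default_domain", "quote_list"]

    default_domain = "example.com"

    def quote_list(items):
        """Return the items as a single comma-separated, double-quoted string."""
        return ", ".join('"%s"' % item for item in items)

    def _internal_helper():
        # not listed in __export__, so it is never attached to the HelperModule object
        pass

Every name in __export__ must exist in the module (templatehelper-nonexistent-export), must not collide with an existing HelperModule attribute such as "name" or "Index" (templatehelper-reserved-export), and should not start with an underscore (templatehelper-underscore-export, reported as a warning rather than an error).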
diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py
index b0d21545c..313a1bf03 100644
--- a/src/lib/Bcfg2/Server/Plugins/Trigger.py
+++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -1,43 +1,52 @@
import os
+import pipes
import Bcfg2.Server.Plugin
+from subprocess import Popen, PIPE
+class TriggerFile(Bcfg2.Server.Plugin.FileBacked):
+ def HandleEvent(self, event=None):
+ return
-def async_run(prog, args):
- pid = os.fork()
- if pid:
- os.waitpid(pid, 0)
- else:
- dpid = os.fork()
- if not dpid:
- os.system(" ".join([prog] + args))
- os._exit(0)
+ def __str__(self):
+ return "%s: %s" % (self.__class__.__name__, self.name)
class Trigger(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Statistics):
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.DirectoryBacked):
"""Trigger is a plugin that calls external scripts (on the server)."""
name = 'Trigger'
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- try:
- os.stat(self.data)
- except:
- self.logger.error("Trigger: spool directory %s does not exist; "
- "unloading" % self.data)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def process_statistics(self, metadata, _):
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+
+ def async_run(self, args):
+ pid = os.fork()
+ if pid:
+ os.waitpid(pid, 0)
+ else:
+ dpid = os.fork()
+ if not dpid:
+ self.debug_log("Running %s" % " ".join(pipes.quote(a)
+ for a in args))
+ proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ (out, err) = proc.communicate()
+ rv = proc.wait()
+ if rv != 0:
+ self.logger.error("Trigger: Error running %s (%s): %s" %
+ (args[0], rv, err))
+ elif err:
+ self.debug_log("Trigger: Error: %s" % err)
+ os._exit(0)
+
+
+ def end_client_run(self, metadata):
args = [metadata.hostname, '-p', metadata.profile, '-g',
':'.join([g for g in metadata.groups])]
- for notifier in os.listdir(self.data):
- if ((notifier[-1] == '~') or
- (notifier[:2] == '.#') or
- (notifier[-4:] == '.swp') or
- (notifier in ['SCCS', '.svn', '4913'])):
- continue
- npath = self.data + '/' + notifier
- self.logger.debug("Running %s %s" % (npath, " ".join(args)))
- async_run(npath, args)
+ for notifier in self.entries.keys():
+ npath = os.path.join(self.data, notifier)
+ self.async_run([npath] + args)
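With this change each entry in the Trigger directory is executed at the end of every client run with the argument list built in end_client_run() above, i.e. roughly "<script> <hostname> -p <profile> -g <group1:group2:...>". A hypothetical notifier (the name run-logger.py and the log path are assumptions for illustration only) could consume those arguments like this:

    #!/usr/bin/env python
    # Trigger/run-logger.py -- hypothetical example notifier
    import sys
    import time

    hostname = sys.argv[1]   # first argument: client hostname
    flags = sys.argv[2:]     # remaining: '-p', <profile>, '-g', <group1:group2:...>
    with open("/var/log/bcfg2-trigger.log", "a") as logfile:
        logfile.write("%s finished run for %s (%s)\n"
                      % (time.strftime("%Y-%m-%dT%H:%M:%S"), hostname, " ".join(flags)))

Because async_run() double-forks and the intermediate child exits immediately, the main server process only reaps that short-lived child and never blocks on the notifier itself; a non-zero exit status is logged as an error, and stderr output from a successful run is logged at debug level.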
diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py
index f9f1b4e52..b33eeba28 100644
--- a/src/lib/Bcfg2/Server/Plugins/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/__init__.py
@@ -25,10 +25,8 @@ __all__ = [
'SSHbase',
'Snapshots',
'Statistics',
- 'Svcmgr',
'Svn',
'TCheetah',
'Trigger',
- 'SGenshi',
'TGenshi',
]