Diffstat (limited to 'src/lib/Bcfg2/Server/Plugins')
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/AWSTags.py                   5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bundler.py                   8
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py   39
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py    4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py              1
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Defaults.py                 12
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupLogic.py               13
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ldap.py                    413
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py                 24
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py                 8
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ohai.py                      7
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Apt.py             22
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py           35
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pac.py            145
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py            5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Source.py         120
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py            152
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py       47
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py        54
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py                   34
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Properties.py               41
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/PuppetENC.py                 2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Reporting.py                15
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Rules.py                    27
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py                  35
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TemplateHelper.py           26
26 files changed, 881 insertions, 413 deletions
diff --git a/src/lib/Bcfg2/Server/Plugins/AWSTags.py b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
index 0d6eefaaa..556805bde 100644
--- a/src/lib/Bcfg2/Server/Plugins/AWSTags.py
+++ b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
@@ -172,6 +172,11 @@ class AWSTags(Bcfg2.Server.Plugin.Plugin,
def start_client_run(self, metadata):
self.expire_cache(key=metadata.hostname)
+ if self.core.metadata_cache_mode == 'aggressive':
+ self.logger.warning("AWSTags is incompatible with aggressive "
+ "client metadata caching, try 'cautious' "
+ "or 'initial'")
+ self.core.metadata_cache.expire(metadata.hostname)
def get_additional_data(self, metadata):
return self.get_tags(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index e38eeea89..4f5a79465 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -92,6 +92,10 @@ class Bundler(Plugin,
self.logger.error("Bundler: Failed to render templated bundle "
"%s: %s" % (bundlename, err))
continue
+ except:
+ self.logger.error("Bundler: Unexpected bundler error for %s" %
+ bundlename, exc_info=1)
+ continue
if data.get("independent", "false").lower() == "true":
data.tag = "Independent"
@@ -124,12 +128,12 @@ class Bundler(Plugin,
# dependent bundle -- add it to the list of
# bundles for this client
if child.get("name") not in bundles_added:
- bundles.append(child.get("name"))
+ bundles.add(child.get("name"))
bundles_added.add(child.get("name"))
if child.get('inherit_modification', 'false') == 'true':
if metadata.version_info >= \
Bcfg2VersionInfo('1.4.0pre2'):
- lxml.etree.SubElement(data, 'Bundle',
+ lxml.etree.SubElement(data, 'BoundBundle',
name=child.get('name'))
else:
self.logger.warning(
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
index cff9ff61e..71aec7658 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
@@ -12,14 +12,14 @@ from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
from jinja2 import Environment, FileSystemLoader
HAS_JINJA2 = True
-except ImportError:
- HAS_JINJA2 = False
+ class RelEnvironment(Environment):
+ """Override join_path() to enable relative template paths."""
+ def join_path(self, template, parent):
+ return os.path.join(os.path.dirname(parent), template)
-class RelEnvironment(Environment):
- """Override join_path() to enable relative template paths."""
- def join_path(self, template, parent):
- return os.path.join(os.path.dirname(parent), template)
+except ImportError:
+ HAS_JINJA2 = False
class DefaultJinja2DataProvider(DefaultTemplateDataProvider):
@@ -42,15 +42,16 @@ class CfgJinja2Generator(CfgGenerator):
#: Handle .jinja2 files
__extensions__ = ['jinja2']
- #: ``__loader_cls__`` is the class that will be instantiated to
- #: load the template files. It must implement one public function,
- #: ``load()``, as :class:`genshi.template.TemplateLoader`.
- __loader_cls__ = FileSystemLoader
+ if HAS_JINJA2:
+ #: ``__loader_cls__`` is the class that will be instantiated to
+ #: load the template files. It must implement one public function,
+ #: ``load()``, as :class:`genshi.template.TemplateLoader`.
+ __loader_cls__ = FileSystemLoader
- #: ``__environment_cls__`` is the class that will be instantiated to
- #: store the jinja2 environment. It must implement one public function,
- #: ``get_template()``, as :class:`jinja2.Environment`.
- __environment_cls__ = RelEnvironment
+ #: ``__environment_cls__`` is the class that will be instantiated to
+ #: store the jinja2 environment. It must implement one public
+ #: function, ``get_template()``, as :class:`jinja2.Environment`.
+ __environment_cls__ = RelEnvironment
#: Ignore ``.jinja2_include`` files so they can be used with the
#: Jinja2 ``{% include ... %}`` directive without raising warnings.
@@ -68,7 +69,15 @@ class CfgJinja2Generator(CfgGenerator):
encoding = Bcfg2.Options.setup.encoding
self.loader = self.__loader_cls__('/',
encoding=encoding)
- self.environment = self.__environment_cls__(loader=self.loader)
+ try:
+ # keep_trailing_newline is new in Jinja2 2.7, and will
+ # fail with earlier versions
+ self.environment = \
+ self.__environment_cls__(loader=self.loader,
+ keep_trailing_newline=True)
+ except TypeError:
+ self.environment = \
+ self.__environment_cls__(loader=self.loader)
__init__.__doc__ = CfgGenerator.__init__.__doc__
def get_data(self, entry, metadata):
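
For reference, a minimal sketch (not part of the patch) of how the relative-include environment and the keep_trailing_newline fallback shown above fit together; the searchpath, encoding, and template path mentioned in the comments are illustrative only.

import os
from jinja2 import Environment, FileSystemLoader

class RelEnvironment(Environment):
    """Resolve {% include %} targets relative to the including template."""
    def join_path(self, template, parent):
        return os.path.join(os.path.dirname(parent), template)

loader = FileSystemLoader('/', encoding='utf-8')
try:
    # keep_trailing_newline is only available in Jinja2 >= 2.7
    env = RelEnvironment(loader=loader, keep_trailing_newline=True)
except TypeError:
    env = RelEnvironment(loader=loader)
# env.get_template() can now be handed paths under '/', and templates may use
# {% include 'sibling.jinja2' %} thanks to the join_path() override.
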
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
index a158302be..241bce34c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
@@ -19,8 +19,8 @@ class CfgSSLCAKeyCreator(XMLCfgCreator):
self.logger.info("Cfg: Generating new SSL key for %s" % self.name)
spec = self.XMLMatch(metadata)
key = spec.find("Key")
- if not key:
- key = dict()
+ if key is None:
+ key = {}
ktype = key.get('type', 'rsa')
bits = key.get('bits', '2048')
if ktype == 'rsa':
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index 355e53588..dae03144a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -878,6 +878,7 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
options = Bcfg2.Server.Plugin.GroupSpool.options + [
Bcfg2.Options.BooleanOption(
'--cfg-validation', cf=('cfg', 'validation'), default=True,
+ dest="cfg_validation",
help='Run validation on Cfg files'),
Bcfg2.Options.Option(
cf=('cfg', 'category'), dest="cfg_category",
diff --git a/src/lib/Bcfg2/Server/Plugins/Defaults.py b/src/lib/Bcfg2/Server/Plugins/Defaults.py
index 79e2ca0e2..2242e3825 100644
--- a/src/lib/Bcfg2/Server/Plugins/Defaults.py
+++ b/src/lib/Bcfg2/Server/Plugins/Defaults.py
@@ -1,5 +1,6 @@
"""This generator provides rule-based entry mappings."""
+import Bcfg2.Options
import Bcfg2.Server.Plugin
import Bcfg2.Server.Plugins.Rules
@@ -9,7 +10,10 @@ class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
"""Set default attributes on bound entries"""
__author__ = 'bcfg-dev@mcs.anl.gov'
- options = Bcfg2.Server.Plugin.PrioDir.options
+ options = Bcfg2.Server.Plugin.PrioDir.options + [
+ Bcfg2.Options.BooleanOption(
+ cf=("defaults", "replace_name"), dest="defaults_replace_name",
+ help="Replace %{name} in attributes with name of target entry")]
# Rules is a Generator that happens to implement all of the
# functionality we want, so we overload it, but Defaults should
@@ -41,3 +45,9 @@ class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
def _regex_enabled(self):
""" Defaults depends on regex matching, so force it enabled """
return True
+
+ @property
+ def _replace_name_enabled(self):
+ """ Return True if the replace_name feature is enabled,
+ False otherwise """
+ return Bcfg2.Options.setup.defaults_replace_name
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
index b60f60e65..184b362f9 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
@@ -13,6 +13,17 @@ class GroupLogicConfig(Bcfg2.Server.Plugin.StructFile):
create = lxml.etree.Element("GroupLogic",
nsmap=dict(py="http://genshi.edgewall.org/"))
+ def __init__(self, filename, core):
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename,
+ should_monitor=True)
+ self.core = core
+
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+
def _match(self, item, metadata, *args):
if item.tag == 'Group' and not len(item.getchildren()):
return [item]
@@ -39,7 +50,7 @@ class GroupLogic(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"),
- should_monitor=True)
+ core=core)
self._local = local()
def get_additional_groups(self, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index 553ddbc47..770419ba5 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -1,169 +1,264 @@
+""" A plugin to fetch data from a LDAP directory """
+
import imp
-import logging
+import os
import sys
import time
import traceback
-import Bcfg2.Server.Plugin
+from functools import partial
-logger = logging.getLogger('Bcfg2.Plugins.Ldap')
+import Bcfg2.Options
+import Bcfg2.Server.Cache
+import Bcfg2.Server.Plugin
+from Bcfg2.Logger import Debuggable
+from Bcfg2.Utils import ClassName, safe_module_name
try:
import ldap
+ HAS_LDAP = True
except ImportError:
- logger.error("Unable to load ldap module. Is python-ldap installed?")
- raise ImportError
+ HAS_LDAP = False
-# time in seconds between retries after failed LDAP connection
-RETRY_DELAY = 5
-# how many times to try reaching the LDAP server if a connection is broken
-# at the very minimum, one retry is needed to handle a restarted LDAP daemon
-RETRY_COUNT = 3
-SCOPE_MAP = {
- "base": ldap.SCOPE_BASE,
- "one": ldap.SCOPE_ONELEVEL,
- "sub": ldap.SCOPE_SUBTREE,
-}
+class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
+ """ Config file for the Ldap plugin """
-LDAP_QUERIES = []
+ def __init__(self, name, core, plugin):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
+ self.core = core
+ self.plugin = plugin
+ self.queries = list()
+ self.fam.AddMonitor(name, self)
+ def Index(self):
+ """ Get the queries from the config file """
+ try:
+ module_name = os.path.splitext(os.path.basename(self.name))[0]
+ module = imp.load_source(safe_module_name('Ldap', module_name),
+ self.name)
+ except: # pylint: disable=W0702
+ err = sys.exc_info()[1]
+ self.logger.error("Ldap: Failed to import %s: %s" %
+ (self.name, err))
+ return
+
+ if not hasattr(module, "__queries__"):
+ self.logger.error("Ldap: %s has no __queries__ list" % self.name)
+ return
+
+ self.queries = list()
+ for query in module.__queries__:
+ try:
+ self.queries.append(getattr(module, query))
+ except AttributeError:
+ self.logger.warning(
+ "Ldap: %s exports %s, but has no such attribute" %
+ (self.name, query))
-def register_query(query):
- LDAP_QUERIES.append(query)
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+ self.plugin.expire_cache()
-class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
- """
- Config file for the Ldap plugin
-
- The config file cannot be 'parsed' in the traditional sense as we would
- need some serious type checking ugliness to just get the LdapQuery
- subclasses. The alternative would be to have the user create a list with
- a predefined name that contains all queries.
- The approach implemented here is having the user call a registering
- decorator that updates a global variable in this module.
- """
- def __init__(self, filename):
- self.filename = filename
- Bcfg2.Server.Plugin.FileBacked.__init__(self, self.filename)
- self.fam.AddMonitor(self.filename, self)
-
- def Index(self):
- """
- Reregisters the queries in the config file
-
- The config will take care of actually registering the queries,
- so we just load it once and don't keep it.
- """
- global LDAP_QUERIES
- LDAP_QUERIES = []
- imp.load_source("ldap_cfg", self.filename)
+class Ldap(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.Connector):
+ """ The Ldap plugin allows adding data from an LDAP server
+ to your metadata. """
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['expire_cache']
-class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
- """
- The Ldap plugin allows adding data from an LDAP server to your metadata.
- """
- name = "Ldap"
experimental = True
- debug_flag = False
+
+ options = [
+ Bcfg2.Options.Option(
+ cf=('ldap', 'retries'), type=int, default=3,
+ dest='ldap_retries',
+ help='The number of times to retry reaching the '
+ 'LDAP server if a connection is broken'),
+ Bcfg2.Options.Option(
+ cf=('ldap', 'retry_delay'), type=float, default=5.0,
+ dest='ldap_retry_delay',
+            help='The time in seconds between retries'),
+ Bcfg2.Options.BooleanOption(
+ cf=('ldap', 'cache'), default=None, dest='ldap_cache',
+ help='Cache the results of the LDAP Queries until they '
+ 'are expired using the XML-RPC RMI')]
def __init__(self, core):
Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = ConfigFile(self.data + "/config.py")
- def debug_log(self, message, flag=None):
- if (flag is None) and self.debug_flag or flag:
- self.logger.error(message)
+ if not HAS_LDAP:
+ msg = "Python ldap module is required for Ldap plugin"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+
+ self.config = ConfigFile(os.path.join(self.data, 'config.py'),
+ core, self)
+ self._hosts = dict()
+
+ def _cache(self, query_name):
+ """ Return the :class:`Cache <Bcfg2.Server.Cache>` for the
+ given query name. """
+ return Bcfg2.Server.Cache.Cache('Ldap', 'results', query_name)
+
+ def _execute_query(self, query, metadata):
+ """ Return the cached result of the given query for this host or
+ execute the given query and cache the result. """
+ result = None
+
+ if Bcfg2.Options.setup.ldap_cache is not False:
+ cache = self._cache(query.name)
+ result = cache.get(metadata.hostname, None)
+
+ if result is None:
+ try:
+ self.debug_log("Processing query '%s'" % query.name)
+ result = query.get_result(metadata)
+ if Bcfg2.Options.setup.ldap_cache is not False:
+ cache[metadata.hostname] = result
+ except: # pylint: disable=W0702
+ self.logger.error(
+ "Exception during processing of query named '%s', query "
+ "results will be empty and may cause bind failures" %
+ query.name)
+ for line in traceback.format_exc().split('\n'):
+ self.logger.error(line)
+ return result
def get_additional_data(self, metadata):
- query = None
- try:
- data = {}
- self.debug_log("LdapPlugin debug: found queries " +
- str(LDAP_QUERIES))
- for QueryClass in LDAP_QUERIES:
- query = QueryClass()
+ data = {}
+ self.debug_log("Found queries %s" % self.config.queries)
+ for query_class in self.config.queries:
+ try:
+ query = query_class()
if query.is_applicable(metadata):
- self.debug_log("LdapPlugin debug: processing query '" +
- query.name + "'")
- data[query.name] = query.get_result(metadata)
+ self.debug_log("Processing query '%s'" % query.name)
+ data[query.name] = partial(
+ self._execute_query, query, metadata)
else:
- self.debug_log("LdapPlugin debug: query '" + query.name +
- "' not applicable to host '" +
- metadata.hostname + "'")
- return data
- except Exception:
- if hasattr(query, "name"):
- logger.error("LdapPlugin error: " +
- "Exception during processing of query named '" +
- str(query.name) +
- "', query results will be empty" +
- " and may cause bind failures")
- for line in traceback.format_exception(sys.exc_info()[0],
- sys.exc_info()[1],
- sys.exc_info()[2]):
- logger.error("LdapPlugin error: " +
- line.replace("\n", ""))
- return {}
-
-
-class LdapConnection(object):
- """
- Connection to an LDAP server.
- """
- def __init__(self, host="localhost", port=389,
+ self.debug_log("query '%s' not applicable to host '%s'" %
+ (query.name, metadata.hostname))
+ except: # pylint: disable=W0702
+ self.logger.error(
+ "Exception during preparation of query named '%s'. "
+ "Query will be ignored." % query_class.__name__)
+ for line in traceback.format_exc().split('\n'):
+ self.logger.error(line)
+
+ return Bcfg2.Server.Plugin.CallableDict(**data)
+
+ def start_client_run(self, metadata):
+ if Bcfg2.Options.setup.ldap_cache is None:
+ self.expire_cache(hostname=metadata.hostname)
+
+ def expire_cache(self, query=None, hostname=None):
+ """ Expire the cache. You can select the items to purge
+ per query and/or per host, or you can purge all cached
+ data. This is exposed as an XML-RPC RMI. """
+
+ tags = ['Ldap', 'results']
+ if query:
+ tags.append(query)
+ if hostname:
+ tags.append(hostname)
+
+ return Bcfg2.Server.Cache.expire(*tags)
+
+
+class LdapConnection(Debuggable):
+ """ Connection to an LDAP server. """
+
+ def __init__(self, host="localhost", port=389, uri=None, options=None,
binddn=None, bindpw=None):
+ Debuggable.__init__(self)
+
+        if not HAS_LDAP:
+ msg = "Python ldap module is required for Ldap plugin"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+
self.host = host
self.port = port
+ self.uri = uri
+ self.options = options
self.binddn = binddn
self.bindpw = bindpw
self.conn = None
+ self.__scopes__ = {
+ 'base': ldap.SCOPE_BASE,
+ 'one': ldap.SCOPE_ONELEVEL,
+ 'sub': ldap.SCOPE_SUBTREE,
+ }
+
def __del__(self):
+        """ Disconnect when the instance is destroyed. """
+ self.disconnect()
+
+ def disconnect(self):
+ """ If a connection to an LDAP server is available, disconnect it. """
if self.conn:
- self.conn.unbind()
+            self.conn.unbind()
+ self.conn = None
+
+ def connect(self):
+ """ Open a connection to the configured LDAP server, and do a simple
+        bind if both binddn and bindpw are set. """
+ self.disconnect()
+ self.conn = ldap.initialize(self.get_uri())
+
+ if self.options is not None:
+ for (option, value) in self.options.items():
+ self.conn.set_option(option, value)
- def init_conn(self):
- self.conn = ldap.initialize(self.url)
if self.binddn is not None and self.bindpw is not None:
self.conn.simple_bind_s(self.binddn, self.bindpw)
def run_query(self, query):
- result = None
- for attempt in range(RETRY_COUNT + 1):
- if attempt >= 1:
- logger.error("LdapPlugin error: " +
- "LDAP server down (retry " + str(attempt) + "/" +
- str(RETRY_COUNT) + ")")
+ """ Connect to the server and execute the query. If the server is
+ down, wait the configured amount and try to reconnect.
+
+ :param query: The query to execute on the LDAP server.
+        :type query: Bcfg2.Server.Plugins.Ldap.LdapQuery
+ """
+ for attempt in range(Bcfg2.Options.setup.ldap_retries + 1):
try:
if not self.conn:
- self.init_conn()
- result = self.conn.search_s(
- query.base,
- SCOPE_MAP[query.scope],
- query.filter.replace("\\", "\\\\"),
- query.attrs,
- )
- break
+ self.connect()
+
+ return self.conn.search_s(
+ query.base, self.__scopes__[query.scope],
+ query.filter.replace('\\', '\\\\'), query.attrs)
+
except ldap.SERVER_DOWN:
self.conn = None
- time.sleep(RETRY_DELAY)
- return result
+ self.logger.error(
+ "LdapConnection: Server %s down. Retry %d/%d in %.2fs." %
+ (self.get_uri(), attempt + 1,
+ Bcfg2.Options.setup.ldap_retries,
+ Bcfg2.Options.setup.ldap_retry_delay))
+ time.sleep(Bcfg2.Options.setup.ldap_retry_delay)
+
+ return None
- @property
- def url(self):
- return "ldap://" + self.host + ":" + str(self.port)
+ def get_uri(self):
+ """ The URL of the LDAP server. """
+ if self.uri is None:
+ if self.port == 636:
+ return "ldaps://%s" % self.host
+ return "ldap://%s:%d" % (self.host, self.port)
+ return self.uri
class LdapQuery(object):
- """
- Query referencing an LdapConnection and providing several
- methods for query manipulation.
- """
+ """ Query referencing an LdapConnection and providing several
+ methods for query manipulation. """
+
+ #: Name of the Query, used to register it in additional data.
+ name = ClassName()
- name = "unknown"
base = ""
scope = "sub"
filter = "(objectClass=*)"
@@ -172,80 +267,48 @@ class LdapQuery(object):
result = None
def __unicode__(self):
- return "LdapQuery:" + self.name
+ return "LdapQuery: %s" % self.name
- def is_applicable(self, metadata):
- """
- Overrideable method to determine if the query is to be executed for
- the given metadata object.
- Defaults to true.
- """
- return True
+ def is_applicable(self, metadata): # pylint: disable=W0613
+        """ Check if the query should be executed for a given metadata
+ object.
- def prepare_query(self, metadata):
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
- Overrideable method to alter the query based on metadata.
- Defaults to doing nothing.
-
- In most cases, you will do something like
+ return True
- self.filter = "(cn=" + metadata.hostname + ")"
+ def prepare_query(self, metadata, **kwargs): # pylint: disable=W0613
+ """ Prepares the query based on the client metadata. You can
+ for example modify the filter based on the client hostname.
- here.
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
pass
- def process_result(self, metadata):
- """
- Overrideable method to post-process the query result.
- Defaults to returning the unaltered result.
- """
- return self.result
+ def process_result(self, metadata, **kwargs): # pylint: disable=W0613
+ """ Post-process the query result.
- def get_result(self, metadata):
- """
- Method to handle preparing, executing and processing the query.
- """
- if isinstance(self.connection, LdapConnection):
- self.prepare_query(metadata)
- self.result = self.connection.run_query(self)
- self.result = self.process_result(metadata)
- return self.result
- else:
- logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
- return None
-
-
-class LdapSubQuery(LdapQuery):
- """
- SubQueries are meant for internal use only and are not added
- to the metadata object. They are useful for situations where
- you need to run more than one query to obtain some data.
- """
- def prepare_query(self, metadata, **kwargs):
- """
- Overrideable method to alter the query based on metadata.
- Defaults to doing nothing.
- """
- pass
-
- def process_result(self, metadata, **kwargs):
- """
- Overrideable method to post-process the query result.
- Defaults to returning the unaltered result.
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
return self.result
def get_result(self, metadata, **kwargs):
+        """ Handle the preparation, execution and processing of the query.
+
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
"""
- Method to handle preparing, executing and processing the query.
- """
- if isinstance(self.connection, LdapConnection):
+
+ if self.connection is not None:
self.prepare_query(metadata, **kwargs)
self.result = self.connection.run_query(self)
- return self.process_result(metadata, **kwargs)
+ self.result = self.process_result(metadata, **kwargs)
else:
- logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
- return None
+ raise Bcfg2.Server.Plugin.PluginExecutionError(
+ 'No connection defined for %s' % self.name)
+
+ return self.result
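
To illustrate the new registration mechanism (a module-level __queries__ list of class names, replacing the old register_query decorator), here is a hypothetical Ldap/config.py. The connection parameters, base DN, and attribute are invented; only the LdapConnection/LdapQuery interface visible in this patch is assumed.

from Bcfg2.Server.Plugins.Ldap import LdapConnection, LdapQuery

conn = LdapConnection(host="ldap.example.com",
                      binddn="cn=bcfg2,dc=example,dc=com",
                      bindpw="secret")


class OwnerQuery(LdapQuery):
    """ Fetch the 'owner' attribute of the client's host entry. """
    connection = conn
    base = "ou=hosts,dc=example,dc=com"
    attrs = ["owner"]

    def prepare_query(self, metadata, **kwargs):
        # narrow the default filter to the current client
        self.filter = "(cn=%s)" % metadata.hostname

    def process_result(self, metadata, **kwargs):
        # search_s() returns (dn, attribute-dict) tuples
        if self.result:
            return self.result[0][1].get("owner", [])
        return []


# the plugin only picks up classes named in __queries__
__queries__ = ["OwnerQuery"]
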
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index b850c1870..b912d3725 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -21,24 +21,25 @@ from Bcfg2.Compat import MutableMapping, all, any, wraps
# pylint: enable=W0622
from Bcfg2.version import Bcfg2VersionInfo
+try:
+ from django.db import models
+ HAS_DJANGO = True
+except ImportError:
+ HAS_DJANGO = False
+
# pylint: disable=C0103
ClientVersions = None
MetadataClientModel = None
# pylint: enable=C0103
-HAS_DJANGO = False
def load_django_models():
""" Load models for Django after option parsing has completed """
# pylint: disable=W0602
- global MetadataClientModel, ClientVersions, HAS_DJANGO
+ global MetadataClientModel, ClientVersions
# pylint: enable=W0602
- try:
- from django.db import models
- HAS_DJANGO = True
- except ImportError:
- HAS_DJANGO = False
+ if not HAS_DJANGO:
return
class MetadataClientModel(models.Model, # pylint: disable=W0621
@@ -1394,8 +1395,6 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# look at cert.cN
client = certinfo['commonName']
self.debug_log("Got cN %s; using as client name" % client)
- auth_type = self.auth.get(client,
- Bcfg2.Options.setup.authentication)
elif user == 'root':
id_method = 'address'
try:
@@ -1417,6 +1416,13 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# we have the client name
self.debug_log("Authenticating client %s" % client)
+ # validate id_method
+ auth_type = self.auth.get(client, Bcfg2.Options.setup.authentication)
+ if auth_type == 'cert' and id_method != 'cert':
+ self.logger.error("Client %s does not provide a cert, but only "
+ "cert auth is allowed" % client)
+ return False
+
# next we validate the address
if (id_method != 'uuid' and
not self.validate_client_address(client, address)):
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index d3c38ef19..067e2faad 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -92,17 +92,15 @@ class NagiosGen(Plugin, Generator):
for host in host_configs:
host_data.append(open(host, 'r').read())
- group_list = []
+ used_groups = set(['default'])
for line in "\n".join(host_data).splitlines():
# only include those groups which are actually used
if "hostgroup" in line:
- group_list += line.split()[1].split(',')
-
- group_list = list(set(group_list))
+ used_groups.update(line.split()[1].split(','))
for group in group_configs:
group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group)
- if group_name in group_list:
+ if group_name in used_groups:
groupfile = open(group, 'r')
group_data.append(groupfile.read())
groupfile.close()
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 461be9ba8..b314e60a0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -94,7 +94,12 @@ class Ohai(Bcfg2.Server.Plugin.Plugin,
return [self.probe]
def ReceiveData(self, meta, datalist):
- self.cache[meta.hostname] = datalist[0].text
+ if meta.hostname not in self.cache or \
+ self.cache[meta.hostname] != datalist[0].text:
+ self.cache[meta.hostname] = datalist[0].text
+
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire(meta.hostname)
def get_additional_data(self, meta):
if meta.hostname in self.cache:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index 7de79e2f3..956cb9f51 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -6,6 +6,15 @@ from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
+def strip_suffix(pkgname):
+ """ Remove the ':any' suffix from a dependency name if it is present.
+ """
+ if pkgname.endswith(':any'):
+ return pkgname[:-4]
+ else:
+ return pkgname
+
+
class AptCollection(Collection):
""" Handle collections of APT sources. This is a no-op object
that simply inherits from
@@ -34,8 +43,12 @@ class AptCollection(Collection):
for source in self:
if source.rawurl:
- self.logger.info("Packages: Skipping rawurl %s" %
- source.rawurl)
+ if source.rawurl[-1] != '/':
+ source.rawurl = source.rawurl + "/"
+ index = source.rawurl.rfind("/", 0, -1)
+ lines.append("deb %s %s" %
+ (source.rawurl[:index],
+ source.rawurl[index + 1:]))
else:
lines.append("deb %s %s %s" % (source.url, source.version,
" ".join(source.components)))
@@ -44,7 +57,7 @@ class AptCollection(Collection):
(source.url,
source.version,
" ".join(source.components)))
- lines.append("")
+ lines.append("")
return "\n".join(lines)
@@ -73,6 +86,7 @@ class AptSource(Source):
bdeps = dict()
brecs = dict()
bprov = dict()
+ self.pkgnames = set()
self.essentialpkgs = set()
for fname in self.files:
if not self.rawurl:
@@ -111,6 +125,7 @@ class AptSource(Source):
cdeps = [re.sub(r'\s+', '',
re.sub(r'\(.*\)', '', cdep))
for cdep in dep.split('|')]
+ cdeps = [strip_suffix(cdep) for cdep in cdeps]
dyn_dname = "choice-%s-%s-%s" % (pkgname,
barch,
vindex)
@@ -124,6 +139,7 @@ class AptSource(Source):
else:
raw_dep = re.sub(r'\(.*\)', '', dep)
raw_dep = raw_dep.rstrip().strip()
+ raw_dep = strip_suffix(raw_dep)
if words[0] == 'Recommends':
brecs[barch][pkgname].append(raw_dep)
else:
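
A quick sketch of what the new rawurl handling in AptCollection.get_config (above) produces; the mirror URL is made up. The final path component is treated as the distribution and everything before it as the repository base:

rawurl = "http://deb.example.com/debian/unstable"
if rawurl[-1] != '/':
    rawurl += '/'
index = rawurl.rfind("/", 0, -1)
print("deb %s %s" % (rawurl[:index], rawurl[index + 1:]))
# -> deb http://deb.example.com/debian unstable/
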
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py b/src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py
new file mode 100644
index 000000000..f47b8f22c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py
@@ -0,0 +1,35 @@
+""" Dummy backend for :mod:`Bcfg2.Server.Plugins.Packages` """
+
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+
+
+class DummyCollection(Collection):
+ """ Handle collections of Dummy sources. This is a no-op object
+ that simply inherits from
+ :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`,
+    overrides nothing, and defers all operations to :class:`DummySource`
+ """
+
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
+ # we define an __init__ that just calls the parent __init__,
+ # so that we can set the docstring on __init__ to something
+ # different from the parent __init__ -- namely, the parent
+ # __init__ docstring, minus everything after ``.. -----``,
+ # which we use to delineate the actual docs from the
+ # .. autoattribute hacks we have to do to get private
+ # attributes included in sphinx 1.0 """
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
+ debug=debug)
+ __init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
+
+
+class DummySource(Source):
+ """ Handle Dummy sources """
+
+ #: DummySource sets the ``type`` on Package entries to "dummy"
+ ptype = 'dummy'
+
+ def __init__(self, basepath, xsource):
+ xsource.set('rawurl', 'http://example.com/')
+ Source.__init__(self, basepath, xsource)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 0e15d2e15..6fc084cc4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -1,10 +1,62 @@
""" Pacman backend for :mod:`Bcfg2.Server.Plugins.Packages` """
+import os
import tarfile
+from Bcfg2.Compat import cPickle
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
+def parse_db_file(pkgfile):
+ """ Parse a Pacman database file, returning a dictionary with
+ section headings for keys and lists of strings for values.
+ (Reference: ``sync_db_read`` in ``lib/libalpm/be_sync.c``)
+ """
+
+ pkg = {}
+ section = None
+
+ for line in pkgfile:
+ line = line.strip()
+
+ if section is not None:
+ if not line:
+ section = None
+ else:
+ pkg[section].append(line)
+ elif len(line) >= 2 and line[0] == line[-1] == '%':
+ section = line
+ pkg[section] = []
+
+ return pkg
+
+
+def parse_dep(dep):
+ """ Parse a Pacman dependency string, returning the package name,
+ version restriction (or ``None``), and description (or ``None``).
+ (Reference: ``alpm_dep_from_string`` in ``lib/libalpm/deps.c``)
+ """
+
+ rest_desc = dep.split(': ', 1)
+ if len(rest_desc) == 1:
+ rest, desc = rest_desc[0], None
+ else:
+ rest, desc = rest_desc
+
+ # Search for '=' last, since '<=' and '>=' are possible.
+ for symb in ['<', '>', '=']:
+ idx = rest.find(symb)
+ if idx >= 0:
+ name = rest[:idx]
+ version = rest[idx:]
+ break
+ else:
+ name = rest
+ version = None
+
+ return name, version, desc
+
+
class PacCollection(Collection):
""" Handle collections of Pacman sources. This is a no-op object
that simply inherits from
@@ -24,6 +76,10 @@ class PacCollection(Collection):
debug=debug)
__init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
+ @property
+ def __package_groups__(self):
+ return True
+
class PacSource(Source):
""" Handle Pacman sources """
@@ -31,6 +87,25 @@ class PacSource(Source):
#: PacSource sets the ``type`` on Package entries to "pacman"
ptype = 'pacman'
+ def __init__(self, basepath, xsource):
+ self.pacgroups = {}
+
+ Source.__init__(self, basepath, xsource)
+ __init__.__doc__ = Source.__init__.__doc__
+
+ def load_state(self):
+ data = open(self.cachefile, 'rb')
+ (self.pkgnames, self.deps, self.provides,
+ self.recommends, self.pacgroups) = cPickle.load(data)
+ load_state.__doc__ = Source.load_state.__doc__
+
+ def save_state(self):
+ cache = open(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides,
+ self.recommends, self.pacgroups), cache, 2)
+ cache.close()
+ save_state.__doc__ = Source.save_state.__doc__
+
@property
def urls(self):
""" A list of URLs to the base metadata file for each
@@ -45,14 +120,12 @@ class PacSource(Source):
else:
raise Exception("PacSource : RAWUrl not supported (yet)")
- def read_files(self):
- bdeps = dict()
- bprov = dict()
-
- depfnames = ['Depends', 'Pre-Depends']
- if self.recommended:
- depfnames.append('Recommends')
-
+ def read_files(self): # pylint: disable=R0912
+ bdeps = {}
+ brecs = {}
+ bprov = {}
+ self.pkgnames = set()
+ self.pacgroups = {}
for fname in self.files:
if not self.rawurl:
barch = [x for x in fname.split('@') if x in self.arches][0]
@@ -62,8 +135,9 @@ class PacSource(Source):
barch = self.arches[0]
if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
+ bdeps[barch] = {}
+ brecs[barch] = {}
+ bprov[barch] = {}
try:
self.debug_log("Packages: try to read %s" % fname)
tar = tarfile.open(fname, "r")
@@ -71,11 +145,52 @@ class PacSource(Source):
self.logger.error("Packages: Failed to read file %s" % fname)
raise
+ packages = {}
for tarinfo in tar:
- if tarinfo.isdir():
- self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
- self.debug_log("Packages: added %s" %
- tarinfo.name.rsplit("-", 2)[0])
+ if not tarinfo.isfile():
+ continue
+ prefix = os.path.dirname(tarinfo.name)
+ if prefix not in packages:
+ packages[prefix] = {}
+ pkg = parse_db_file(tar.extractfile(tarinfo))
+ packages[prefix].update(pkg)
+
+ for pkg in packages.values():
+ pkgname = pkg['%NAME%'][0]
+ self.pkgnames.add(pkgname)
+ bdeps[barch][pkgname] = []
+ brecs[barch][pkgname] = []
+
+ if '%DEPENDS%' in pkg:
+ for dep in pkg['%DEPENDS%']:
+ dname = parse_dep(dep)[0]
+ bdeps[barch][pkgname].append(dname)
+
+ if '%OPTDEPENDS%' in pkg:
+ for dep in pkg['%OPTDEPENDS%']:
+ dname = parse_dep(dep)[0]
+ brecs[barch][pkgname].append(dname)
+
+ if '%PROVIDES%' in pkg:
+ for dep in pkg['%PROVIDES%']:
+ dname = parse_dep(dep)[0]
+ if dname not in bprov[barch]:
+ bprov[barch][dname] = set()
+ bprov[barch][dname].add(pkgname)
+
+ if '%GROUPS%' in pkg:
+ for group in pkg['%GROUPS%']:
+ if group not in self.pacgroups:
+ self.pacgroups[group] = []
+ self.pacgroups[group].append(pkgname)
+
tar.close()
- self.process_files(bdeps, bprov)
+ self.process_files(bdeps, bprov, brecs)
read_files.__doc__ = Source.read_files.__doc__
+
+ def get_group(self, metadata, group, ptype=None):
+ try:
+ return self.pacgroups[group]
+ except KeyError:
+ return []
+ get_group.__doc__ = Source.get_group.__doc__
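
The two new module-level helpers can be exercised on their own. The desc-file contents and dependency string below are invented, but the parsing behaviour matches the code added above.

from io import StringIO

from Bcfg2.Server.Plugins.Packages.Pac import parse_db_file, parse_dep

sample = StringIO(u"%NAME%\nvim\n\n%DEPENDS%\nlibgcrypt>=1.5.0\nglibc\n")
print(parse_db_file(sample))
# -> {'%NAME%': ['vim'], '%DEPENDS%': ['libgcrypt>=1.5.0', 'glibc']}

print(parse_dep('libgcrypt>=1.5.0: for encryption support'))
# -> ('libgcrypt', '>=1.5.0', 'for encryption support')
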
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
index 736cdcdd4..55dd4e488 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
@@ -56,6 +56,7 @@ class PkgngSource(Source):
def read_files(self):
bdeps = dict()
+ self.pkgnames = set()
for fname in self.files:
if not self.rawurl:
abi = [x
@@ -75,9 +76,7 @@ class PkgngSource(Source):
self.logger.error("Packages: Failed to read file %s" % fname)
raise
for line in reader.readlines():
- if not isinstance(line, str):
- line = line.decode('utf-8')
- pkg = json.loads(line)
+ pkg = json.loads(unicode(line, errors='ignore'))
pkgname = pkg['name']
self.pkgnames.add(pkgname)
if 'deps' in pkg:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index c9f6ea14a..86f7698f7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -135,22 +135,22 @@ class Source(Debuggable): # pylint: disable=R0902
#: A list of the text of all 'Component' attributes of this
#: source from XML
- self.components = [item.text for item in xsource.findall('Component')]
+ self.components = []
#: A list of the arches supported by this source
- self.arches = [item.text for item in xsource.findall('Arch')]
+ self.arches = []
#: A list of the the names of packages that are blacklisted
#: from this source
- self.blacklist = [item.text for item in xsource.findall('Blacklist')]
+ self.blacklist = []
#: A list of the the names of packages that are whitelisted in
#: this source
- self.whitelist = [item.text for item in xsource.findall('Whitelist')]
+ self.whitelist = []
#: Whether or not to include deb-src lines in the generated APT
#: configuration
- self.debsrc = xsource.get('debsrc', 'false') == 'true'
+ self.debsrc = False
#: A dict of repository options that will be included in the
#: configuration generated on the server side (if such is
@@ -162,51 +162,38 @@ class Source(Debuggable): # pylint: disable=R0902
#: configuration generated for the client (if that is
#: supported by the backend)
self.client_options = dict()
- opts = xsource.findall("Options")
- for el in opts:
- repoopts = dict([(k, v)
- for k, v in el.attrib.items()
- if k != "clientonly" and k != "serveronly"])
- if el.get("clientonly", "false").lower() == "false":
- self.server_options.update(repoopts)
- if el.get("serveronly", "false").lower() == "false":
- self.client_options.update(repoopts)
#: A list of URLs to GPG keys that apply to this source
- self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
+ self.gpgkeys = []
#: Whether or not to include essential packages from this source
- self.essential = xsource.get('essential', 'true').lower() == 'true'
+ self.essential = True
#: Whether or not to include recommended packages from this source
- self.recommended = xsource.get('recommended',
- 'false').lower() == 'true'
+ self.recommended = False
#: The "rawurl" attribute from :attr:`xsource`, if applicable.
#: A trailing slash is automatically appended to this if there
#: wasn't one already present.
- self.rawurl = xsource.get('rawurl', '')
- if self.rawurl and not self.rawurl.endswith("/"):
- self.rawurl += "/"
+ self.rawurl = None
#: The "url" attribute from :attr:`xsource`, if applicable. A
#: trailing slash is automatically appended to this if there
#: wasn't one already present.
- self.url = xsource.get('url', '')
- if self.url and not self.url.endswith("/"):
- self.url += "/"
+ self.url = None
#: The "version" attribute from :attr:`xsource`
- self.version = xsource.get('version', '')
+ self.version = None
#: The "name" attribute from :attr:`xsource`
- self.name = xsource.get('name', None)
+ self.name = None
#: A list of predicates that are used to determine if this
#: source applies to a given
#: :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata`
#: object.
self.conditions = []
+
#: Formerly, :ref:`server-plugins-generators-packages` only
#: supported applying package sources to groups; that is, they
#: could not be assigned by more complicated logic like
@@ -214,22 +201,6 @@ class Source(Debuggable): # pylint: disable=R0902
#: attribute attempts to provide for some limited backwards
#: compat with older code that relies on this.
self.groups = []
- for el in xsource.iterancestors():
- if el.tag == "Group":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") not in m.groups)
- else:
- self.groups.append(el.get("name"))
- self.conditions.append(lambda m, el=el:
- el.get("name") in m.groups)
- elif el.tag == "Client":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") != m.hostname)
- else:
- self.conditions.append(lambda m, el=el:
- el.get("name") == m.hostname)
#: A set of all package names in this source. This will not
#: necessarily be populated, particularly by backends that
@@ -253,6 +224,8 @@ class Source(Debuggable): # pylint: disable=R0902
#: symbols>``. This will not necessarily be populated.
self.recommends = dict()
+ self._init_attributes(xsource)
+
#: The file (or directory) used for this source's cache data
self.cachefile = os.path.join(self.basepath,
"cache-%s" % self.cachekey)
@@ -292,6 +265,69 @@ class Source(Debuggable): # pylint: disable=R0902
setting['name'] = self.get_repo_name(setting)
self.url_map.extend(usettings)
+ def _init_attributes(self, xsource):
+ """
+        This function evaluates the Source tag and parses all
+        attributes. Override this function in a subclass to
+        parse specific attributes. Do not use ``__init__`` because
+        ``Source.__init__`` may call other functions that already
+        need these specific fields. This function is called before
+ any other function.
+
+ :param xsource: The XML tag that describes this source
+        :type xsource: lxml.etree._Element
+ """
+
+ self.components = [item.text for item in xsource.findall('Component')]
+ self.arches = [item.text for item in xsource.findall('Arch')]
+ self.blacklist = [item.text for item in xsource.findall('Blacklist')]
+ self.whitelist = [item.text for item in xsource.findall('Whitelist')]
+ self.debsrc = xsource.get('debsrc', 'false') == 'true'
+
+ opts = xsource.findall("Options")
+ for el in opts:
+ repoopts = dict([(k, v)
+ for k, v in el.attrib.items()
+ if k != "clientonly" and k != "serveronly"])
+ if el.get("clientonly", "false").lower() == "false":
+ self.server_options.update(repoopts)
+ if el.get("serveronly", "false").lower() == "false":
+ self.client_options.update(repoopts)
+
+ self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
+
+ self.essential = xsource.get('essential', 'true').lower() == 'true'
+ self.recommended = xsource.get('recommended',
+ 'false').lower() == 'true'
+
+ self.rawurl = xsource.get('rawurl', '')
+ if self.rawurl and not self.rawurl.endswith("/"):
+ self.rawurl += "/"
+
+ self.url = xsource.get('url', '')
+ if self.url and not self.url.endswith("/"):
+ self.url += "/"
+
+ self.version = xsource.get('version', '')
+ self.name = xsource.get('name', None)
+
+ for el in xsource.iterancestors():
+ if el.tag == "Group":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") not in m.groups)
+ else:
+ self.groups.append(el.get("name"))
+ self.conditions.append(lambda m, el=el:
+ el.get("name") in m.groups)
+ elif el.tag == "Client":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") != m.hostname)
+ else:
+ self.conditions.append(lambda m, el=el:
+ el.get("name") == m.hostname)
+
@property
def cachekey(self):
""" A unique key for this source that will be used to generate
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index dbe3f9ce5..846fb89cd 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -456,16 +456,13 @@ class YumCollection(Collection):
reponame = basereponame
added = False
+ rid = 1
while not added:
try:
config.add_section(reponame)
added = True
except ConfigParser.DuplicateSectionError:
- match = re.search(r'-(\d+)', reponame)
- if match:
- rid = int(match.group(1)) + 1
- else:
- rid = 1
+ rid += 1
reponame = "%s-%d" % (basereponame, rid)
config.set(reponame, "name", reponame)
@@ -1004,8 +1001,20 @@ class YumSource(Source):
ptype = 'yum'
def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
+ self.filemap = dict()
+ self.file_to_arch = dict()
+ self.needed_paths = set()
+ self.packages = dict()
+ self.yumgroups = dict()
self.pulp_id = None
+ self.repo = None
+
+ Source.__init__(self, basepath, xsource)
+ __init__.__doc__ = Source.__init__.__doc__
+
+ def _init_attributes(self, xsource):
+ Source._init_attributes(self, xsource)
+
if HAS_PULP and xsource.get("pulp_id"):
self.pulp_id = xsource.get("pulp_id")
@@ -1034,15 +1043,11 @@ class YumSource(Source):
self.repo['relative_path'])
self.arches = [self.repo['arch']]
- self.packages = dict()
self.deps = dict([('global', dict())])
self.provides = dict([('global', dict())])
self.filemap = dict([(x, dict())
for x in ['global'] + self.arches])
- self.needed_paths = set()
- self.file_to_arch = dict()
- self.yumgroups = dict()
- __init__.__doc__ = Source.__init__.__doc__
+ _init_attributes.__doc__ = Source._init_attributes.__doc__
@property
def use_yum(self):
@@ -1130,6 +1135,94 @@ class YumSource(Source):
self.file_to_arch[self.escape_url(fullurl)] = arch
return urls
+ # pylint: disable=R0911,R0912
+    # disabling the pylint errors above because we are interested in
+ # replicating the flow of the RPM code.
+ def _compare_rpm_versions(self, str1, str2):
+ """ Compare RPM versions.
+
+ This is an attempt to reimplement RPM's rpmvercmp method in python.
+
+ :param str1: package 1 version string
+ :param str2: package 2 version string
+ :return: 1 - str1 is newer than str2
+ 0 - str1 and str2 are the same version
+ -1 - str2 is newer than str1"""
+ if str1 == str2:
+ return 0
+
+ front_strip_re = re.compile('^[^A-Za-z0-9~]+')
+ risdigit = re.compile('(^[0-9]+)')
+ risalpha = re.compile('(^[A-Za-z])')
+ lzeroes = re.compile('^0+')
+
+ while len(str1) > 0 or len(str2) > 0:
+ str1 = front_strip_re.sub('', str1)
+ str2 = front_strip_re.sub('', str2)
+
+ if len(str1) == 0 or len(str2) == 0:
+ break
+
+ # handle the tilde separator
+ if str1[0] == '~' and str2[0] == '~':
+ str1 = str1[1:]
+ str2 = str2[1:]
+ elif str1[0] == '~':
+ return -1
+ elif str2[0] == '~':
+ return 1
+
+ # grab continuous segments from each string
+ isnum = False
+ if risdigit.match(str1):
+ segment1 = risdigit.split(str1)[1]
+ str1 = risdigit.split(str1)[2]
+ if risdigit.match(str2):
+ segment2 = risdigit.split(str2)[1]
+ str2 = risdigit.split(str2)[2]
+ else:
+ segment2 = ''
+ isnum = True
+ else:
+ segment1 = risalpha.split(str1)[1]
+ str1 = risalpha.split(str1)[2]
+ if risalpha.match(str2):
+ segment2 = risalpha.split(str2)[1]
+ str2 = risalpha.split(str2)[2]
+ else:
+ segment2 = ''
+
+ # numeric segments are always newer than alpha segments
+ if len(segment2) == 0:
+ if isnum:
+ return 1
+ return -1
+
+ if isnum:
+ # discard leading zeroes
+ segment1 = lzeroes.sub('', segment1)
+ segment2 = lzeroes.sub('', segment2)
+ # higher number has more digits
+ if len(segment1) > len(segment2):
+ return 1
+ elif len(segment2) > len(segment1):
+ return -1
+ # do a simple string comparison
+ if segment1 > segment2:
+ return 1
+ elif segment2 > segment1:
+ return -1
+
+ # if one of the strings is empty, the version of the longer
+ # string is higher
+ if len(str1) > len(str2):
+ return 1
+ elif len(str2) > len(str1):
+ return -1
+ else:
+ return 0
+ # pylint: enable=R0911,R0912
+
@track_statistics()
def read_files(self):
""" When using the builtin yum parser, read and parse locally
@@ -1198,13 +1291,33 @@ class YumSource(Source):
if arch not in self.packages:
self.packages[arch] = set()
if arch not in self.deps:
- self.deps[arch] = dict()
+ self.deps[arch] = {}
if arch not in self.provides:
- self.provides[arch] = dict()
+ self.provides[arch] = {}
+ versionmap = {}
for pkg in data.getchildren():
if not pkg.tag.endswith('package'):
continue
pkgname = pkg.find(XP + 'name').text
+ vtag = pkg.find(XP + 'version')
+ epoch = vtag.get('epoch')
+ version = vtag.get('ver')
+ release = vtag.get('rel')
+ if pkgname in self.packages[arch]:
+ # skip if version older than a previous version
+ if (self._compare_rpm_versions(
+ epoch, versionmap[pkgname]['epoch']) < 0):
+ continue
+ elif (self._compare_rpm_versions(
+ version, versionmap[pkgname]['version']) < 0):
+ continue
+ elif (self._compare_rpm_versions(
+ release, versionmap[pkgname]['release']) < 0):
+ continue
+ versionmap[pkgname] = {}
+ versionmap[pkgname]['epoch'] = epoch
+ versionmap[pkgname]['version'] = version
+ versionmap[pkgname]['release'] = release
self.packages[arch].add(pkgname)
pdata = pkg.find(XP + 'format')
@@ -1256,10 +1369,15 @@ class YumSource(Source):
arch = [a for a in self.arches if a in metadata.groups]
if not arch:
return False
- return ((package in self.packages['global'] or
- package in self.packages[arch[0]]) and
- package not in self.blacklist and
- (len(self.whitelist) == 0 or package in self.whitelist))
+ try:
+ return ((package in self.packages['global'] or
+ package in self.packages[arch[0]]) and
+ package not in self.blacklist and
+ (len(self.whitelist) == 0 or package in self.whitelist))
+ except KeyError:
+ self.logger.debug("Packages: Unable to find %s for arch %s" %
+ (package, arch[0]))
+ return False
is_package.__doc__ = Source.is_package.__doc__
def get_vpkgs(self, metadata):
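
The version-skipping logic in read_files relies on _compare_rpm_versions behaving like RPM's rpmvercmp. A few invented inputs and the results the reimplementation yields (under Python 3 the method can be called unbound with None for self, since it never touches self):

from Bcfg2.Server.Plugins.Packages.Yum import YumSource

cases = [
    ('1.0', '1.0', 0),         # identical versions
    ('1.10', '1.9', 1),        # numeric segments compare as numbers, not strings
    ('1.0~rc1', '1.0.1', -1),  # a tilde segment sorts before anything else
    ('2.0', '2a', 1),          # a numeric segment is newer than an alpha segment
]
for v1, v2, expected in cases:
    assert YumSource._compare_rpm_versions(None, v1, v2) == expected
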
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
index b2e43bde7..89cc23090 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
@@ -274,29 +274,31 @@ class HelperSubcommand(Bcfg2.Options.Subcommand):
# whether or not this command accepts input on stdin
accept_input = True
- def __init__(self):
- Bcfg2.Options.Subcommand.__init__(self)
- self.verbosity = 0
+ # logging level
+ verbosity = 0
+
+ def run(self, setup):
if Bcfg2.Options.setup.debug:
self.verbosity = 5
elif Bcfg2.Options.setup.verbose:
self.verbosity = 1
- def run(self, setup):
- try:
- data = json.loads(sys.stdin.read())
- except ValueError:
- self.logger.error("Error decoding JSON input: %s" %
- sys.exc_info()[1])
- print(json.dumps(self.fallback))
- return 2
+ data = None
+ if self.accept_input:
+ try:
+ data = json.loads(sys.stdin.read())
+ except ValueError:
+ self.logger.error("Error decoding JSON input: %s" %
+ sys.exc_info()[1])
+ print(json.dumps(self.fallback))
+ return 2
try:
print(json.dumps(self._run(setup, data)))
except: # pylint: disable=W0702
self.logger.error("Unexpected error running %s: %s" %
- self.__class__.__name__.lower(),
- sys.exc_info()[1], exc_info=1)
+ (self.__class__.__name__.lower(),
+ sys.exc_info()[1]), exc_info=1)
print(json.dumps(self.fallback))
return 2
return 0
@@ -310,10 +312,13 @@ class DepSolverSubcommand(HelperSubcommand): # pylint: disable=W0223
""" Base class for helper commands that use the depsolver (i.e.,
only resolve dependencies, don't modify the cache) """
- def __init__(self):
- HelperSubcommand.__init__(self)
+ # DepSolver instance used in _run function
+ depsolver = None
+
+ def run(self, setup):
self.depsolver = DepSolver(Bcfg2.Options.setup.yum_config,
self.verbosity)
+ HelperSubcommand.run(self, setup)
class CacheManagerSubcommand(HelperSubcommand): # pylint: disable=W0223
@@ -322,10 +327,13 @@ class CacheManagerSubcommand(HelperSubcommand): # pylint: disable=W0223
fallback = False
accept_input = False
- def __init__(self):
- HelperSubcommand.__init__(self)
+ # CacheManager instance used in _run function
+ cachemgr = None
+
+ def run(self, setup):
self.cachemgr = CacheManager(Bcfg2.Options.setup.yum_config,
self.verbosity)
+ HelperSubcommand.run(self, setup)
class Clean(CacheManagerSubcommand):
@@ -376,10 +384,7 @@ class CLI(Bcfg2.Options.CommandRegistry):
""" The bcfg2-yum-helper CLI """
options = [
Bcfg2.Options.PathOption(
- "-c", "--yum-config", help="Yum config file"),
- Bcfg2.Options.PositionalArgument(
- "command", help="Yum helper command",
- choices=['clean', 'complete', 'get_groups'])]
+ "-c", "--yum-config", help="Yum config file")]
def __init__(self):
Bcfg2.Options.CommandRegistry.__init__(self)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index 3aa5c415f..95b4baa3e 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -10,7 +10,7 @@ import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Cache
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import urlopen, HTTPError, URLError, MutableMapping
+from Bcfg2.Compat import urlopen, HTTPError, URLError
from Bcfg2.Server.Plugins.Packages.Collection import Collection, \
get_collection_class
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
@@ -36,52 +36,6 @@ class PackagesBackendAction(Bcfg2.Options.ComponentAction):
fail_silently = True
-class OnDemandDict(MutableMapping):
- """ This maps a set of keys to a set of value-getting functions;
- the values are populated on-the-fly by the functions as the values
- are needed (and not before). This is used by
- :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data`;
- see the docstring for that function for details on why.
-
- Unlike a dict, you should not specify values for for the righthand
- side of this mapping, but functions that get values. E.g.:
-
- .. code-block:: python
-
- d = OnDemandDict(foo=load_foo,
- bar=lambda: "bar");
- """
-
- def __init__(self, **getters):
- self._values = dict()
- self._getters = dict(**getters)
-
- def __getitem__(self, key):
- if key not in self._values:
- self._values[key] = self._getters[key]()
- return self._values[key]
-
- def __setitem__(self, key, getter):
- self._getters[key] = getter
-
- def __delitem__(self, key):
- del self._values[key]
- del self._getters[key]
-
- def __len__(self):
- return len(self._getters)
-
- def __iter__(self):
- return iter(self._getters.keys())
-
- def __repr__(self):
- rv = dict(self._values)
- for key in self._getters.keys():
- if key not in rv:
- rv[key] = 'unknown'
- return str(rv)
-
-
class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
@@ -103,7 +57,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
help="Packages backends to load",
type=Bcfg2.Options.Types.comma_list,
action=PackagesBackendAction,
- default=['Yum', 'Apt', 'Pac', 'Pkgng']),
+ default=['Yum', 'Apt', 'Pac', 'Pkgng', 'Dummy']),
Bcfg2.Options.PathOption(
cf=("packages", "cache"), dest="packages_cache",
help="Path to the Packages cache",
@@ -578,7 +532,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
def get_additional_data(self, metadata):
""" Return additional data for the given client. This will be
- an :class:`Bcfg2.Server.Plugins.Packages.OnDemandDict`
+ an :class:`Bcfg2.Server.Plugin.OnDemandDict`
containing two keys:
* ``sources``, whose value is a list of data returned from
@@ -610,7 +564,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
get_collection() until it's absolutely necessary. """
return self.get_collection(metadata).get_additional_data()
- return OnDemandDict(
+ return Bcfg2.Server.Plugin.OnDemandDict(
sources=get_sources,
get_config=lambda: self.get_config)
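
The OnDemandDict removed here now lives in Bcfg2.Server.Plugin, as the updated docstring above notes. Its behaviour, sketched with invented getters: the values are callables that only run on first access, so expensive Collection lookups are deferred until a consumer actually asks for them.

from Bcfg2.Server.Plugin import OnDemandDict

def get_sources():
    print("expensive collection lookup happens only now")
    return ['source-a', 'source-b']

data = OnDemandDict(sources=get_sources,
                    get_config=lambda: 'static value')

# nothing has run yet; the first read of a key invokes its getter
print(data['sources'])     # triggers get_sources()
print(data['sources'])     # cached, the getter is not called again
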
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index 76aab69b5..270bfa62d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -15,7 +15,12 @@ import Bcfg2.Server.FileMonitor
from Bcfg2.Logger import Debuggable
from Bcfg2.Server.Statistics import track_statistics
-HAS_DJANGO = False
+try:
+ from django.db import models
+ HAS_DJANGO = True
+except ImportError:
+ HAS_DJANGO = False
+
# pylint: disable=C0103
ProbesDataModel = None
ProbesGroupsModel = None
@@ -25,13 +30,10 @@ ProbesGroupsModel = None
def load_django_models():
""" Load models for Django after option parsing has completed """
# pylint: disable=W0602
- global ProbesDataModel, ProbesGroupsModel, HAS_DJANGO
+ global ProbesDataModel, ProbesGroupsModel
# pylint: enable=W0602
- try:
- from django.db import models
- HAS_DJANGO = True
- except ImportError:
- HAS_DJANGO = False
+
+ if not HAS_DJANGO:
return
class ProbesDataModel(models.Model, # pylint: disable=W0621,W0612
@@ -74,6 +76,7 @@ class ProbeStore(Debuggable):
def __init__(self, core, datadir): # pylint: disable=W0613
Debuggable.__init__(self)
+ self.core = core
self._groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups")
self._datacache = Bcfg2.Server.Cache.Cache("Probes", "probedata")
@@ -134,7 +137,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
groupdata = ProbesGroupsModel.objects.filter(hostname=hostname)
self._groupcache[hostname] = list(set(r.group for r in groupdata))
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
@Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
def set_groups(self, hostname, groups):
@@ -155,7 +158,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
ProbesGroupsModel.objects.filter(
hostname=hostname).exclude(group__in=groups).delete()
if olddata != groups:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
def _load_data(self, hostname):
Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
@@ -168,7 +171,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
time.mktime(pdata.timestamp.timetuple())
ts_set = True
self._datacache[hostname][pdata.probe] = ProbeData(pdata.data)
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
@Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
def set_data(self, hostname, data):
@@ -198,7 +201,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
qset.delete()
expire_metadata = True
if expire_metadata:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
class XMLProbeStore(ProbeStore):
@@ -234,7 +237,7 @@ class XMLProbeStore(ProbeStore):
self._groupcache[client.get('name')].append(
pdata.get('name'))
- Bcfg2.Server.Cache.expire("Metadata")
+ self.core.metadata_cache.expire()
def _load_groups(self, hostname):
self._load_data(hostname)
@@ -274,7 +277,7 @@ class XMLProbeStore(ProbeStore):
olddata = self._groupcache.get(hostname, [])
self._groupcache[hostname] = groups
if olddata != groups:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
def set_data(self, hostname, data):
Bcfg2.Server.Cache.expire("Probes", "probedata", hostname)
@@ -285,7 +288,7 @@ class XMLProbeStore(ProbeStore):
self._datacache[hostname][probe] = pdata
expire_metadata |= olddata != data
if expire_metadata:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
class ClientProbeDataSet(dict):
@@ -304,7 +307,8 @@ class ProbeData(str): # pylint: disable=E0012,R0924
.json, and .yaml properties to provide convenient ways to use
ProbeData objects as XML, JSON, or YAML data """
def __new__(cls, data):
- if isinstance(data, unicode):
+ # prevent double encoding utf-8 in python3
+ if isinstance(data, unicode) and not isinstance(data, str):
return str.__new__(cls, data.encode('utf-8'))
else:
return str.__new__(cls, data)
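The Probes hunks above move the django import to module scope, so HAS_DJANGO is settled at import time, while the model classes are still created inside load_django_models() after option parsing. A standalone sketch of that deferred-definition pattern, with sqlite3 standing in for the optional dependency and all names hypothetical:

try:
    import sqlite3 as _optional          # stand-in for django.db.models
    HAS_OPTIONAL = True
except ImportError:
    HAS_OPTIONAL = False

# pylint: disable=C0103
DeferredRecord = None


def load_models():
    """Define classes that need the optional dependency; call this after
    option parsing, mirroring load_django_models()."""
    global DeferredRecord
    if not HAS_OPTIONAL:
        return

    class DeferredRecord(object):        # rebinds the module-level name
        """Stands in for ProbesDataModel / ProbesGroupsModel."""
        backend = _optional


load_models()
print(DeferredRecord)                    # the class, or None if import failed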
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index c4dd75e60..e6549b714 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -35,13 +35,17 @@ LOGGER = logging.getLogger(__name__)
class PropertyFile(object):
""" Base Properties file handler """
- def __init__(self, name):
+ def __init__(self, name, core):
"""
:param name: The filename of this properties file.
+ :type name: string
+ :param core: The Bcfg2.Server.Core initializing the Properties plugin
+ :type core: Bcfg2.Server.Core
.. automethod:: _write
"""
self.name = name
+ self.core = core
def write(self):
""" Write the data in this data structure back to the property
@@ -69,6 +73,12 @@ class PropertyFile(object):
file. """
raise NotImplementedError
+ def _expire_metadata_cache(self):
+ """ Expires the metadata cache, if it is required by the caching
+ mode. """
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+
def validate_data(self):
""" Verify that the data in this file is valid. """
raise NotImplementedError
@@ -81,9 +91,9 @@ class PropertyFile(object):
class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
"""Handle JSON Properties files."""
- def __init__(self, name):
+ def __init__(self, name, core):
Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
- PropertyFile.__init__(self, name)
+ PropertyFile.__init__(self, name, core)
self.json = None
def Index(self):
@@ -93,10 +103,13 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Could not load JSON data from %s: %s" %
(self.name, err))
+ self._expire_metadata_cache()
+ Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__
def _write(self):
json.dump(self.json, open(self.name, 'wb'))
return True
+ _write.__doc__ = PropertyFile._write.__doc__
def validate_data(self):
try:
@@ -105,6 +118,7 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Data for %s cannot be dumped to JSON: "
"%s" % (self.name, err))
+ validate_data.__doc__ = PropertyFile.validate_data.__doc__
def __str__(self):
return str(self.json)
@@ -116,11 +130,10 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
""" Handle YAML Properties files. """
- def __init__(self, name):
+ def __init__(self, name, core):
Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
- PropertyFile.__init__(self, name)
+ PropertyFile.__init__(self, name, core)
self.yaml = None
- __init__.__doc__ = Bcfg2.Server.Plugin.FileBacked.__init__.__doc__
def Index(self):
try:
@@ -129,6 +142,7 @@ class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Could not load YAML data from %s: %s" %
(self.name, err))
+ self._expire_metadata_cache()
Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__
def _write(self):
@@ -155,10 +169,15 @@ class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
""" Handle XML Properties files. """
- def __init__(self, name, should_monitor=False):
+ def __init__(self, name, core, should_monitor=False):
Bcfg2.Server.Plugin.StructFile.__init__(self, name,
should_monitor=should_monitor)
- PropertyFile.__init__(self, name)
+ PropertyFile.__init__(self, name, core)
+
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+ self._expire_metadata_cache()
+ Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__
def _write(self):
open(self.name, "wb").write(
@@ -258,11 +277,11 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
:class:`PropertyFile`
"""
if fname.endswith(".xml"):
- return XMLPropertyFile(fname)
+ return XMLPropertyFile(fname, self.core)
elif HAS_JSON and fname.endswith(".json"):
- return JSONPropertyFile(fname)
+ return JSONPropertyFile(fname, self.core)
elif HAS_YAML and (fname.endswith(".yaml") or fname.endswith(".yml")):
- return YAMLPropertyFile(fname)
+ return YAMLPropertyFile(fname, self.core)
else:
raise Bcfg2.Server.Plugin.PluginExecutionError(
"Properties: Unknown extension %s" % fname)
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
index 59fbe6f03..e2d8a058f 100644
--- a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -117,7 +117,7 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
self.logger.warning("PuppetENC is incompatible with aggressive "
"client metadata caching, try 'cautious' or "
"'initial' instead")
- self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+ self.core.metadata_cache.expire()
def end_statistics(self, metadata):
self.end_client_run(self, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Reporting.py b/src/lib/Bcfg2/Server/Plugins/Reporting.py
index 5c73546b4..e372006c7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Reporting.py
+++ b/src/lib/Bcfg2/Server/Plugins/Reporting.py
@@ -9,12 +9,15 @@ from Bcfg2.Reporting.Transport.base import TransportError
from Bcfg2.Server.Plugin import Statistics, PullSource, Threaded, \
PluginInitError, PluginExecutionError
-# required for reporting
try:
- import south # pylint: disable=W0611
- HAS_SOUTH = True
+ import django
+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
+ HAS_REPORTING = True
+ else:
+ import south # pylint: disable=W0611
+ HAS_REPORTING = True
except ImportError:
- HAS_SOUTH = False
+ HAS_REPORTING = False
def _rpc_call(method):
@@ -48,8 +51,8 @@ class Reporting(Statistics, Threaded, PullSource):
self.whoami = platform.node()
self.transport = None
- if not HAS_SOUTH:
- msg = "Django south is required for Reporting"
+ if not HAS_REPORTING:
+ msg = "Django 1.7+ or Django south is required for Reporting"
self.logger.error(msg)
raise PluginInitError(msg)
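The Reporting guard above now accepts either Django 1.7+ (whose built-in migrations replace south) or an older Django paired with south. A standalone sketch of the same decision, with the version test simplified to a tuple comparison:

def reporting_available(django_version, has_south):
    """Return True if a usable schema-migration backend is present."""
    if django_version is None:
        return False
    if django_version >= (1, 7):
        return True                       # built-in migrations
    return has_south                      # fall back to south

print(reporting_available((1, 8), has_south=False))   # True
print(reporting_available((1, 6), has_south=True))    # True
print(reporting_available((1, 6), has_south=False))   # False
print(reporting_available(None, has_south=False))     # False -- no Django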
diff --git a/src/lib/Bcfg2/Server/Plugins/Rules.py b/src/lib/Bcfg2/Server/Plugins/Rules.py
index a3f682ed6..cf659251c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Rules.py
+++ b/src/lib/Bcfg2/Server/Plugins/Rules.py
@@ -1,10 +1,17 @@
"""This generator provides rule-based entry mappings."""
+import copy
import re
+import string
import Bcfg2.Options
import Bcfg2.Server.Plugin
+class NameTemplate(string.Template):
+ """Simple subclass of string.Template with a custom delimiter."""
+ delimiter = '%'
+
+
class Rules(Bcfg2.Server.Plugin.PrioDir):
"""This is a generator that handles service assignments."""
__author__ = 'bcfg-dev@mcs.anl.gov'
@@ -12,7 +19,10 @@ class Rules(Bcfg2.Server.Plugin.PrioDir):
options = Bcfg2.Server.Plugin.PrioDir.options + [
Bcfg2.Options.BooleanOption(
cf=("rules", "regex"), dest="rules_regex",
- help="Allow regular expressions in Rules")]
+ help="Allow regular expressions in Rules"),
+ Bcfg2.Options.BooleanOption(
+ cf=("rules", "replace_name"), dest="rules_replace_name",
+ help="Replace %{name} in attributes with name of target entry")]
def __init__(self, core):
Bcfg2.Server.Plugin.PrioDir.__init__(self, core)
@@ -46,7 +56,22 @@ class Rules(Bcfg2.Server.Plugin.PrioDir):
return True
return False
+ def _apply(self, entry, data):
+ if self._replace_name_enabled:
+ data = copy.deepcopy(data)
+ for key, val in list(data.attrib.items()):
+ data.attrib[key] = NameTemplate(val).safe_substitute(
+ name=entry.get('name'))
+
+ Bcfg2.Server.Plugin.PrioDir._apply(self, entry, data)
+
@property
def _regex_enabled(self):
""" Return True if rules regexes are enabled, False otherwise """
return Bcfg2.Options.setup.rules_regex
+
+ @property
+ def _replace_name_enabled(self):
+ """ Return True if the replace_name feature is enabled,
+ False otherwise """
+ return Bcfg2.Options.setup.rules_replace_name
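NameTemplate above is just string.Template with '%' as the delimiter, so the new replace_name option lets Rules attributes contain %{name}, which _apply() expands to the name of the target entry. A self-contained example (the Service element and its target attribute are illustrative, not taken from a real Rules file):

import copy
import string

import lxml.etree


class NameTemplate(string.Template):
    """string.Template with '%' instead of '$' as the delimiter."""
    delimiter = '%'


def substitute_name(entry_name, rule):
    """Return a copy of the rule element with %{name} expanded in every
    attribute, as Rules._apply() does when replace_name is enabled."""
    rule = copy.deepcopy(rule)
    for key, val in list(rule.attrib.items()):
        rule.attrib[key] = NameTemplate(val).safe_substitute(name=entry_name)
    return rule


rule = lxml.etree.Element('Service', name='sshd', target='%{name}.service')
print(lxml.etree.tostring(substitute_name('sshd', rule)))
# prints the element with target expanded to "sshd.service"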
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index e4fb9b565..08acc4d8d 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -103,6 +103,7 @@ class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
class SSHbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.PullTarget):
"""
@@ -120,6 +121,10 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
private key for (hostname)
ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
public key for (hostname)
+ ssh_host_ed25519_key.H_(hostname) -> the v2 ssh host
+ private key for (hostname)
+ ssh_host_ed25519_key.pub.H_(hostname) -> the v2 ssh host
+ public key for (hostname)
ssh_known_hosts -> the current known hosts file. this
is regenerated each time a new key is generated.
@@ -127,10 +132,12 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
__author__ = 'bcfg-dev@mcs.anl.gov'
keypatterns = ["ssh_host_dsa_key",
"ssh_host_ecdsa_key",
+ "ssh_host_ed25519_key",
"ssh_host_rsa_key",
"ssh_host_key",
"ssh_host_dsa_key.pub",
"ssh_host_ecdsa_key.pub",
+ "ssh_host_ed25519_key.pub",
"ssh_host_rsa_key.pub",
"ssh_host_key.pub"]
@@ -141,6 +148,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core):
Bcfg2.Server.Plugin.Plugin.__init__(self, core)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
self.ipcache = {}
@@ -210,7 +218,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
try:
names[cmeta.hostname].update(
self.get_namecache_entry(ip))
- except socket.gaierror:
+ except socket.herror:
continue
names[cmeta.hostname] = sorted(names[cmeta.hostname])
@@ -284,6 +292,10 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.debug_log("New public key %s; invalidating "
"ssh_known_hosts cache" % event.filename)
self.skn = False
+
+ if self.core.metadata_cache_mode in ['cautious',
+ 'aggressive']:
+ self.core.metadata_cache.expire()
return
if event.filename == 'info.xml':
@@ -332,7 +344,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.ipcache[client] = False
msg = "Failed to find IP address for %s: %s" % (client,
result.error)
- self.logger(msg)
+ self.logger.error(msg)
raise PluginExecutionError(msg)
def get_namecache_entry(self, cip):
@@ -342,7 +354,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
if self.namecache[cip]:
return self.namecache[cip]
else:
- raise socket.gaierror
+ raise socket.herror
else:
# add an entry that has not been cached
try:
@@ -353,7 +365,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.namecache[cip] = []
self.namecache[cip].extend(rvlookup[1])
return self.namecache[cip]
- except socket.gaierror:
+ except socket.herror:
self.namecache[cip] = False
self.logger.error("Failed to find any names associated with "
"IP address %s" % cip)
@@ -415,7 +427,8 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
def GenerateHostKeyPair(self, client, filename):
"""Generate new host key pair for client."""
- match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa)_)?key)', filename)
+ match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa|ed25519)_)?key)',
+ filename)
if match:
hostkey = "%s.H_%s" % (match.group(1), client)
if match.group(2):
@@ -489,3 +502,15 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Failed to pull %s. This file does not "
"currently exist on the client" %
entry.get('name'))
+
+ def get_additional_data(self, metadata):
+ data = dict()
+ for key in self.keypatterns:
+ if key.endswith(".pub"):
+ try:
+ keyfile = "/etc/ssh/" + key
+ entry = self.entries[keyfile].best_matching(metadata)
+ data[key] = entry.data
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ pass
+ return data
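The GenerateHostKeyPair pattern above now also captures ed25519 host keys. A quick standalone check of what the extended regex matches (the filenames are examples):

import re

KEY_RE = re.compile(r'(ssh_host_(?:((?:ecd|d|r)sa|ed25519)_)?key)')

for fname in ['ssh_host_rsa_key', 'ssh_host_ecdsa_key',
              'ssh_host_ed25519_key', 'ssh_host_key']:
    match = KEY_RE.search(fname)
    print('%s -> %s %s' % (fname, match.group(1), match.group(2)))
# ssh_host_rsa_key -> ssh_host_rsa_key rsa
# ssh_host_ecdsa_key -> ssh_host_ecdsa_key ecdsa
# ssh_host_ed25519_key -> ssh_host_ed25519_key ed25519
# ssh_host_key -> ssh_host_key None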
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index cec2de297..ff67571fa 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -7,24 +7,18 @@ import lxml.etree
from Bcfg2.Server.Plugin import Plugin, Connector, DirectoryBacked, \
TemplateDataProvider, DefaultTemplateDataProvider
from Bcfg2.Logger import Debuggable
+from Bcfg2.Utils import safe_module_name
MODULE_RE = re.compile(r'(?P<filename>(?P<module>[^\/]+)\.py)$')
-def safe_module_name(module):
- """ Munge the name of a TemplateHelper module to avoid collisions
- with other Python modules. E.g., if someone has a helper named
- 'ldap.py', it should not be added to ``sys.modules`` as ``ldap``,
- but rather as something more obscure. """
- return '__TemplateHelper_%s' % module
-
-
class HelperModule(Debuggable):
""" Representation of a TemplateHelper module """
- def __init__(self, name):
+ def __init__(self, name, core):
Debuggable.__init__(self)
self.name = name
+ self.core = core
#: The name of the module as used by get_additional_data().
#: the name of the file with .py stripped off.
@@ -51,9 +45,14 @@ class HelperModule(Debuggable):
if event and event.code2str() not in ['exists', 'changed', 'created']:
return
+ # expire the metadata cache, because the module might have changed
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+
try:
- module = imp.load_source(safe_module_name(self._module_name),
- self.name)
+ module = imp.load_source(
+ safe_module_name('TemplateHelper', self._module_name),
+ self.name)
except: # pylint: disable=W0702
# this needs to be a blanket except because the
# imp.load_source() call can raise literally any error,
@@ -107,7 +106,6 @@ class TemplateHelper(Plugin, Connector, DirectoryBacked, TemplateDataProvider):
__author__ = 'chris.a.st.pierre@gmail.com'
ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.py[co])$')
patterns = MODULE_RE
- __child__ = HelperModule
def __init__(self, core):
Plugin.__init__(self, core)
@@ -115,6 +113,10 @@ class TemplateHelper(Plugin, Connector, DirectoryBacked, TemplateDataProvider):
DirectoryBacked.__init__(self, self.data)
TemplateDataProvider.__init__(self)
+ # The HelperModule needs access to the core, so we have to construct
+ # it manually and add the custom argument.
+ self.__child__ = lambda fname: HelperModule(fname, core)
+
def get_additional_data(self, _):
return dict([(h._module_name, h) # pylint: disable=W0212
for h in self.entries.values()])
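Because HelperModule now takes the core object, TemplateHelper can no longer point the __child__ class attribute at HelperModule directly; instead it binds a per-instance factory that closes over core. A minimal sketch of that trick with placeholder classes (Directory and Child are not the Bcfg2 base classes):

class Child(object):
    """Stands in for HelperModule, which now needs an extra argument."""
    def __init__(self, fname, core):
        self.fname = fname
        self.core = core


class Directory(object):
    """Stands in for DirectoryBacked: it calls self.__child__(fname) for
    every file it discovers."""
    __child__ = None                     # normally a class, e.g. Child

    def __init__(self, core):
        # bind a per-instance factory that smuggles in `core`
        self.__child__ = lambda fname: Child(fname, core)
        self.entries = {}

    def add(self, fname):
        self.entries[fname] = self.__child__(fname)


d = Directory(core='fake-core')
d.add('foo.py')
print(d.entries['foo.py'].core)          # fake-core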