Diffstat (limited to 'src/lib/Bcfg2/Server/Plugins')
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py             |   5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Guppy.py                    |   1
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py                 |  41
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py                |   4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Collection.py      |   4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py |  12
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py             | 111
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py       | 106
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py        | 135
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py                   |  43
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/PuppetENC.py                |   2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py                  |   6
12 files changed, 373 insertions, 97 deletions
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index ed349c87c..a7fa92201 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -414,7 +414,6 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
self._handlers = None
__init__.__doc__ = Bcfg2.Server.Plugin.EntrySet.__doc__
-
def set_debug(self, debug):
rv = Bcfg2.Server.Plugin.EntrySet.set_debug(self, debug)
for entry in self.entries.values():
@@ -780,8 +779,8 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
options = Bcfg2.Server.Plugin.GroupSpool.options + [
Bcfg2.Options.BooleanOption(
- '--cfg-validation', cf=('cfg', 'validation'), default=True,
- help='Run validation on Cfg files'),
+ '--cfg-validation', cf=('cfg', 'validation'), default=True,
+ help='Run validation on Cfg files'),
Bcfg2.Options.Option(
cf=("cfg", "handlers"), dest="cfg_handlers",
help="Cfg handlers to load",
diff --git a/src/lib/Bcfg2/Server/Plugins/Guppy.py b/src/lib/Bcfg2/Server/Plugins/Guppy.py
index 6d6df3cc3..c5969f978 100644
--- a/src/lib/Bcfg2/Server/Plugins/Guppy.py
+++ b/src/lib/Bcfg2/Server/Plugins/Guppy.py
@@ -34,6 +34,7 @@ class Guppy(Bcfg2.Server.Plugin.Plugin):
"""Guppy is a debugging plugin to help trace memory leaks"""
__author__ = 'bcfg-dev@mcs.anl.gov'
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable', 'Disable']
+ __child_rmi__ = __rmi__[:]
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index a2eeffc3d..24adee4f4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -40,7 +40,6 @@ def load_django_models():
hostname = models.CharField(max_length=255, primary_key=True)
version = models.CharField(max_length=31, null=True)
-
class ClientVersions(MutableMapping,
Bcfg2.Server.Plugin.DatabaseBacked):
""" dict-like object to make it easier to access client bcfg2
@@ -495,6 +494,7 @@ class MetadataGroup(tuple): # pylint: disable=E0012,R0924
class Metadata(Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.ClientRunHooks,
Bcfg2.Server.Plugin.DatabaseBacked):
"""This class contains data for bcfg2 server metadata."""
@@ -513,6 +513,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def __init__(self, core, datastore, watch_clients=True):
Bcfg2.Server.Plugin.Metadata.__init__(self)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
self.watch_clients = watch_clients
@@ -768,7 +769,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
def remove_client(self, client_name):
- """Remove a bundle."""
+ """Remove a client."""
if self._use_db:
try:
client = MetadataClientModel.objects.get(hostname=client_name)
@@ -953,13 +954,16 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.groups[gname]
self.states['groups.xml'] = True
+ def expire_cache(self, key=None):
+ self.core.metadata_cache.expire(key)
+
def HandleEvent(self, event):
"""Handle update events for data files."""
for handles, event_handler in self.handlers.items():
if handles(event):
# clear the entire cache when we get an event for any
# metadata file
- self.core.metadata_cache.expire()
+ self.expire_cache()
event_handler(event)
if False not in list(self.states.values()) and self.debug_flag:
@@ -997,17 +1001,21 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- profiles = [g for g in self.clientgroups[client]
- if g in self.groups and self.groups[g].is_profile]
- self.logger.info("Changing %s profile from %s to %s" %
- (client, profiles, profile))
- self.update_client(client, dict(profile=profile))
- if client in self.clientgroups:
- for prof in profiles:
- self.clientgroups[client].remove(prof)
- self.clientgroups[client].append(profile)
+ metadata = self.core.build_metadata(client)
+ if metadata.profile != profile:
+ self.logger.info("Changing %s profile from %s to %s" %
+ (client, metadata.profile, profile))
+ self.update_client(client, dict(profile=profile))
+ if client in self.clientgroups:
+ if metadata.profile in self.clientgroups[client]:
+ self.clientgroups[client].remove(metadata.profile)
+ self.clientgroups[client].append(profile)
+ else:
+ self.clientgroups[client] = [profile]
else:
- self.clientgroups[client] = [profile]
+ self.logger.debug(
+ "Ignoring %s request to change profile from %s to %s"
+ % (client, metadata.profile, profile))
else:
self.logger.info("Creating new client: %s, profile %s" %
(client, profile))
@@ -1023,8 +1031,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.add_client(client, dict(profile=profile))
self.clients.append(client)
self.clientgroups[client] = [profile]
- if not self._use_db:
- self.clients_xml.write()
+ if not self._use_db:
+ self.clients_xml.write()
def set_version(self, client, version):
"""Set version for provided client."""
@@ -1074,7 +1082,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
raise Bcfg2.Server.Plugin.MetadataConsistencyError(err)
return self.addresses[address][0]
try:
- cname = socket.gethostbyaddr(address)[0].lower()
+ cname = socket.getnameinfo(addresspair,
+ socket.NI_NAMEREQD)[0].lower()
if cname in self.aliases:
return self.aliases[cname]
return cname
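
For reference, a minimal standalone sketch of the socket.getnameinfo() call introduced above: unlike gethostbyaddr(), it takes the whole (address, port) pair and also handles IPv6 addresses. The address pair here is illustrative.

    import socket

    # Illustrative (address, port) pair; Metadata receives the client's
    # address pair from the server core.
    addresspair = ("127.0.0.1", 0)

    try:
        # NI_NAMEREQD makes getnameinfo() raise if there is no reverse
        # (PTR) record instead of silently returning the numeric address.
        cname = socket.getnameinfo(addresspair, socket.NI_NAMEREQD)[0].lower()
        print("resolved to %s" % cname)
    except socket.gaierror as err:
        print("no reverse record for %s: %s" % (addresspair[0], err))
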
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 9603cd518..dcd495d77 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -39,8 +39,8 @@ class NagiosGen(Plugin, Generator):
def createhostconfig(self, entry, metadata):
"""Build host specific configuration file."""
try:
- host_address = socket.gethostbyname(metadata.hostname)
- except socket.gaierror:
+ host_address = socket.getaddrinfo(metadata.hostname, None)[0][4][0]
+ except socket.error:
self.logger.error()
raise PluginExecutionError("Failed to find IP address for %s" %
metadata.hostname)
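
socket.getaddrinfo() returns a list of (family, socktype, proto, canonname, sockaddr) tuples, so the [0][4][0] above picks the numeric address out of the first result for either IPv4 or IPv6, and socket.error also covers the gaierror raised on lookup failure. A minimal sketch outside the plugin, with an illustrative hostname standing in for metadata.hostname:

    import socket

    hostname = "localhost"  # stands in for metadata.hostname

    try:
        # First result, fifth field (sockaddr), first element: the address.
        host_address = socket.getaddrinfo(hostname, None)[0][4][0]
    except socket.error as err:
        raise RuntimeError("Failed to find IP address for %s: %s"
                           % (hostname, err))
    print(host_address)
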
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index 1ff097471..0df8624f6 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -579,6 +579,10 @@ class Collection(list, Debuggable):
self.filter_unknown(unknown)
return packages, unknown
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__,
+ list.__repr__(self))
+
def get_collection_class(source_type):
""" Given a source type, determine the class of Collection object
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index 1a56d77c4..1af046ec0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -79,13 +79,12 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile):
:type event: Bcfg2.Server.FileMonitor.Event
:returns: None
"""
- Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event=event)
if event and event.filename != self.name:
for fpath in self.extras:
if fpath == os.path.abspath(event.filename):
self.parsed.add(fpath)
break
-
+ Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event=event)
if self.loaded:
self.logger.info("Reloading Packages plugin")
self.pkg_obj.Reload()
@@ -102,10 +101,11 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile):
def Index(self):
Bcfg2.Server.Plugin.StructFile.Index(self)
self.entries = []
- for xsource in self.xdata.findall('.//Source'):
- source = self.source_from_xml(xsource)
- if source is not None:
- self.entries.append(source)
+ if self.loaded:
+ for xsource in self.xdata.findall('.//Source'):
+ source = self.source_from_xml(xsource)
+ if source is not None:
+ self.entries.append(source)
Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__ + """
``Index`` is responsible for calling :func:`source_from_xml`
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index 4bbcc59f7..0d49473c6 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -53,14 +53,15 @@ The Yum Backend
import os
import re
import sys
+import time
import copy
import errno
import socket
import logging
import lxml.etree
-import Bcfg2.Options
import Bcfg2.Server.Plugin
import Bcfg2.Server.FileMonitor
+from lockfile import FileLock
from Bcfg2.Utils import Executor
# pylint: disable=W0622
from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \
@@ -274,6 +275,8 @@ class YumCollection(Collection):
.. private-include: _add_gpg_instances, _get_pulp_consumer
"""
+ _helper = None
+
#: Options that are included in the [packages:yum] section of the
#: config but that should not be included in the temporary
#: yum.conf we write out
@@ -287,19 +290,25 @@ class YumCollection(Collection):
debug=debug)
self.keypath = os.path.join(self.cachepath, "keys")
- self._helper = None
+ #: A :class:`Bcfg2.Utils.Executor` object to use to run
+ #: external commands
+ self.cmd = Executor()
+
if self.use_yum:
#: Define a unique cache file for this collection to use
#: for cached yum metadata
self.cachefile = os.path.join(self.cachepath,
"cache-%s" % self.cachekey)
- if not os.path.exists(self.cachefile):
- os.mkdir(self.cachefile)
#: The path to the server-side config file used when
#: resolving packages with the Python yum libraries
self.cfgfile = os.path.join(self.cachefile, "yum.conf")
- self.write_config()
+
+ if not os.path.exists(self.cachefile):
+ self.debug_log("Creating common cache %s" % self.cachefile)
+ os.mkdir(self.cachefile)
+ if not self.disableMetaData:
+ self.setup_data()
self.cmd = Executor()
else:
self.cachefile = None
@@ -322,7 +331,27 @@ class YumCollection(Collection):
self.logger.error("Could not create Pulp consumer "
"cert directory at %s: %s" %
(certdir, err))
- self.pulp_cert_set = PulpCertificateSet(certdir)
+ self.__class__.pulp_cert_set = PulpCertificateSet(certdir)
+
+ @property
+ def disableMetaData(self):
+ """ Report whether or not metadata processing is enabled.
+ This duplicates code in Packages/__init__.py, and can probably
+ be removed in Bcfg2 1.4 when we have a module-level setup
+ object. """
+ if self.setup is None:
+ return True
+ try:
+ return not self.setup.cfp.getboolean("packages", "resolver")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+ except ValueError:
+ # for historical reasons we also accept "enabled" and
+ # "disabled"
+ return self.setup.cfp.get(
+ "packages",
+ "metadata",
+ default="enabled").lower() == "disabled"
@property
def __package_groups__(self):
@@ -337,15 +366,17 @@ class YumCollection(Collection):
forking, but apparently not); finally we check in /usr/sbin,
the default location. """
if not self._helper:
- self._helper = Bcfg2.Options.setup.yum_helper
- if not self._helper:
+ # pylint: disable=W0212
+ self.__class__._helper = Bcfg2.Options.setup.yum_helper
+ if not self.__class__._helper:
# first see if bcfg2-yum-helper is in PATH
try:
self.debug_log("Checking for bcfg2-yum-helper in $PATH")
self.cmd.run(['bcfg2-yum-helper'])
- self._helper = 'bcfg2-yum-helper'
+ self.__class__._helper = 'bcfg2-yum-helper'
except OSError:
- self._helper = "/usr/sbin/bcfg2-yum-helper"
+ self.__class__._helper = "/usr/sbin/bcfg2-yum-helper"
+ # pylint: enable=W0212
return self._helper
@property
@@ -382,6 +413,7 @@ class YumCollection(Collection):
# the rpmdb is so hopelessly intertwined with yum that we
# have to totally reinvent the dependency resolver.
mainopts = dict(cachedir='/',
+ persistdir='/',
installroot=self.cachefile,
keepcache="0",
debuglevel="0",
@@ -846,6 +878,17 @@ class YumCollection(Collection):
if not self.use_yum:
return Collection.complete(self, packagelist)
+ lock = FileLock(os.path.join(self.cachefile, "lock"))
+ slept = 0
+ while lock.is_locked():
+ if slept > 30:
+ self.logger.warning("Packages: Timeout waiting for yum cache "
+ "to release its lock")
+ return set(), set()
+ self.logger.debug("Packages: Yum cache is locked, waiting...")
+ time.sleep(3)
+ slept += 3
+
if packagelist:
try:
result = self.call_helper(
@@ -890,28 +933,30 @@ class YumCollection(Collection):
cmd.append("-d")
cmd.append(command)
self.debug_log("Packages: running %s" % " ".join(cmd))
+
if inputdata:
- result = self.cmd.run(cmd, inputdata=json.dumps(inputdata))
+ result = self.cmd.run(cmd, timeout=self.setup['client_timeout'],
+ inputdata=json.dumps(inputdata))
else:
- result = self.cmd.run(cmd)
+ result = self.cmd.run(cmd, timeout=self.setup['client_timeout'])
if not result.success:
- errlines = result.error.splitlines()
self.logger.error("Packages: error running bcfg2-yum-helper: %s" %
- errlines[0])
- for line in errlines[1:]:
- self.logger.error("Packages: %s" % line)
+ result.error)
elif result.stderr:
- errlines = result.stderr.splitlines()
self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- errlines[0])
- for line in errlines[1:]:
- self.debug_log("Packages: %s" % line)
+ result.stderr)
+
try:
return json.loads(result.stdout)
except ValueError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: error reading bcfg2-yum-helper "
- "output: %s" % err)
+ if result.stdout:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Error reading bcfg2-yum-helper "
+ "output: %s" % err)
+ self.logger.error("Packages: bcfg2-yum-helper output: %s" %
+ result.stdout)
+ else:
+ self.logger.error("Packages: No bcfg2-yum-helper output")
raise
def setup_data(self, force_update=False):
@@ -924,8 +969,7 @@ class YumCollection(Collection):
If using the yum Python libraries, this cleans up cached yum
metadata, regenerates the server-side yum config (in order to
catch any new sources that have been added to this server),
- and then cleans up cached yum metadata again, in case the new
- config has any preexisting cache.
+ then regenerates the yum cache.
:param force_update: Ignore all local cache and setup data
from its original upstream sources (i.e.,
@@ -936,23 +980,22 @@ class YumCollection(Collection):
return Collection.setup_data(self, force_update)
if force_update:
- # we call this twice: one to clean up data from the old
- # config, and once to clean up data from the new config
+ # clean up data from the old config
try:
self.call_helper("clean")
except ValueError:
# error reported by call_helper
pass
- os.unlink(self.cfgfile)
+ if os.path.exists(self.cfgfile):
+ os.unlink(self.cfgfile)
self.write_config()
- if force_update:
- try:
- self.call_helper("clean")
- except ValueError:
- # error reported by call_helper
- pass
+ try:
+ self.call_helper("makecache")
+ except ValueError:
+ # error reported by call_helper
+ pass
class YumSource(Source):
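
The complete() hunk above polls a lockfile.FileLock so that read-only dependency resolution waits while another process rebuilds the shared yum cache. A standalone sketch of that polling pattern, with an illustrative cache directory standing in for self.cachefile:

    import os
    import time
    from lockfile import FileLock  # same third-party module imported above

    cachedir = "/tmp/bcfg2-yum-cache-demo"  # stands in for self.cachefile
    lock = FileLock(os.path.join(cachedir, "lock"))

    slept = 0
    while lock.is_locked():
        if slept > 30:
            # Give up rather than blocking the client bind indefinitely.
            raise RuntimeError("Timeout waiting for yum cache lock")
        time.sleep(3)
        slept += 3
    # The writer holding the lock has finished; resolution against the
    # shared cache can proceed.
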
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
index ee0203351..32db0b32d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
@@ -10,6 +10,8 @@ import yum
import logging
import Bcfg2.Options
import Bcfg2.Logger
+from Bcfg2.Compat import wraps
+from lockfile import FileLock, LockTimeout
try:
import json
except ImportError:
@@ -41,8 +43,8 @@ def pkgtup_to_string(package):
return ''.join(str(e) for e in rv)
-class DepSolver(object):
- """ Yum dependency solver """
+class YumHelper(object):
+ """ Yum helper base object """
def __init__(self, cfgfile, verbose=1):
self.cfgfile = cfgfile
@@ -56,6 +58,16 @@ class DepSolver(object):
self.yumbase._getConfig(cfgfile, debuglevel=verbose)
# pylint: enable=E1121,W0212
self.logger = logging.getLogger(self.__class__.__name__)
+
+
+class DepSolver(YumHelper):
+ """ Yum dependency solver. This is used for operations that only
+ read from the yum cache, and thus operates in cacheonly mode. """
+
+ def __init__(self, cfgfile, verbose=1):
+ YumHelper.__init__(self, cfgfile, verbose=verbose)
+ # internally, yum uses an integer, not a boolean, for conf.cache
+ self.yumbase.conf.cache = 1
self._groups = None
def get_groups(self):
@@ -180,6 +192,45 @@ class DepSolver(object):
packages.add(txmbr.pkgtup)
return list(packages), list(unknown)
+
+def acquire_lock(func):
+ """ decorator for CacheManager methods that gets and release a
+ lock while the method runs """
+ @wraps(func)
+ def inner(self, *args, **kwargs):
+ """ Get and release a lock while running the function this
+ wraps. """
+ self.logger.debug("Acquiring lock at %s" % self.lockfile)
+ while not self.lock.i_am_locking():
+ try:
+ self.lock.acquire(timeout=60) # wait up to 60 seconds
+ except LockTimeout:
+ self.lock.break_lock()
+ self.lock.acquire()
+ try:
+ func(self, *args, **kwargs)
+ finally:
+ self.lock.release()
+ self.logger.debug("Released lock at %s" % self.lockfile)
+
+ return inner
+
+
+class CacheManager(YumHelper):
+ """ Yum cache manager. Unlike :class:`DepSolver`, this can write
+ to the yum cache, and so is used for operations that muck with the
+ cache. (Technically, :func:`CacheManager.clean_cache` could be in
+ either DepSolver or CacheManager, but for consistency I've put it
+ here.) """
+
+ def __init__(self, cfgfile, verbose=1):
+ YumHelper.__init__(self, cfgfile, verbose=verbose)
+ self.lockfile = \
+ os.path.join(os.path.dirname(self.yumbase.conf.config_file_path),
+ "lock")
+ self.lock = FileLock(self.lockfile)
+
+ @acquire_lock
def clean_cache(self):
""" clean the yum cache """
for mdtype in ["Headers", "Packages", "Sqlite", "Metadata",
@@ -192,6 +243,27 @@ class DepSolver(object):
if not msg.startswith("0 "):
self.logger.info(msg)
+ @acquire_lock
+ def populate_cache(self):
+ """ populate the yum cache """
+ for repo in self.yumbase.repos.findRepos('*'):
+ repo.metadata_expire = 0
+ repo.mdpolicy = "group:all"
+ self.yumbase.doRepoSetup()
+ self.yumbase.repos.doSetup()
+ for repo in self.yumbase.repos.listEnabled():
+ # this populates the cache as a side effect
+ repo.repoXML # pylint: disable=W0104
+ try:
+ repo.getGroups()
+ except yum.Errors.RepoMDError:
+ pass # this repo has no groups
+ self.yumbase.repos.populateSack(mdtype='metadata', cacheonly=1)
+ self.yumbase.repos.populateSack(mdtype='filelists', cacheonly=1)
+ self.yumbase.repos.populateSack(mdtype='otherdata', cacheonly=1)
+ # this does something with the groups cache as a side effect
+ self.yumbase.comps # pylint: disable=W0104
+
class HelperSubcommand(Bcfg2.Options.Subcommand):
# the value to JSON encode and print out if the command fails
@@ -207,8 +279,6 @@ class HelperSubcommand(Bcfg2.Options.Subcommand):
self.verbosity = 5
elif Bcfg2.Options.setup.verbose:
self.verbosity = 1
- self.depsolver = DepSolver(Bcfg2.Options.setup.yum_config,
- self.verbosity)
def run(self, setup):
try:
@@ -233,16 +303,36 @@ class HelperSubcommand(Bcfg2.Options.Subcommand):
raise NotImplementedError
-class Clean(HelperSubcommand):
+class DepSolverSubcommand(HelperSubcommand):
+ def __init__(self):
+ HelperSubcommand.__init__(self)
+ self.depsolver = DepSolver(Bcfg2.Options.setup.yum_config,
+ self.verbosity)
+
+
+class CacheManagerSubcommand(HelperSubcommand):
fallback = False
accept_input = False
+ def __init__(self):
+ HelperSubcommand.__init__(self)
+ self.cachemgr = CacheManager(Bcfg2.Options.setup.yum_config,
+ self.verbosity)
+
+
+class Clean(CacheManagerSubcommand):
+ def _run(self, setup, data): # pylint: disable=W0613
+ self.cachemgr.clean_cache()
+ return True
+
+
+class MakeCache(CacheManagerSubcommand):
def _run(self, setup, data): # pylint: disable=W0613
- self.depsolver.clean_cache()
+ self.cachemgr.populate_cache()
return True
-class Complete(HelperSubcommand):
+class Complete(DepSolverSubcommand):
fallback = dict(packages=[], unknown=[])
def _run(self, _, data):
@@ -253,7 +343,7 @@ class Complete(HelperSubcommand):
return dict(packages=list(packages), unknown=list(unknown))
-class GetGroups(HelperSubcommand):
+class GetGroups(DepSolverSubcommand):
def _run(self, _, data):
rv = dict()
for gdata in data:
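
The acquire_lock decorator above serializes cache writers: each wrapped method waits up to 60 seconds for the lock, then breaks what it assumes is a stale lock left behind by a dead helper. A standalone sketch of the same acquire/break/release sequence, using an illustrative lock path:

    import logging
    from lockfile import FileLock, LockTimeout

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("lock-demo")

    lock = FileLock("/tmp/bcfg2-yum-helper-demo.lock")  # illustrative path

    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except LockTimeout:
            # Assume the previous holder died and left a stale lock behind.
            logger.warning("Breaking stale lock")
            lock.break_lock()
            lock.acquire()
    try:
        logger.info("cache work (clean/populate) would run here")
    finally:
        lock.release()
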
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index 5b7c76765..e6240f39a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -9,7 +9,7 @@ import shutil
import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import urlopen, HTTPError, URLError
+from Bcfg2.Compat import urlopen, HTTPError, URLError, MutableMapping
from Bcfg2.Server.Plugins.Packages.Collection import Collection, \
get_collection_class
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
@@ -33,7 +33,54 @@ class PackagesBackendAction(Bcfg2.Options.ComponentAction):
module = True
+class OnDemandDict(MutableMapping):
+ """ This maps a set of keys to a set of value-getting functions;
+ the values are populated on-the-fly by the functions as the values
+ are needed (and not before). This is used by
+ :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data`;
+ see the docstring for that function for details on why.
+
+ Unlike a dict, you should not specify values for for the righthand
+ side of this mapping, but functions that get values. E.g.:
+
+ .. code-block:: python
+
+ d = OnDemandDict(foo=load_foo,
+ bar=lambda: "bar");
+ """
+
+ def __init__(self, **getters):
+ self._values = dict()
+ self._getters = dict(**getters)
+
+ def __getitem__(self, key):
+ if key not in self._values:
+ self._values[key] = self._getters[key]()
+ return self._values[key]
+
+ def __setitem__(self, key, getter):
+ self._getters[key] = getter
+
+ def __delitem__(self, key):
+ del self._values[key]
+ del self._getters[key]
+
+ def __len__(self):
+ return len(self._getters)
+
+ def __iter__(self):
+ return iter(self._getters.keys())
+
+ def __repr__(self):
+ rv = dict(self._values)
+ for key in self._getters.keys():
+ if key not in rv:
+ rv[key] = 'unknown'
+ return str(rv)
+
+
class Packages(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.Connector,
@@ -87,8 +134,12 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: and :func:`Reload`
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+ __child_rmi__ = Bcfg2.Server.Plugin.Plugin.__child_rmi__ + \
+ [('Refresh', 'expire_cache'), ('Reload', 'expire_cache')]
+
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
@@ -141,8 +192,21 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: object when one is requested, so each entry is very
#: short-lived -- it's purged at the end of each client run.
self.clients = dict()
- # pylint: enable=C0301
+ #: groupcache caches group lookups. It maps Collections (via
+ #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`)
+ #: to sets of package groups, and thence to the packages
+ #: indicated by those groups.
+ self.groupcache = dict()
+
+ #: pkgcache caches complete package sets. It maps Collections
+ #: (via
+ #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`)
+ #: to sets of initial packages, and thence to the final
+ #: (complete) package selections resolved from the initial
+ #: packages
+ self.pkgcache = dict()
+ # pylint: enable=C0301
__init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__
def set_debug(self, debug):
@@ -349,14 +413,24 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
for el in to_remove:
el.getparent().remove(el)
- gpkgs = collection.get_groups(groups)
- for pkgs in gpkgs.values():
+ groups.sort()
+ # check for this set of groups in the group cache
+ gkey = hash(tuple(groups))
+ if gkey not in self.groupcache[collection.cachekey]:
+ self.groupcache[collection.cachekey][gkey] = \
+ collection.get_groups(groups)
+ for pkgs in self.groupcache[collection.cachekey][gkey].values():
base.update(pkgs)
# essential pkgs are those marked as such by the distribution
base.update(collection.get_essential())
- packages, unknown = collection.complete(base)
+ # check for this set of packages in the package cache
+ pkey = hash(tuple(base))
+ if pkey not in self.pkgcache[collection.cachekey]:
+ self.pkgcache[collection.cachekey][pkey] = \
+ collection.complete(base)
+ packages, unknown = self.pkgcache[collection.cachekey][pkey]
if unknown:
self.logger.info("Packages: Got %d unknown entries" % len(unknown))
self.logger.info("Packages: %s" % list(unknown))
@@ -382,6 +456,9 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self._load_config()
return True
+ def expire_cache(self, _=None):
+ self.Reload()
+
def _load_config(self, force_update=False):
"""
Load the configuration data and setup sources
@@ -409,9 +486,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if not self.disableMetaData:
collection.setup_data(force_update)
- # clear Collection caches
+ # clear Collection and package caches
self.clients = dict()
self.collections = dict()
+ self.groupcache = dict()
+ self.pkgcache = dict()
for source in self.sources.entries:
cachefiles.add(source.cachefile)
@@ -503,7 +582,8 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if len(sclasses) > 1:
self.logger.warning("Packages: Multiple source types found for "
"%s: %s" %
- ",".join([s.__name__ for s in sclasses]))
+ (metadata.hostname,
+ ",".join([s.__name__ for s in sclasses])))
cclass = Collection
elif len(sclasses) == 0:
self.logger.error("Packages: No sources found for %s" %
@@ -523,24 +603,47 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if cclass != Collection:
self.clients[metadata.hostname] = ckey
self.collections[ckey] = collection
+ self.groupcache.setdefault(ckey, dict())
+ self.pkgcache.setdefault(ckey, dict())
return collection
def get_additional_data(self, metadata):
""" Return additional data for the given client. This will be
- a dict containing a single key, ``sources``, whose value is a
- list of data returned from
- :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_additional_data`,
- namely, a list of
- :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.url_map`
- data.
+ an :class:`Bcfg2.Server.Plugins.Packages.OnDemandDict`
+ containing two keys:
+
+ * ``sources``, whose value is a list of data returned from
+ :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_additional_data`,
+ namely, a list of
+ :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.url_map`
+ data; and
+ * ``get_config``, whose value is the
+ :func:`Bcfg2.Server.Plugins.Packages.Packages.get_config`
+ function, which can be used to get the Packages config for
+ other systems.
+
+ This uses an OnDemandDict instead of just a normal dict
+ because loading a source collection can be a fairly
+ time-consuming process, particularly for the first time. As a
+ result, when all metadata objects are built at once (such as
+ after the server is restarted, or far more frequently if
+ Metadata caching is disabled), this function would be a major
+ bottleneck if we tried to build all collections at the same
+ time. Instead, they're merely built on-demand.
:param metadata: The client metadata
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:return: dict of lists of ``url_map`` data
"""
- collection = self.get_collection(metadata)
- return dict(sources=collection.get_additional_data(),
- get_config=self.get_config)
+ def get_sources():
+ """ getter for the 'sources' key of the OnDemandDict
+ returned by this function. This delays calling
+ get_collection() until it's absolutely necessary. """
+ return self.get_collection(metadata).get_additional_data
+
+ return OnDemandDict(
+ sources=get_sources,
+ get_config=lambda: self.get_config)
def end_client_run(self, metadata):
""" Hook to clear the cache for this client in
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index 9b485e29b..0d264a5a6 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -195,14 +195,16 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
class Probes(Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.Connector,
Bcfg2.Server.Plugin.DatabaseBacked):
""" A plugin to gather information from a client machine """
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
try:
@@ -262,7 +264,7 @@ class Probes(Bcfg2.Server.Plugin.Probing,
ProbesDataModel.objects.filter(
hostname=client.hostname).exclude(
- probe__in=self.probedata[client.hostname]).delete()
+ probe__in=self.probedata[client.hostname]).delete()
for group in self.cgroups[client.hostname]:
try:
@@ -277,14 +279,19 @@ class Probes(Bcfg2.Server.Plugin.Probing,
group=group)
ProbesGroupsModel.objects.filter(
hostname=client.hostname).exclude(
- group__in=self.cgroups[client.hostname]).delete()
+ group__in=self.cgroups[client.hostname]).delete()
+
+ def expire_cache(self, key=None):
+ self.load_data(client=key)
- def load_data(self):
+ def load_data(self, client=None):
""" Load probe data from the appropriate backend (probed.xml
or the database) """
if self._use_db:
- return self._load_data_db()
+ return self._load_data_db(client=client)
else:
+ # the XML backend doesn't support loading data for single
+ # clients, so it reloads all data
return self._load_data_xml()
def _load_data_xml(self):
@@ -309,20 +316,36 @@ class Probes(Bcfg2.Server.Plugin.Probing,
elif pdata.tag == 'Group':
self.cgroups[client.get('name')].append(pdata.get('name'))
- def _load_data_db(self):
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+
+ def _load_data_db(self, client=None):
""" Load probe data from the database """
- self.probedata = {}
- self.cgroups = {}
- for pdata in ProbesDataModel.objects.all():
+ if client is None:
+ self.probedata = {}
+ self.cgroups = {}
+ probedata = ProbesDataModel.objects.all()
+ groupdata = ProbesGroupsModel.objects.all()
+ else:
+ self.probedata.pop(client, None)
+ self.cgroups.pop(client, None)
+ probedata = ProbesDataModel.objects.filter(hostname=client)
+ groupdata = ProbesGroupsModel.objects.filter(hostname=client)
+
+ for pdata in probedata:
if pdata.hostname not in self.probedata:
self.probedata[pdata.hostname] = ClientProbeDataSet(
timestamp=time.mktime(pdata.timestamp.timetuple()))
self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data)
- for pgroup in ProbesGroupsModel.objects.all():
+ for pgroup in groupdata:
if pgroup.hostname not in self.cgroups:
self.cgroups[pgroup.hostname] = []
self.cgroups[pgroup.hostname].append(pgroup.group)
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata,
+ key=client)
+
@track_statistics()
def GetProbes(self, meta):
return self.probes.get_probe_data(meta)
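
Several plugins in this patch (Metadata, Probes, Packages, SSHbase) gain an expire_cache(key=None) hook: no key means flush everything, while a hostname key lets a plugin drop only that client's data, as Probes._load_data_db does above (Packages and SSHbase simply flush everything regardless of the key). A toy sketch of that contract, illustrative only and not part of Bcfg2:

    class ToyCachingPlugin(object):
        """ illustrative only; mirrors the key=None vs. hostname semantics """

        def __init__(self):
            self._cache = {}  # hostname -> cached data

        def expire_cache(self, key=None):
            if key is None:
                self._cache.clear()          # e.g. probed.xml was rewritten
            else:
                self._cache.pop(key, None)   # e.g. one client's DB rows changed
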
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
index 3b367573b..a02f012a0 100644
--- a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -117,7 +117,7 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
self.logger.warning("PuppetENC is incompatible with aggressive "
"client metadata caching, try 'cautious' or "
"'initial' instead")
- self.core.cache.expire()
+ self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
def end_statistics(self, metadata):
self.end_client_run(self, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index 186d61c6e..c858b881b 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -93,6 +93,7 @@ class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
class SSHbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.PullTarget):
"""
@@ -126,6 +127,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
self.ipcache = {}
@@ -150,9 +152,11 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.entries["/etc/ssh/" + keypattern] = \
HostKeyEntrySet(keypattern, self.data)
self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
-
self.cmd = Executor()
+ def expire_cache(self, key=None):
+ self.__skn = False
+
def get_skn(self):
"""Build memory cache of the ssh known hosts file."""
if not self.__skn: