Diffstat (limited to 'src/lib/Bcfg2/Server')
-rw-r--r--  src/lib/Bcfg2/Server/Admin.py | 48
-rw-r--r--  src/lib/Bcfg2/Server/BuiltinCore.py | 21
-rw-r--r--  src/lib/Bcfg2/Server/Cache.py | 17
-rw-r--r--  src/lib/Bcfg2/Server/CherrypyCore.py | 16
-rw-r--r--  src/lib/Bcfg2/Server/Core.py | 53
-rwxr-xr-x  src/lib/Bcfg2/Server/Encryption.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/Inotify.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Info.py | 97
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Bundler.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Lint/MergeFiles.py | 3
-rw-r--r--  src/lib/Bcfg2/Server/Lint/RequiredAttrs.py | 166
-rw-r--r--  src/lib/Bcfg2/Server/Lint/TemplateHelper.py | 9
-rw-r--r--  src/lib/Bcfg2/Server/Lint/__init__.py | 6
-rw-r--r--  src/lib/Bcfg2/Server/MultiprocessingCore.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugin/__init__.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Plugin/helpers.py | 100
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/AWSTags.py | 5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bundler.py | 8
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py | 39
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Defaults.py | 12
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupLogic.py | 13
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ldap.py | 413
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py | 24
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 8
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ohai.py | 7
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Apt.py | 22
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py | 35
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pac.py | 145
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py | 5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Source.py | 120
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py | 152
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py | 47
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py | 54
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py | 34
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Properties.py | 41
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/PuppetENC.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Reporting.py | 15
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Rules.py | 27
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py | 35
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TemplateHelper.py | 26
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/models.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Reports/updatefix.py | 8
-rw-r--r--  src/lib/Bcfg2/Server/migrations/0001_initial.py | 45
-rw-r--r--  src/lib/Bcfg2/Server/migrations/__init__.py | 0
-rw-r--r--  src/lib/Bcfg2/Server/models.py | 29
-rw-r--r--  src/lib/Bcfg2/Server/south_migrations/0001_initial.py | 70
-rw-r--r--  src/lib/Bcfg2/Server/south_migrations/__init__.py | 0
49 files changed, 1436 insertions, 560 deletions
diff --git a/src/lib/Bcfg2/Server/Admin.py b/src/lib/Bcfg2/Server/Admin.py
index c294e6be5..77bca88eb 100644
--- a/src/lib/Bcfg2/Server/Admin.py
+++ b/src/lib/Bcfg2/Server/Admin.py
@@ -25,15 +25,19 @@ import Bcfg2.Server.Plugins.Metadata
try:
from django.core.exceptions import ImproperlyConfigured
from django.core import management
+ import django
import django.conf
import Bcfg2.Server.models
HAS_DJANGO = True
- try:
- import south # pylint: disable=W0611
+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
HAS_REPORTS = True
- except ImportError:
- HAS_REPORTS = False
+ else:
+ try:
+ import south # pylint: disable=W0611
+ HAS_REPORTS = True
+ except ImportError:
+ HAS_REPORTS = False
except ImportError:
HAS_DJANGO = False
HAS_REPORTS = False
@@ -439,6 +443,25 @@ class Compare(AdminCmd):
print("")
+class ExpireCache(_ProxyAdminCmd):
+ """ Expire the metadata cache """
+
+ options = _ProxyAdminCmd.options + [
+ Bcfg2.Options.PositionalArgument(
+ "hostname", nargs="*", default=[],
+ help="Expire cache for the given host(s)")]
+
+ def run(self, setup):
+ clients = None
+ if setup.hostname is not None and len(setup.hostname) > 0:
+ clients = setup.hostname
+
+ try:
+ self.proxy.expire_metadata_cache(clients)
+ except Bcfg2.Client.Proxy.ProxyError:
+ self.errExit("Proxy Error: %s" % sys.exc_info()[1])
+
+
class Init(AdminCmd):
"""Interactively initialize a new repository."""
@@ -643,7 +666,7 @@ bcfg2 = %s
def create_key(self):
"""Creates a bcfg2.key at the directory specifed by keypath."""
cmd = Executor(timeout=120)
- subject = "/C=%s/ST=%s/L=%s/CN=%s'" % (
+ subject = "/C=%s/ST=%s/L=%s/CN=%s" % (
self.data['country'], self.data['state'], self.data['location'],
self.data['shostname'])
key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes",
@@ -877,6 +900,7 @@ if HAS_DJANGO:
Django management system """
command = None
args = []
+ kwargs = {}
def run(self, _):
'''Call a django command'''
@@ -885,7 +909,7 @@ if HAS_DJANGO:
else:
command = self.__class__.__name__.lower()
args = [command] + self.args
- management.call_command(*args)
+ management.call_command(*args, **self.kwargs)
class DBShell(_DjangoProxyCmd):
""" Call the Django 'dbshell' command on the database """
@@ -901,7 +925,6 @@ if HAS_DJANGO:
""" Sync the Django ORM with the configured database """
def run(self, setup):
- Bcfg2.Server.models.load_models()
try:
Bcfg2.DBSettings.sync_databases(
interactive=False,
@@ -915,6 +938,17 @@ if HAS_DJANGO:
self.logger.error("Database update failed: %s" % err)
raise SystemExit(1)
+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
+ class Makemigrations(_DjangoProxyCmd):
+ """ Call the 'makemigrations' command on the database """
+ args = ['Reporting']
+
+ else:
+ class Schemamigration(_DjangoProxyCmd):
+ """ Call the South 'schemamigration' command on the database """
+ args = ['Bcfg2.Reporting']
+ kwargs = {'auto': True}
+
if HAS_REPORTS:
import datetime
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
index e138c57e4..dc5cc46fb 100644
--- a/src/lib/Bcfg2/Server/BuiltinCore.py
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -34,7 +34,8 @@ class BuiltinCore(NetworkCore):
daemon_args = dict(uid=Bcfg2.Options.setup.daemon_uid,
gid=Bcfg2.Options.setup.daemon_gid,
umask=int(Bcfg2.Options.setup.umask, 8),
- detach_process=True)
+ detach_process=True,
+ files_preserve=self._logfilehandles())
if Bcfg2.Options.setup.daemon:
daemon_args['pidfile'] = TimeoutPIDLockFile(
Bcfg2.Options.setup.daemon, acquire_timeout=5)
@@ -44,6 +45,24 @@ class BuiltinCore(NetworkCore):
self.context = daemon.DaemonContext(**daemon_args)
__init__.__doc__ = NetworkCore.__init__.__doc__.split('.. -----')[0]
+ def _logfilehandles(self, logger=None):
+ """ Get a list of all filehandles logger, that have to be handled
+ with DaemonContext.files_preserve to keep looging working.
+
+ :param logger: The logger to get the file handles of. By default,
+ self.logger is used.
+ :type logger: logging.Logger
+ """
+ if logger is None:
+ logger = self.logger
+
+ handles = [handler.stream.fileno()
+ for handler in logger.handlers
+ if hasattr(handler, 'stream')]
+ if logger.parent:
+ handles += self._logfilehandles(logger.parent)
+ return handles
+
def _dispatch(self, method, args, dispatch_dict):
""" Dispatch XML-RPC method calls
diff --git a/src/lib/Bcfg2/Server/Cache.py b/src/lib/Bcfg2/Server/Cache.py
index d05eb0bf6..b3b906b2c 100644
--- a/src/lib/Bcfg2/Server/Cache.py
+++ b/src/lib/Bcfg2/Server/Cache.py
@@ -96,15 +96,19 @@ class _Cache(MutableMapping):
return len(list(iter(self)))
def expire(self, key=None):
- """ expire all items, or a specific item, from the cache """
+ """ expire all items, or a specific item, from the cache
+
+ :returns: number of expired entries
+ """
+
if key is None:
- expire(*self._tags)
+ return expire(*self._tags)
else:
tags = self._tags | set([key])
# py 2.5 doesn't support mixing *args and explicit keyword
# args
kwargs = dict(exact=True)
- expire(*tags, **kwargs)
+ return expire(*tags, **kwargs)
def __repr__(self):
return repr(dict(self))
@@ -152,7 +156,10 @@ def expire(*tags, **kwargs):
""" Expire all items, a set of items, or one specific item from
the cache. If ``exact`` is set to True, then if the given tag set
doesn't match exactly one item in the cache, nothing will be
- expired. """
+ expired.
+
+ :returns: number of expired entries
+ """
exact = kwargs.pop("exact", False)
count = 0
if not tags:
@@ -170,6 +177,8 @@ def expire(*tags, **kwargs):
for hook in _hooks:
hook(tags, exact, count)
+ return count
+
def add_expire_hook(func):
""" Add a hook that will be called when an item is expired from
diff --git a/src/lib/Bcfg2/Server/CherrypyCore.py b/src/lib/Bcfg2/Server/CherrypyCore.py
index 3cb0e291b..05c6c5a94 100644
--- a/src/lib/Bcfg2/Server/CherrypyCore.py
+++ b/src/lib/Bcfg2/Server/CherrypyCore.py
@@ -110,17 +110,21 @@ class CherrypyCore(NetworkCore):
return cherrypy.serving.response.body
def _daemonize(self):
- """ Drop privileges with
- :class:`cherrypy.process.plugins.DropPrivileges`, daemonize
- with :class:`cherrypy.process.plugins.Daemonizer`, and write a
+ """ Drop privileges, daemonize
+ with :class:`cherrypy.process.plugins.Daemonizer` and write a
PID file with :class:`cherrypy.process.plugins.PIDFile`. """
+ self._drop_privileges()
+ Daemonizer(cherrypy.engine).subscribe()
+ PIDFile(cherrypy.engine, Bcfg2.Options.setup.daemon).subscribe()
+ return True
+
+ def _drop_privileges(self):
+ """ Drop privileges with
+ :class:`cherrypy.process.plugins.DropPrivileges` """
DropPrivileges(cherrypy.engine,
uid=Bcfg2.Options.setup.daemon_uid,
gid=Bcfg2.Options.setup.daemon_gid,
umask=int(Bcfg2.Options.setup.umask, 8)).subscribe()
- Daemonizer(cherrypy.engine).subscribe()
- PIDFile(cherrypy.engine, Bcfg2.Options.setup.daemon).subscribe()
- return True
def _run(self):
""" Start the server listening. """
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index 03ab40343..a1ee24e18 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -11,6 +11,7 @@ import threading
import time
import inspect
import lxml.etree
+import daemon
import Bcfg2.Server
import Bcfg2.Logger
import Bcfg2.Options
@@ -26,6 +27,7 @@ from Bcfg2.Server.Statistics import track_statistics
try:
from django.core.exceptions import ImproperlyConfigured
+ import django
import django.conf
HAS_DJANGO = True
except ImportError:
@@ -82,10 +84,14 @@ def close_db_connection(func):
""" The decorated function """
rv = func(self, *args, **kwargs)
if self._database_available: # pylint: disable=W0212
- from django import db
self.logger.debug("%s: Closing database connection" %
threading.current_thread().getName())
- db.close_connection()
+
+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
+ for connection in django.db.connections.all():
+ connection.close()
+ else:
+ django.db.close_connection() # pylint: disable=E1101
return rv
return inner
@@ -113,7 +119,8 @@ class DefaultACL(Plugin, ClientACLs):
def check_acl_ip(self, address, rmi):
return (("." not in rmi and
not rmi.endswith("_debug") and
- rmi != 'get_statistics') or
+ rmi != 'get_statistics' and
+ rmi != 'expire_metadata_cache') or
address[0] == "127.0.0.1")
# in core we frequently want to catch all exceptions, regardless of
@@ -324,6 +331,10 @@ class Core(object):
select.select([famfd], [], [], 2)
elif not self.fam.pending():
terminate.wait(15)
+
+ if self.terminate.isSet():
+ break
+
if self.fam.pending():
try:
self._update_vcs_revision()
@@ -429,6 +440,7 @@ class Core(object):
self.logger.error("Unexpected instantiation failure for plugin %s"
% plugin, exc_info=1)
+ @close_db_connection
def shutdown(self):
""" Perform plugin and FAM shutdown tasks. """
if not self._running:
@@ -443,10 +455,6 @@ class Core(object):
for plugin in list(self.plugins.values()):
plugin.shutdown()
self.logger.info("%s: All plugins shut down" % self.name)
- if self._database_available:
- from django import db
- self.logger.info("%s: Closing database connection" % self.name)
- db.close_connection()
@property
def metadata_cache_mode(self):
@@ -681,7 +689,7 @@ class Core(object):
self.logger.debug("Building configuration for %s" % client)
start = time.time()
config = lxml.etree.Element("Configuration", version='2.0',
- revision=self.revision)
+ revision=str(self.revision))
try:
meta = self.build_metadata(client)
except MetadataConsistencyError:
@@ -1365,6 +1373,21 @@ class Core(object):
return "This method is deprecated and will be removed in a future " + \
"release\n%s" % self.fam.set_debug(debug)
+ @exposed
+ def expire_metadata_cache(self, _, hostnames=None):
+ """ Expire the metadata cache for one or all clients
+
+ :param hostnames: A list of hostnames to expire the metadata
+ cache for, or None. If None, the cache of
+ all clients will be expired.
+ :type hostnames: None or list of strings
+ """
+ if hostnames is not None:
+ for hostname in hostnames:
+ self.metadata_cache.expire(hostname)
+ else:
+ self.metadata_cache.expire()
+
class NetworkCore(Core):
""" A server core that actually listens on the network, can be
@@ -1424,9 +1447,9 @@ class NetworkCore(Core):
"\n.. automethod:: _daemonize\n"
def __str__(self):
- if hasattr(Bcfg2.Options.setup, "location"):
+ if hasattr(Bcfg2.Options.setup, "server"):
return "%s(%s)" % (self.__class__.__name__,
- Bcfg2.Options.setup.location)
+ Bcfg2.Options.setup.server)
else:
return Core.__str__(self)
@@ -1486,3 +1509,13 @@ class NetworkCore(Core):
""" Daemonize the server and write the pidfile. This must be
overridden by a core implementation. """
raise NotImplementedError
+
+ def _drop_privileges(self):
+ """ This is called if not daemonized and running as root to
+ drop the privileges to the configured daemon_uid and daemon_gid.
+ """
+ daemon.daemon.change_process_owner(
+ Bcfg2.Options.setup.daemon_uid,
+ Bcfg2.Options.setup.daemon_gid)
+ self.logger.debug("Dropped privileges to %s:%s." %
+ (os.getuid(), os.getgid()))
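Remote callers reach the new expire_metadata_cache RMI over the server's XML-RPC interface; the ExpireCache admin command added above does this through its proxy. A bare-bones sketch of the same call with the standard-library client, ignoring the SSL, certificate and password handling a real deployment needs (URL and hostname are placeholders):

    try:
        from xmlrpclib import ServerProxy        # Python 2
    except ImportError:
        from xmlrpc.client import ServerProxy    # Python 3

    server = ServerProxy("https://user:password@bcfg2.example.com:6789")
    server.expire_metadata_cache(["client1.example.com"])   # one client
    server.expire_metadata_cache(None)                       # all clients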
diff --git a/src/lib/Bcfg2/Server/Encryption.py b/src/lib/Bcfg2/Server/Encryption.py
index b60302871..c6cd4232e 100755
--- a/src/lib/Bcfg2/Server/Encryption.py
+++ b/src/lib/Bcfg2/Server/Encryption.py
@@ -176,7 +176,7 @@ def ssl_encrypt(plaintext, passwd, algorithm=None, salt=None):
def is_encrypted(val):
""" Make a best guess if the value is encrypted or not. This just
checks to see if ``val`` is a base64-encoded string whose content
- starts with "Salted__", so it may have (rare) false positives. It
+ starts with "Salted\\_\\_", so it may have (rare) false positives. It
will not have false negatives. """
try:
return b64decode(val).startswith("Salted__")
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
index c4b34a469..8f6e136fd 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
@@ -214,6 +214,7 @@ class Inotify(Pseudo, pyinotify.ProcessEvent):
def shutdown(self):
if self.started and self.notifier:
self.notifier.stop()
+ Pseudo.shutdown(self)
shutdown.__doc__ = Pseudo.shutdown.__doc__
def list_watches(self):
diff --git a/src/lib/Bcfg2/Server/Info.py b/src/lib/Bcfg2/Server/Info.py
index 6af561089..044dcdf0c 100644
--- a/src/lib/Bcfg2/Server/Info.py
+++ b/src/lib/Bcfg2/Server/Info.py
@@ -1,5 +1,5 @@
-""" Subcommands and helpers for bcfg2-info """
# -*- coding: utf-8 -*-
+""" Subcommands and helpers for bcfg2-info """
import os
import sys
@@ -12,6 +12,7 @@ import fnmatch
import argparse
import operator
import lxml.etree
+import traceback
from code import InteractiveConsole
import Bcfg2.Logger
import Bcfg2.Options
@@ -142,9 +143,7 @@ class Debug(InfoCmd):
if setup.cmd_list:
console = InteractiveConsole(locals())
for command in setup.cmd_list.readlines():
- command = command.strip()
- if command:
- console.push(command)
+ console.push(command.rstrip())
if not setup.non_interactive:
print("Dropping to interpreter; press ^D to resume")
self.interpreters[setup.interpreter](self.core.get_locals())
@@ -369,6 +368,7 @@ class Automatch(InfoCmd):
class ExpireCache(InfoCmd):
""" Expire the metadata cache """
+ only_interactive = True
options = [
Bcfg2.Options.PositionalArgument(
@@ -376,12 +376,20 @@ class ExpireCache(InfoCmd):
help="Expire cache for the given host(s)")]
def run(self, setup):
- if setup.clients:
- for client in self.get_client_list(setup.clients):
- self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata,
- key=client)
+ if setup.hostname:
+ for client in self.get_client_list(setup.hostname):
+ self.core.metadata_cache.expire(client)
else:
- self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+ self.core.metadata_cache.expire()
+
+
+class EventDebug(InfoCmd):
+ """ Enable debugging output for FAM events """
+ only_interactive = True
+ aliases = ['event_debug']
+
+ def run(self, _):
+ self.core.fam.set_debug(True)
class Bundles(InfoCmd):
@@ -672,18 +680,35 @@ class Query(InfoCmd):
print("\n".join(res))
+class Quit(InfoCmd):
+ """ Exit program """
+ only_interactive = True
+ aliases = ['exit', 'EOF']
+
+ def run(self, _):
+ raise SystemExit(0)
+
+
class Shell(InfoCmd):
""" Open an interactive shell to run multiple bcfg2-info commands """
interactive = False
def run(self, setup):
try:
- self.core.cmdloop('Welcome to bcfg2-info\n'
- 'Type "help" for more information')
+ self.core.cli.cmdloop('Welcome to bcfg2-info\n'
+ 'Type "help" for more information')
except KeyboardInterrupt:
print("\nCtrl-C pressed, exiting...")
+class Update(InfoCmd):
+ """ Process pending filesystem events """
+ only_interactive = True
+
+ def run(self, _):
+ self.core.fam.handle_events_in_interval(0.1)
+
+
class ProfileTemplates(InfoCmd):
""" Benchmark template rendering times """
@@ -796,36 +821,18 @@ if HAS_PROFILE:
display_trace(prof)
-class InfoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
+class InfoCore(Bcfg2.Server.Core.Core):
"""Main class for bcfg2-info."""
- def __init__(self):
- cmd.Cmd.__init__(self)
+ def __init__(self, cli):
Bcfg2.Server.Core.Core.__init__(self)
- self.prompt = 'bcfg2-info> '
+ self.cli = cli
def get_locals(self):
""" Expose the local variables of the core to subcommands that
need to reference them (i.e., the interactive interpreter) """
return locals()
- def do_quit(self, _):
- """ quit|exit - Exit program """
- raise SystemExit(0)
-
- do_EOF = do_quit
- do_exit = do_quit
-
- def do_eventdebug(self, _):
- """ eventdebug - Enable debugging output for FAM events """
- self.fam.set_debug(True)
-
- do_event_debug = do_eventdebug
-
- def do_update(self, _):
- """ update - Process pending filesystem events """
- self.fam.handle_events_in_interval(0.1)
-
def run(self):
self.load_plugins()
self.block_for_fam_events(handle_events=True)
@@ -840,12 +847,15 @@ class InfoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
Bcfg2.Server.Core.Core.shutdown(self)
-class CLI(Bcfg2.Options.CommandRegistry):
+class CLI(cmd.Cmd, Bcfg2.Options.CommandRegistry):
""" The bcfg2-info CLI """
options = [Bcfg2.Options.BooleanOption("-p", "--profile", help="Profile")]
def __init__(self):
+ cmd.Cmd.__init__(self)
Bcfg2.Options.CommandRegistry.__init__(self)
+ self.prompt = 'bcfg2-info> '
+
self.register_commands(globals().values(), parent=InfoCmd)
parser = Bcfg2.Options.get_parser(
description="Inspect a running Bcfg2 server",
@@ -860,7 +870,7 @@ class CLI(Bcfg2.Options.CommandRegistry):
else:
if Bcfg2.Options.setup.profile:
print("Profiling functionality not available.")
- self.core = InfoCore()
+ self.core = InfoCore(self)
for command in self.commands.values():
command.core = self.core
@@ -877,3 +887,22 @@ class CLI(Bcfg2.Options.CommandRegistry):
def shutdown(self):
Bcfg2.Options.CommandRegistry.shutdown(self)
self.core.shutdown()
+
+ def get_names(self):
+ """ Overwrite cmd.Cmd.get_names to use the instance to get the
+ methods and not the class, because the CommandRegistry
+ dynamically adds methods for the registed subcommands. """
+ return dir(self)
+
+ def onecmd(self, line):
+ """ Overwrite cmd.Cmd.onecmd to catch all exceptions (except
+ SystemExit) print an error message and continue cmdloop. """
+ try:
+ cmd.Cmd.onecmd(self, line)
+ except SystemExit:
+ raise
+ except: # pylint: disable=W0702
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ lines = traceback.format_exception(exc_type, exc_value,
+ exc_traceback)
+ self.stdout.write(''.join(lines))
diff --git a/src/lib/Bcfg2/Server/Lint/Bundler.py b/src/lib/Bcfg2/Server/Lint/Bundler.py
index 576e157ad..7b024229b 100644
--- a/src/lib/Bcfg2/Server/Lint/Bundler.py
+++ b/src/lib/Bcfg2/Server/Lint/Bundler.py
@@ -29,8 +29,12 @@ class Bundler(ServerPlugin):
# when given a list of files on stdin, this check is
# useless, so skip it
groupdata = self.metadata.groups_xml.xdata
+
ref_bundles = set([b.get("name")
for b in groupdata.findall("//Bundle")])
+ for bundle in self.core.plugins['Bundler'].bundles.values():
+ ref_bundles |= set([rb.get("name") for rb in
+ bundle.xdata.findall(".//RequiredBundle")])
allbundles = self.core.plugins['Bundler'].bundles.keys()
for bundle in ref_bundles:
diff --git a/src/lib/Bcfg2/Server/Lint/MergeFiles.py b/src/lib/Bcfg2/Server/Lint/MergeFiles.py
index 8e6a926ae..3a6251594 100644
--- a/src/lib/Bcfg2/Server/Lint/MergeFiles.py
+++ b/src/lib/Bcfg2/Server/Lint/MergeFiles.py
@@ -6,6 +6,7 @@ import copy
from difflib import SequenceMatcher
import Bcfg2.Server.Lint
from Bcfg2.Server.Plugins.Cfg import CfgGenerator
+from Bcfg2.Utils import is_string
def threshold(val):
@@ -50,6 +51,8 @@ class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
for filename, entryset in self.core.plugins['Cfg'].entries.items():
candidates = dict([(f, e) for f, e in entryset.entries.items()
if (isinstance(e, CfgGenerator) and
+ is_string(e.data,
+ Bcfg2.Options.setup.encoding) and
f not in ignore and
not f.endswith(".crypt"))])
similar, identical = self.get_similar(candidates)
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index ebf4c4954..56b4e7477 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -47,70 +47,118 @@ def is_device_mode(val):
return re.match(r'^\d+$', val)
+def is_vcs_type(val):
+ """ Return True if val is a supported vcs type handled by the
+ current client tool """
+ return (val != 'Path' and
+ hasattr(Bcfg2.Client.Tools.VCS.VCS, 'Install%s' % val))
+
+
class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" Verify attributes for configuration entries that cannot be
verified with an XML schema alone. """
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
- self.required_attrs = dict(
- Path=dict(
- device=dict(name=is_filename,
- owner=is_username,
- group=is_username,
- dev_type=lambda v: v in device_map),
- directory=dict(name=is_filename, owner=is_username,
- group=is_username, mode=is_octal_mode),
- file=dict(name=is_filename, owner=is_username,
- group=is_username, mode=is_octal_mode,
- __text__=None),
- hardlink=dict(name=is_filename, to=is_filename),
- symlink=dict(name=is_filename),
- ignore=dict(name=is_filename),
- nonexistent=dict(name=is_filename),
- permissions=dict(name=is_filename, owner=is_username,
- group=is_username, mode=is_octal_mode),
- vcs=dict(vcstype=lambda v: (v != 'Path' and
- hasattr(Bcfg2.Client.Tools.VCS.VCS,
- "Install%s" % v)),
- revision=None, sourceurl=None)),
- Service={"__any__": dict(name=None),
- "smf": dict(name=None, FMRI=None)},
- Action={None: dict(name=None,
- timing=lambda v: v in ['pre', 'post', 'both'],
- when=lambda v: v in ['modified', 'always'],
- status=lambda v: v in ['ignore', 'check'],
- command=None)},
- ACL=dict(
- default=dict(scope=lambda v: v in ['user', 'group'],
- perms=lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}',
- v)),
- access=dict(scope=lambda v: v in ['user', 'group'],
- perms=lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}',
- v)),
- mask=dict(perms=lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}',
- v))),
- Package={"__any__": dict(name=None)},
- SEBoolean={None: dict(name=None,
- value=lambda v: v in ['on', 'off'])},
- SEModule={None: dict(name=None, __text__=None)},
- SEPort={
- None: dict(name=lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)',
- v),
- selinuxtype=is_selinux_type)},
- SEFcontext={None: dict(name=None, selinuxtype=is_selinux_type)},
- SENode={None: dict(name=lambda v: "/" in v,
- selinuxtype=is_selinux_type,
- proto=lambda v: v in ['ipv6', 'ipv4'])},
- SELogin={None: dict(name=is_username,
- selinuxuser=is_selinux_user)},
- SEUser={None: dict(name=is_selinux_user,
- roles=lambda v: all(is_selinux_user(u)
- for u in " ".split(v)),
- prefix=None)},
- SEInterface={None: dict(name=None, selinuxtype=is_selinux_type)},
- SEPermissive={None: dict(name=is_selinux_type)},
- POSIXGroup={None: dict(name=is_username)},
- POSIXUser={None: dict(name=is_username)})
+ self.required_attrs = {
+ 'Path': {
+ '__any__': {'name': is_filename},
+ 'augeas': {'owner': is_username, 'group': is_username,
+ 'mode': is_octal_mode},
+ 'device': {'owner': is_username, 'group': is_username,
+ 'mode': is_octal_mode,
+ 'dev_type': lambda v: v in device_map},
+ 'directory': {'owner': is_username, 'group': is_username,
+ 'mode': is_octal_mode},
+ 'file': {'owner': is_username, 'group': is_username,
+ 'mode': is_octal_mode, '__text__': None},
+ 'hardlink': {'owner': is_username, 'group': is_username,
+ 'mode': is_octal_mode, 'to': is_filename},
+ 'symlink': {},
+ 'ignore': {},
+ 'nonexistent': {},
+ 'permissions': {'owner': is_username, 'group': is_username,
+ 'mode': is_octal_mode},
+ 'vcs': {'vcstype': is_vcs_type, 'revision': None,
+ 'sourceurl': None},
+ },
+ 'Service': {
+ '__any__': {'name': None},
+ 'smf': {'name': None, 'FMRI': None}
+ },
+ 'Action': {
+ None: {
+ 'name': None,
+ 'timing': lambda v: v in ['pre', 'post', 'both'],
+ 'when': lambda v: v in ['modified', 'always'],
+ 'status': lambda v: v in ['ignore', 'check'],
+ 'command': None,
+ },
+ },
+ 'ACL': {
+ 'default': {
+ 'scope': lambda v: v in ['user', 'group'],
+ 'perms': lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v),
+ },
+ 'access': {
+ 'scope': lambda v: v in ['user', 'group'],
+ 'perms': lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v),
+ },
+ 'mask': {
+ 'perms': lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v),
+ },
+ },
+ 'Package': {
+ '__any__': {'name': None},
+ },
+ 'SEBoolean': {
+ None: {
+ 'name': None,
+ 'value': lambda v: v in ['on', 'off'],
+ },
+ },
+ 'SEModule': {
+ None: {'name': None, '__text__': None},
+ },
+ 'SEPort': {
+ None: {
+ 'name': lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', v),
+ 'selinuxtype': is_selinux_type,
+ },
+ },
+ 'SEFcontext': {
+ None: {'name': None, 'selinuxtype': is_selinux_type},
+ },
+ 'SENode': {
+ None: {
+ 'name': lambda v: "/" in v,
+ 'selinuxtype': is_selinux_type,
+ 'proto': lambda v: v in ['ipv6', 'ipv4']
+ },
+ },
+ 'SELogin': {
+ None: {'name': is_username, 'selinuxuser': is_selinux_user},
+ },
+ 'SEUser': {
+ None: {
+ 'name': is_selinux_user,
+ 'roles': lambda v: all(is_selinux_user(u)
+ for u in " ".split(v)),
+ 'prefix': None,
+ },
+ },
+ 'SEInterface': {
+ None: {'name': None, 'selinuxtype': is_selinux_type},
+ },
+ 'SEPermissive': {
+ None: {'name': is_selinux_type},
+ },
+ 'POSIXGroup': {
+ None: {'name': is_username},
+ },
+ 'POSIXUser': {
+ None: {'name': is_username},
+ },
+ }
def Run(self):
self.check_packages()
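The rewritten required_attrs table maps each entry tag to a dict of entry types, which in turn map attribute names to either None (the attribute only has to be present) or a callable validator (the attribute has to be present and the callable has to return a true value). A small sketch of how such a rule dict can be applied to an entry; the helper and the sample rules below are illustrative, not the plugin's actual checking code:

    import lxml.etree

    def failing_attrs(entry, rules):
        """Yield (attribute, value) pairs of entry that violate rules."""
        for attr, validator in rules.items():
            if attr == "__text__":
                continue                      # text content handled elsewhere
            val = entry.get(attr)
            if val is None or (validator is not None and not validator(val)):
                yield attr, val

    entry = lxml.etree.Element("Path", type="file", name="/etc/motd",
                               owner="root", group="root", mode="0644")
    rules = {"owner": None, "group": None, "mode": lambda v: v.isdigit()}
    print(list(failing_attrs(entry, rules)))  # -> [] when everything validates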
diff --git a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
index 9d05516f1..98faa269d 100644
--- a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
@@ -4,8 +4,8 @@
import sys
import imp
from Bcfg2.Server.Lint import ServerPlugin
-from Bcfg2.Server.Plugins.TemplateHelper import HelperModule, MODULE_RE, \
- safe_module_name
+from Bcfg2.Server.Plugins.TemplateHelper import HelperModule, MODULE_RE
+from Bcfg2.Utils import safe_module_name
class TemplateHelper(ServerPlugin):
@@ -26,7 +26,7 @@ class TemplateHelper(ServerPlugin):
ServerPlugin.__init__(self, *args, **kwargs)
# we instantiate a dummy helper to discover which keywords and
# defaults are reserved
- dummy = HelperModule("foo.py")
+ dummy = HelperModule("foo.py", None)
self.reserved_keywords = dir(dummy)
self.reserved_defaults = dummy.reserved_defaults
@@ -44,7 +44,8 @@ class TemplateHelper(ServerPlugin):
module_name = MODULE_RE.search(helper).group(1)
try:
- module = imp.load_source(safe_module_name(module_name), helper)
+ module = imp.load_source(
+ safe_module_name('TemplateHelper', module_name), helper)
except: # pylint: disable=W0702
err = sys.exc_info()[1]
self.LintError("templatehelper-import-error",
diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py
index 61f704206..66c8180f4 100644
--- a/src/lib/Bcfg2/Server/Lint/__init__.py
+++ b/src/lib/Bcfg2/Server/Lint/__init__.py
@@ -14,6 +14,7 @@ import time
import lxml.etree
+
import Bcfg2.Options
import Bcfg2.Server.Core
import Bcfg2.Server.Plugins
@@ -326,7 +327,10 @@ class LintPluginOption(Bcfg2.Options.Option):
plugins = [p.__name__ for p in namespace.plugins]
for loader, name, _is_pkg in walk_packages(path=__path__):
try:
- module = loader.find_module(name).load_module(name)
+ module_name = 'Bcfg2.Server.Lint.%s' % name
+ module = loader \
+ .find_module(module_name) \
+ .load_module(module_name)
plugin = getattr(module, name)
if plugin.__serverplugin__ is None or \
plugin.__serverplugin__ in plugins:
diff --git a/src/lib/Bcfg2/Server/MultiprocessingCore.py b/src/lib/Bcfg2/Server/MultiprocessingCore.py
index 724b34d8d..4bf3e4a27 100644
--- a/src/lib/Bcfg2/Server/MultiprocessingCore.py
+++ b/src/lib/Bcfg2/Server/MultiprocessingCore.py
@@ -334,9 +334,9 @@ class MultiprocessingCore(BuiltinCore):
self.children = None
def __str__(self):
- if hasattr(Bcfg2.Options.setup, "location"):
+ if hasattr(Bcfg2.Options.setup, "server"):
return "%s(%s; %s children)" % (self.__class__.__name__,
- Bcfg2.Options.setup.location,
+ Bcfg2.Options.setup.server,
len(self._all_children))
else:
return "%s(%s children)" % (self.__class__.__name__,
diff --git a/src/lib/Bcfg2/Server/Plugin/__init__.py b/src/lib/Bcfg2/Server/Plugin/__init__.py
index e28e458b3..69fc90b2f 100644
--- a/src/lib/Bcfg2/Server/Plugin/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugin/__init__.py
@@ -11,7 +11,6 @@ documentation it's not necessary to use the submodules. E.g., you can
from Bcfg2.Server.Plugin.base import Plugin
"""
-
import Bcfg2.Options
# pylint: disable=W0401
diff --git a/src/lib/Bcfg2/Server/Plugin/helpers.py b/src/lib/Bcfg2/Server/Plugin/helpers.py
index 245cfc256..ca0fe8188 100644
--- a/src/lib/Bcfg2/Server/Plugin/helpers.py
+++ b/src/lib/Bcfg2/Server/Plugin/helpers.py
@@ -13,7 +13,7 @@ import Bcfg2.Server
import Bcfg2.Options
import Bcfg2.Server.FileMonitor
from Bcfg2.Logger import Debuggable
-from Bcfg2.Compat import CmpMixin, wraps
+from Bcfg2.Compat import CmpMixin, MutableMapping, wraps
from Bcfg2.Server.Plugin.base import Plugin
from Bcfg2.Server.Plugin.interfaces import Generator, TemplateDataProvider
from Bcfg2.Server.Plugin.exceptions import SpecificityError, \
@@ -1066,7 +1066,22 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
data = candidate
break
- entry.text = data.text
+ self._apply(entry, data)
+
+ def _apply(self, entry, data):
+ """ Apply all available values from data onto entry. This
+ sets the available attributes (for all attributes unset in
+ the entry), adds all children and copies the text from data
+ to entry.
+
+ :param entry: The entry to apply the changes to
+ :type entry: lxml.etree._Element
+ :param data: The entry to get the data from
+ :type data: lxml.etree._Element
+ """
+
+ if data.text is not None and data.text.strip() != '':
+ entry.text = data.text
for item in data.getchildren():
entry.append(copy.copy(item))
@@ -1685,3 +1700,84 @@ class GroupSpool(Plugin, Generator):
return
reqid = self.fam.AddMonitor(name, self)
self.handles[reqid] = relative
+
+
+class CallableDict(MutableMapping):
+ """ This maps a set of keys to a set of value-getting functions;
+ the values are populated on-the-fly by the functions as they
+ are needed (and not before). This is for example used by
+ :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data`;
+ see the docstring for that function for details on why.
+
+ Unlike a dict, you can specify values or functions for the
+ right-hand side of this mapping. If you specify a function, it will
+ be evaluated every time you access the value. E.g.:
+
+ .. code-block:: python
+
+ d = CallableDict(foo=load_foo,
+ bar="bar")
+ """
+
+ def __init__(self, **getters):
+ self._getters = dict(**getters)
+
+ def __getitem__(self, key):
+ if callable(self._getters[key]):
+ return self._getters[key]()
+ else:
+ return self._getters[key]
+
+ def __setitem__(self, key, getter):
+ self._getters[key] = getter
+
+ def __delitem__(self, key):
+ del self._getters[key]
+
+ def __len__(self):
+ return len(self._getters)
+
+ def __iter__(self):
+ return iter(self._getters.keys())
+
+ def _current_data(self):
+ """ Return a dict with the current available static data
+ and ``unknown`` for all callable values.
+ """
+ rv = dict()
+ for key in self._getters.keys():
+ if callable(self._getters[key]):
+ rv[key] = 'unknown'
+ else:
+ rv[key] = self._getters[key]
+ return rv
+
+ def __repr__(self):
+ return str(self._current_data())
+
+
+class OnDemandDict(CallableDict):
+ """ This is like a :class:`CallableDict` but it will cache
+ the results of the callable getters, so that it is only evaluated
+ once when you first access it.
+ """
+
+ def __init__(self, **getters):
+ CallableDict.__init__(self, **getters)
+ self._values = dict()
+
+ def __getitem__(self, key):
+ if key not in self._values:
+ self._values[key] = super(OnDemandDict, self).__getitem__(key)
+ return self._values[key]
+
+ def __delitem__(self, key):
+ super(OnDemandDict, self).__delitem__(key)
+ del self._values[key]
+
+ def _current_data(self):
+ rv = super(OnDemandDict, self)._current_data()
+ for (key, value) in rv.items():
+ if key in self._values:
+ rv[key] = value
+ return rv
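A standalone sketch contrasting the two new mappings (the getter below is purely illustrative): CallableDict re-runs a callable value on every lookup, while OnDemandDict memoizes the first result.

    from Bcfg2.Server.Plugin.helpers import CallableDict, OnDemandDict

    calls = []

    def getter():
        calls.append(1)              # record each evaluation
        return "value"

    cd = CallableDict(foo=getter, bar="static")
    cd["foo"]; cd["foo"]             # getter runs twice
    od = OnDemandDict(foo=getter)
    od["foo"]; od["foo"]             # getter runs once; result is cached
    print(len(calls))                # -> 3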
diff --git a/src/lib/Bcfg2/Server/Plugins/AWSTags.py b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
index 0d6eefaaa..556805bde 100644
--- a/src/lib/Bcfg2/Server/Plugins/AWSTags.py
+++ b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
@@ -172,6 +172,11 @@ class AWSTags(Bcfg2.Server.Plugin.Plugin,
def start_client_run(self, metadata):
self.expire_cache(key=metadata.hostname)
+ if self.core.metadata_cache_mode == 'aggressive':
+ self.logger.warning("AWSTags is incompatible with aggressive "
+ "client metadata caching, try 'cautious' "
+ "or 'initial'")
+ self.core.metadata_cache.expire(metadata.hostname)
def get_additional_data(self, metadata):
return self.get_tags(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index e38eeea89..4f5a79465 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -92,6 +92,10 @@ class Bundler(Plugin,
self.logger.error("Bundler: Failed to render templated bundle "
"%s: %s" % (bundlename, err))
continue
+ except:
+ self.logger.error("Bundler: Unexpected bundler error for %s" %
+ bundlename, exc_info=1)
+ continue
if data.get("independent", "false").lower() == "true":
data.tag = "Independent"
@@ -124,12 +128,12 @@ class Bundler(Plugin,
# dependent bundle -- add it to the list of
# bundles for this client
if child.get("name") not in bundles_added:
- bundles.append(child.get("name"))
+ bundles.add(child.get("name"))
bundles_added.add(child.get("name"))
if child.get('inherit_modification', 'false') == 'true':
if metadata.version_info >= \
Bcfg2VersionInfo('1.4.0pre2'):
- lxml.etree.SubElement(data, 'Bundle',
+ lxml.etree.SubElement(data, 'BoundBundle',
name=child.get('name'))
else:
self.logger.warning(
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
index cff9ff61e..71aec7658 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
@@ -12,14 +12,14 @@ from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
from jinja2 import Environment, FileSystemLoader
HAS_JINJA2 = True
-except ImportError:
- HAS_JINJA2 = False
+ class RelEnvironment(Environment):
+ """Override join_path() to enable relative template paths."""
+ def join_path(self, template, parent):
+ return os.path.join(os.path.dirname(parent), template)
-class RelEnvironment(Environment):
- """Override join_path() to enable relative template paths."""
- def join_path(self, template, parent):
- return os.path.join(os.path.dirname(parent), template)
+except ImportError:
+ HAS_JINJA2 = False
class DefaultJinja2DataProvider(DefaultTemplateDataProvider):
@@ -42,15 +42,16 @@ class CfgJinja2Generator(CfgGenerator):
#: Handle .jinja2 files
__extensions__ = ['jinja2']
- #: ``__loader_cls__`` is the class that will be instantiated to
- #: load the template files. It must implement one public function,
- #: ``load()``, as :class:`genshi.template.TemplateLoader`.
- __loader_cls__ = FileSystemLoader
+ if HAS_JINJA2:
+ #: ``__loader_cls__`` is the class that will be instantiated to
+ #: load the template files. It must implement one public function,
+ #: ``load()``, as :class:`genshi.template.TemplateLoader`.
+ __loader_cls__ = FileSystemLoader
- #: ``__environment_cls__`` is the class that will be instantiated to
- #: store the jinja2 environment. It must implement one public function,
- #: ``get_template()``, as :class:`jinja2.Environment`.
- __environment_cls__ = RelEnvironment
+ #: ``__environment_cls__`` is the class that will be instantiated to
+ #: store the jinja2 environment. It must implement one public
+ #: function, ``get_template()``, as :class:`jinja2.Environment`.
+ __environment_cls__ = RelEnvironment
#: Ignore ``.jinja2_include`` files so they can be used with the
#: Jinja2 ``{% include ... %}`` directive without raising warnings.
@@ -68,7 +69,15 @@ class CfgJinja2Generator(CfgGenerator):
encoding = Bcfg2.Options.setup.encoding
self.loader = self.__loader_cls__('/',
encoding=encoding)
- self.environment = self.__environment_cls__(loader=self.loader)
+ try:
+ # keep_trailing_newline is new in Jinja2 2.7, and will
+ # fail with earlier versions
+ self.environment = \
+ self.__environment_cls__(loader=self.loader,
+ keep_trailing_newline=True)
+ except TypeError:
+ self.environment = \
+ self.__environment_cls__(loader=self.loader)
__init__.__doc__ = CfgGenerator.__init__.__doc__
def get_data(self, entry, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
index a158302be..241bce34c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
@@ -19,8 +19,8 @@ class CfgSSLCAKeyCreator(XMLCfgCreator):
self.logger.info("Cfg: Generating new SSL key for %s" % self.name)
spec = self.XMLMatch(metadata)
key = spec.find("Key")
- if not key:
- key = dict()
+ if key is None:
+ key = {}
ktype = key.get('type', 'rsa')
bits = key.get('bits', '2048')
if ktype == 'rsa':
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index 355e53588..dae03144a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -878,6 +878,7 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
options = Bcfg2.Server.Plugin.GroupSpool.options + [
Bcfg2.Options.BooleanOption(
'--cfg-validation', cf=('cfg', 'validation'), default=True,
+ dest="cfg_validation",
help='Run validation on Cfg files'),
Bcfg2.Options.Option(
cf=('cfg', 'category'), dest="cfg_category",
diff --git a/src/lib/Bcfg2/Server/Plugins/Defaults.py b/src/lib/Bcfg2/Server/Plugins/Defaults.py
index 79e2ca0e2..2242e3825 100644
--- a/src/lib/Bcfg2/Server/Plugins/Defaults.py
+++ b/src/lib/Bcfg2/Server/Plugins/Defaults.py
@@ -1,5 +1,6 @@
"""This generator provides rule-based entry mappings."""
+import Bcfg2.Options
import Bcfg2.Server.Plugin
import Bcfg2.Server.Plugins.Rules
@@ -9,7 +10,10 @@ class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
"""Set default attributes on bound entries"""
__author__ = 'bcfg-dev@mcs.anl.gov'
- options = Bcfg2.Server.Plugin.PrioDir.options
+ options = Bcfg2.Server.Plugin.PrioDir.options + [
+ Bcfg2.Options.BooleanOption(
+ cf=("defaults", "replace_name"), dest="defaults_replace_name",
+ help="Replace %{name} in attributes with name of target entry")]
# Rules is a Generator that happens to implement all of the
# functionality we want, so we overload it, but Defaults should
@@ -41,3 +45,9 @@ class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
def _regex_enabled(self):
""" Defaults depends on regex matching, so force it enabled """
return True
+
+ @property
+ def _replace_name_enabled(self):
+ """ Return True if the replace_name feature is enabled,
+ False otherwise """
+ return Bcfg2.Options.setup.defaults_replace_name
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
index b60f60e65..184b362f9 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
@@ -13,6 +13,17 @@ class GroupLogicConfig(Bcfg2.Server.Plugin.StructFile):
create = lxml.etree.Element("GroupLogic",
nsmap=dict(py="http://genshi.edgewall.org/"))
+ def __init__(self, filename, core):
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename,
+ should_monitor=True)
+ self.core = core
+
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+
def _match(self, item, metadata, *args):
if item.tag == 'Group' and not len(item.getchildren()):
return [item]
@@ -39,7 +50,7 @@ class GroupLogic(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"),
- should_monitor=True)
+ core=core)
self._local = local()
def get_additional_groups(self, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index 553ddbc47..770419ba5 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -1,169 +1,264 @@
+""" A plugin to fetch data from a LDAP directory """
+
import imp
-import logging
+import os
import sys
import time
import traceback
-import Bcfg2.Server.Plugin
+from functools import partial
-logger = logging.getLogger('Bcfg2.Plugins.Ldap')
+import Bcfg2.Options
+import Bcfg2.Server.Cache
+import Bcfg2.Server.Plugin
+from Bcfg2.Logger import Debuggable
+from Bcfg2.Utils import ClassName, safe_module_name
try:
import ldap
+ HAS_LDAP = True
except ImportError:
- logger.error("Unable to load ldap module. Is python-ldap installed?")
- raise ImportError
+ HAS_LDAP = False
-# time in seconds between retries after failed LDAP connection
-RETRY_DELAY = 5
-# how many times to try reaching the LDAP server if a connection is broken
-# at the very minimum, one retry is needed to handle a restarted LDAP daemon
-RETRY_COUNT = 3
-SCOPE_MAP = {
- "base": ldap.SCOPE_BASE,
- "one": ldap.SCOPE_ONELEVEL,
- "sub": ldap.SCOPE_SUBTREE,
-}
+class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
+ """ Config file for the Ldap plugin """
-LDAP_QUERIES = []
+ def __init__(self, name, core, plugin):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
+ self.core = core
+ self.plugin = plugin
+ self.queries = list()
+ self.fam.AddMonitor(name, self)
+ def Index(self):
+ """ Get the queries from the config file """
+ try:
+ module_name = os.path.splitext(os.path.basename(self.name))[0]
+ module = imp.load_source(safe_module_name('Ldap', module_name),
+ self.name)
+ except: # pylint: disable=W0702
+ err = sys.exc_info()[1]
+ self.logger.error("Ldap: Failed to import %s: %s" %
+ (self.name, err))
+ return
+
+ if not hasattr(module, "__queries__"):
+ self.logger.error("Ldap: %s has no __queries__ list" % self.name)
+ return
+
+ self.queries = list()
+ for query in module.__queries__:
+ try:
+ self.queries.append(getattr(module, query))
+ except AttributeError:
+ self.logger.warning(
+ "Ldap: %s exports %s, but has no such attribute" %
+ (self.name, query))
-def register_query(query):
- LDAP_QUERIES.append(query)
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+ self.plugin.expire_cache()
-class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
- """
- Config file for the Ldap plugin
-
- The config file cannot be 'parsed' in the traditional sense as we would
- need some serious type checking ugliness to just get the LdapQuery
- subclasses. The alternative would be to have the user create a list with
- a predefined name that contains all queries.
- The approach implemented here is having the user call a registering
- decorator that updates a global variable in this module.
- """
- def __init__(self, filename):
- self.filename = filename
- Bcfg2.Server.Plugin.FileBacked.__init__(self, self.filename)
- self.fam.AddMonitor(self.filename, self)
-
- def Index(self):
- """
- Reregisters the queries in the config file
-
- The config will take care of actually registering the queries,
- so we just load it once and don't keep it.
- """
- global LDAP_QUERIES
- LDAP_QUERIES = []
- imp.load_source("ldap_cfg", self.filename)
+class Ldap(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.Connector):
+ """ The Ldap plugin allows adding data from an LDAP server
+ to your metadata. """
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['expire_cache']
-class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
- """
- The Ldap plugin allows adding data from an LDAP server to your metadata.
- """
- name = "Ldap"
experimental = True
- debug_flag = False
+
+ options = [
+ Bcfg2.Options.Option(
+ cf=('ldap', 'retries'), type=int, default=3,
+ dest='ldap_retries',
+ help='The number of times to retry reaching the '
+ 'LDAP server if a connection is broken'),
+ Bcfg2.Options.Option(
+ cf=('ldap', 'retry_delay'), type=float, default=5.0,
+ dest='ldap_retry_delay',
+ help='The time in seconds between retries'),
+ Bcfg2.Options.BooleanOption(
+ cf=('ldap', 'cache'), default=None, dest='ldap_cache',
+ help='Cache the results of the LDAP Queries until they '
+ 'are expired using the XML-RPC RMI')]
def __init__(self, core):
Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = ConfigFile(self.data + "/config.py")
- def debug_log(self, message, flag=None):
- if (flag is None) and self.debug_flag or flag:
- self.logger.error(message)
+ if not HAS_LDAP:
+ msg = "Python ldap module is required for Ldap plugin"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+
+ self.config = ConfigFile(os.path.join(self.data, 'config.py'),
+ core, self)
+ self._hosts = dict()
+
+ def _cache(self, query_name):
+ """ Return the :class:`Cache <Bcfg2.Server.Cache>` for the
+ given query name. """
+ return Bcfg2.Server.Cache.Cache('Ldap', 'results', query_name)
+
+ def _execute_query(self, query, metadata):
+ """ Return the cached result of the given query for this host or
+ execute the given query and cache the result. """
+ result = None
+
+ if Bcfg2.Options.setup.ldap_cache is not False:
+ cache = self._cache(query.name)
+ result = cache.get(metadata.hostname, None)
+
+ if result is None:
+ try:
+ self.debug_log("Processing query '%s'" % query.name)
+ result = query.get_result(metadata)
+ if Bcfg2.Options.setup.ldap_cache is not False:
+ cache[metadata.hostname] = result
+ except: # pylint: disable=W0702
+ self.logger.error(
+ "Exception during processing of query named '%s', query "
+ "results will be empty and may cause bind failures" %
+ query.name)
+ for line in traceback.format_exc().split('\n'):
+ self.logger.error(line)
+ return result
def get_additional_data(self, metadata):
- query = None
- try:
- data = {}
- self.debug_log("LdapPlugin debug: found queries " +
- str(LDAP_QUERIES))
- for QueryClass in LDAP_QUERIES:
- query = QueryClass()
+ data = {}
+ self.debug_log("Found queries %s" % self.config.queries)
+ for query_class in self.config.queries:
+ try:
+ query = query_class()
if query.is_applicable(metadata):
- self.debug_log("LdapPlugin debug: processing query '" +
- query.name + "'")
- data[query.name] = query.get_result(metadata)
+ self.debug_log("Processing query '%s'" % query.name)
+ data[query.name] = partial(
+ self._execute_query, query, metadata)
else:
- self.debug_log("LdapPlugin debug: query '" + query.name +
- "' not applicable to host '" +
- metadata.hostname + "'")
- return data
- except Exception:
- if hasattr(query, "name"):
- logger.error("LdapPlugin error: " +
- "Exception during processing of query named '" +
- str(query.name) +
- "', query results will be empty" +
- " and may cause bind failures")
- for line in traceback.format_exception(sys.exc_info()[0],
- sys.exc_info()[1],
- sys.exc_info()[2]):
- logger.error("LdapPlugin error: " +
- line.replace("\n", ""))
- return {}
-
-
-class LdapConnection(object):
- """
- Connection to an LDAP server.
- """
- def __init__(self, host="localhost", port=389,
+ self.debug_log("query '%s' not applicable to host '%s'" %
+ (query.name, metadata.hostname))
+ except: # pylint: disable=W0702
+ self.logger.error(
+ "Exception during preparation of query named '%s'. "
+ "Query will be ignored." % query_class.__name__)
+ for line in traceback.format_exc().split('\n'):
+ self.logger.error(line)
+
+ return Bcfg2.Server.Plugin.CallableDict(**data)
+
+ def start_client_run(self, metadata):
+ if Bcfg2.Options.setup.ldap_cache is None:
+ self.expire_cache(hostname=metadata.hostname)
+
+ def expire_cache(self, query=None, hostname=None):
+ """ Expire the cache. You can select the items to purge
+ per query and/or per host, or you can purge all cached
+ data. This is exposed as an XML-RPC RMI. """
+
+ tags = ['Ldap', 'results']
+ if query:
+ tags.append(query)
+ if hostname:
+ tags.append(hostname)
+
+ return Bcfg2.Server.Cache.expire(*tags)
+
+
+class LdapConnection(Debuggable):
+ """ Connection to an LDAP server. """
+
+ def __init__(self, host="localhost", port=389, uri=None, options=None,
binddn=None, bindpw=None):
+ Debuggable.__init__(self)
+
+ if not HAS_LDAP:
+ msg = "Python ldap module is required for Ldap plugin"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+
self.host = host
self.port = port
+ self.uri = uri
+ self.options = options
self.binddn = binddn
self.bindpw = bindpw
self.conn = None
+ self.__scopes__ = {
+ 'base': ldap.SCOPE_BASE,
+ 'one': ldap.SCOPE_ONELEVEL,
+ 'sub': ldap.SCOPE_SUBTREE,
+ }
+
def __del__(self):
+ """ Disconnection if the instance is destroyed. """
+ self.disconnect()
+
+ def disconnect(self):
+ """ If a connection to an LDAP server is available, disconnect it. """
if self.conn:
- self.conn.unbind()
+ self.conn.unbind()
+ self.conn = None
+
+ def connect(self):
+ """ Open a connection to the configured LDAP server, and do a simple
+ bind if both binddn and bindpw are set. """
+ self.disconnect()
+ self.conn = ldap.initialize(self.get_uri())
+
+ if self.options is not None:
+ for (option, value) in self.options.items():
+ self.conn.set_option(option, value)
- def init_conn(self):
- self.conn = ldap.initialize(self.url)
if self.binddn is not None and self.bindpw is not None:
self.conn.simple_bind_s(self.binddn, self.bindpw)
def run_query(self, query):
- result = None
- for attempt in range(RETRY_COUNT + 1):
- if attempt >= 1:
- logger.error("LdapPlugin error: " +
- "LDAP server down (retry " + str(attempt) + "/" +
- str(RETRY_COUNT) + ")")
+ """ Connect to the server and execute the query. If the server is
+ down, wait the configured retry delay and try to reconnect.
+
+ :param query: The query to execute on the LDAP server.
+ :type query: Bcfg2.Server.Plugins.Ldap.LdapQuery
+ """
+ for attempt in range(Bcfg2.Options.setup.ldap_retries + 1):
try:
if not self.conn:
- self.init_conn()
- result = self.conn.search_s(
- query.base,
- SCOPE_MAP[query.scope],
- query.filter.replace("\\", "\\\\"),
- query.attrs,
- )
- break
+ self.connect()
+
+ return self.conn.search_s(
+ query.base, self.__scopes__[query.scope],
+ query.filter.replace('\\', '\\\\'), query.attrs)
+
except ldap.SERVER_DOWN:
self.conn = None
- time.sleep(RETRY_DELAY)
- return result
+ self.logger.error(
+ "LdapConnection: Server %s down. Retry %d/%d in %.2fs." %
+ (self.get_uri(), attempt + 1,
+ Bcfg2.Options.setup.ldap_retries,
+ Bcfg2.Options.setup.ldap_retry_delay))
+ time.sleep(Bcfg2.Options.setup.ldap_retry_delay)
+
+ return None
- @property
- def url(self):
- return "ldap://" + self.host + ":" + str(self.port)
+ def get_uri(self):
+ """ The URL of the LDAP server. """
+ if self.uri is None:
+ if self.port == 636:
+ return "ldaps://%s" % self.host
+ return "ldap://%s:%d" % (self.host, self.port)
+ return self.uri
class LdapQuery(object):
- """
- Query referencing an LdapConnection and providing several
- methods for query manipulation.
- """
+ """ Query referencing an LdapConnection and providing several
+ methods for query manipulation. """
+
+ #: Name of the Query, used to register it in additional data.
+ name = ClassName()
- name = "unknown"
base = ""
scope = "sub"
filter = "(objectClass=*)"
@@ -172,80 +267,48 @@ class LdapQuery(object):
result = None
def __unicode__(self):
- return "LdapQuery:" + self.name
+ return "LdapQuery: %s" % self.name
- def is_applicable(self, metadata):
- """
- Overrideable method to determine if the query is to be executed for
- the given metadata object.
- Defaults to true.
- """
- return True
+ def is_applicable(self, metadata): # pylint: disable=W0613
+ """ Check is the query should be executed for a given metadata
+ object.
- def prepare_query(self, metadata):
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
- Overrideable method to alter the query based on metadata.
- Defaults to doing nothing.
-
- In most cases, you will do something like
+ return True
- self.filter = "(cn=" + metadata.hostname + ")"
+ def prepare_query(self, metadata, **kwargs): # pylint: disable=W0613
+ """ Prepares the query based on the client metadata. You can
+ for example modify the filter based on the client hostname.
- here.
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
pass
- def process_result(self, metadata):
- """
- Overrideable method to post-process the query result.
- Defaults to returning the unaltered result.
- """
- return self.result
+ def process_result(self, metadata, **kwargs): # pylint: disable=W0613
+ """ Post-process the query result.
- def get_result(self, metadata):
- """
- Method to handle preparing, executing and processing the query.
- """
- if isinstance(self.connection, LdapConnection):
- self.prepare_query(metadata)
- self.result = self.connection.run_query(self)
- self.result = self.process_result(metadata)
- return self.result
- else:
- logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
- return None
-
-
-class LdapSubQuery(LdapQuery):
- """
- SubQueries are meant for internal use only and are not added
- to the metadata object. They are useful for situations where
- you need to run more than one query to obtain some data.
- """
- def prepare_query(self, metadata, **kwargs):
- """
- Overrideable method to alter the query based on metadata.
- Defaults to doing nothing.
- """
- pass
-
- def process_result(self, metadata, **kwargs):
- """
- Overrideable method to post-process the query result.
- Defaults to returning the unaltered result.
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
return self.result
def get_result(self, metadata, **kwargs):
+ """ Handle the perparation, execution and processing of the query.
+
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
"""
- Method to handle preparing, executing and processing the query.
- """
- if isinstance(self.connection, LdapConnection):
+
+ if self.connection is not None:
self.prepare_query(metadata, **kwargs)
self.result = self.connection.run_query(self)
- return self.process_result(metadata, **kwargs)
+ self.result = self.process_result(metadata, **kwargs)
else:
- logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
- return None
+ raise Bcfg2.Server.Plugin.PluginExecutionError(
+ 'No connection defined for %s' % self.name)
+
+ return self.result
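The reworked plugin no longer uses a register_query() decorator; instead, Ldap/config.py lists its query classes in __queries__ and the plugin instantiates them itself. A minimal illustrative config.py under those assumptions (server URI, base DN, filter and attribute names are placeholders):

    from Bcfg2.Server.Plugins.Ldap import LdapConnection, LdapQuery

    __queries__ = ["MailServers"]

    conn = LdapConnection(uri="ldap://ldap.example.com")

    class MailServers(LdapQuery):
        connection = conn
        base = "ou=hosts,dc=example,dc=com"
        scope = "sub"
        attrs = ["cn"]

        def prepare_query(self, metadata, **kwargs):
            # narrow the search to the client being built
            self.filter = "(memberHost=%s)" % metadata.hostname

        def process_result(self, metadata, **kwargs):
            # flatten the raw (dn, attrs) tuples into a list of names
            return [attrs["cn"][0] for _, attrs in (self.result or [])]

Because get_additional_data now wraps each query in a CallableDict entry, a query only runs when its value is actually accessed, and the result may be cached depending on the cache option in the ldap section.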
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index b850c1870..b912d3725 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -21,24 +21,25 @@ from Bcfg2.Compat import MutableMapping, all, any, wraps
# pylint: enable=W0622
from Bcfg2.version import Bcfg2VersionInfo
+try:
+ from django.db import models
+ HAS_DJANGO = True
+except ImportError:
+ HAS_DJANGO = False
+
# pylint: disable=C0103
ClientVersions = None
MetadataClientModel = None
# pylint: enable=C0103
-HAS_DJANGO = False
def load_django_models():
""" Load models for Django after option parsing has completed """
# pylint: disable=W0602
- global MetadataClientModel, ClientVersions, HAS_DJANGO
+ global MetadataClientModel, ClientVersions
# pylint: enable=W0602
- try:
- from django.db import models
- HAS_DJANGO = True
- except ImportError:
- HAS_DJANGO = False
+ if not HAS_DJANGO:
return
class MetadataClientModel(models.Model, # pylint: disable=W0621
@@ -1394,8 +1395,6 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# look at cert.cN
client = certinfo['commonName']
self.debug_log("Got cN %s; using as client name" % client)
- auth_type = self.auth.get(client,
- Bcfg2.Options.setup.authentication)
elif user == 'root':
id_method = 'address'
try:
@@ -1417,6 +1416,13 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# we have the client name
self.debug_log("Authenticating client %s" % client)
+ # validate id_method
+ auth_type = self.auth.get(client, Bcfg2.Options.setup.authentication)
+ if auth_type == 'cert' and id_method != 'cert':
+ self.logger.error("Client %s does not provide a cert, but only "
+ "cert auth is allowed" % client)
+ return False
+
# next we validate the address
if (id_method != 'uuid' and
not self.validate_client_address(client, address)):
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index d3c38ef19..067e2faad 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -92,17 +92,15 @@ class NagiosGen(Plugin, Generator):
for host in host_configs:
host_data.append(open(host, 'r').read())
- group_list = []
+ used_groups = set(['default'])
for line in "\n".join(host_data).splitlines():
# only include those groups which are actually used
if "hostgroup" in line:
- group_list += line.split()[1].split(',')
-
- group_list = list(set(group_list))
+ used_groups.update(line.split()[1].split(','))
for group in group_configs:
group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group)
- if group_name in group_list:
+ if group_name in used_groups:
groupfile = open(group, 'r')
group_data.append(groupfile.read())
groupfile.close()
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 461be9ba8..b314e60a0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -94,7 +94,12 @@ class Ohai(Bcfg2.Server.Plugin.Plugin,
return [self.probe]
def ReceiveData(self, meta, datalist):
- self.cache[meta.hostname] = datalist[0].text
+ if meta.hostname not in self.cache or \
+ self.cache[meta.hostname] != datalist[0].text:
+ self.cache[meta.hostname] = datalist[0].text
+
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire(meta.hostname)
def get_additional_data(self, meta):
if meta.hostname in self.cache:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index 7de79e2f3..956cb9f51 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -6,6 +6,15 @@ from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
+def strip_suffix(pkgname):
+ """ Remove the ':any' suffix from a dependency name if it is present.
+ """
+ if pkgname.endswith(':any'):
+ return pkgname[:-4]
+ else:
+ return pkgname
+
+
class AptCollection(Collection):
""" Handle collections of APT sources. This is a no-op object
that simply inherits from
@@ -34,8 +43,12 @@ class AptCollection(Collection):
for source in self:
if source.rawurl:
- self.logger.info("Packages: Skipping rawurl %s" %
- source.rawurl)
+ if source.rawurl[-1] != '/':
+ source.rawurl = source.rawurl + "/"
+ index = source.rawurl.rfind("/", 0, -1)
+ lines.append("deb %s %s" %
+ (source.rawurl[:index],
+ source.rawurl[index + 1:]))
else:
lines.append("deb %s %s %s" % (source.url, source.version,
" ".join(source.components)))
@@ -44,7 +57,7 @@ class AptCollection(Collection):
(source.url,
source.version,
" ".join(source.components)))
- lines.append("")
+ lines.append("")
return "\n".join(lines)
@@ -73,6 +86,7 @@ class AptSource(Source):
bdeps = dict()
brecs = dict()
bprov = dict()
+ self.pkgnames = set()
self.essentialpkgs = set()
for fname in self.files:
if not self.rawurl:
@@ -111,6 +125,7 @@ class AptSource(Source):
cdeps = [re.sub(r'\s+', '',
re.sub(r'\(.*\)', '', cdep))
for cdep in dep.split('|')]
+ cdeps = [strip_suffix(cdep) for cdep in cdeps]
dyn_dname = "choice-%s-%s-%s" % (pkgname,
barch,
vindex)
@@ -124,6 +139,7 @@ class AptSource(Source):
else:
raw_dep = re.sub(r'\(.*\)', '', dep)
raw_dep = raw_dep.rstrip().strip()
+ raw_dep = strip_suffix(raw_dep)
if words[0] == 'Recommends':
brecs[barch][pkgname].append(raw_dep)
else:
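
To illustrate the new ':any' handling, here is the helper restated standalone with its expected behaviour on multiarch-style dependency names (a sketch independent of the plugin):

def strip_suffix(pkgname):
    """Remove the ':any' multiarch suffix from a dependency name, if present."""
    if pkgname.endswith(':any'):
        return pkgname[:-4]
    return pkgname

assert strip_suffix('python3:any') == 'python3'
assert strip_suffix('libc6') == 'libc6'
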
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py b/src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py
new file mode 100644
index 000000000..f47b8f22c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Dummy.py
@@ -0,0 +1,35 @@
+""" Dummy backend for :mod:`Bcfg2.Server.Plugins.Packages` """
+
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+
+
+class DummyCollection(Collection):
+ """ Handle collections of Dummy sources. This is a no-op object
+ that simply inherits from
+ :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`,
+ overrides nothing, and defers all operations to :class:`DummySource`
+ """
+
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
+ # we define an __init__ that just calls the parent __init__,
+ # so that we can set the docstring on __init__ to something
+ # different from the parent __init__ -- namely, the parent
+ # __init__ docstring, minus everything after ``.. -----``,
+ # which we use to delineate the actual docs from the
+ # .. autoattribute hacks we have to do to get private
+ # attributes included in sphinx 1.0
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
+ debug=debug)
+ __init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
+
+
+class DummySource(Source):
+ """ Handle Dummy sources """
+
+ #: DummySource sets the ``type`` on Package entries to "dummy"
+ ptype = 'dummy'
+
+ def __init__(self, basepath, xsource):
+ xsource.set('rawurl', 'http://example.com/')
+ Source.__init__(self, basepath, xsource)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 0e15d2e15..6fc084cc4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -1,10 +1,62 @@
""" Pacman backend for :mod:`Bcfg2.Server.Plugins.Packages` """
+import os
import tarfile
+from Bcfg2.Compat import cPickle
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
+def parse_db_file(pkgfile):
+ """ Parse a Pacman database file, returning a dictionary with
+ section headings for keys and lists of strings for values.
+ (Reference: ``sync_db_read`` in ``lib/libalpm/be_sync.c``)
+ """
+
+ pkg = {}
+ section = None
+
+ for line in pkgfile:
+ line = line.strip()
+
+ if section is not None:
+ if not line:
+ section = None
+ else:
+ pkg[section].append(line)
+ elif len(line) >= 2 and line[0] == line[-1] == '%':
+ section = line
+ pkg[section] = []
+
+ return pkg
+
+
+def parse_dep(dep):
+ """ Parse a Pacman dependency string, returning the package name,
+ version restriction (or ``None``), and description (or ``None``).
+ (Reference: ``alpm_dep_from_string`` in ``lib/libalpm/deps.c``)
+ """
+
+ rest_desc = dep.split(': ', 1)
+ if len(rest_desc) == 1:
+ rest, desc = rest_desc[0], None
+ else:
+ rest, desc = rest_desc
+
+ # Search for '=' last, since '<=' and '>=' are possible.
+ for symb in ['<', '>', '=']:
+ idx = rest.find(symb)
+ if idx >= 0:
+ name = rest[:idx]
+ version = rest[idx:]
+ break
+ else:
+ name = rest
+ version = None
+
+ return name, version, desc
+
+
class PacCollection(Collection):
""" Handle collections of Pacman sources. This is a no-op object
that simply inherits from
@@ -24,6 +76,10 @@ class PacCollection(Collection):
debug=debug)
__init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
+ @property
+ def __package_groups__(self):
+ return True
+
class PacSource(Source):
""" Handle Pacman sources """
@@ -31,6 +87,25 @@ class PacSource(Source):
#: PacSource sets the ``type`` on Package entries to "pacman"
ptype = 'pacman'
+ def __init__(self, basepath, xsource):
+ self.pacgroups = {}
+
+ Source.__init__(self, basepath, xsource)
+ __init__.__doc__ = Source.__init__.__doc__
+
+ def load_state(self):
+ data = open(self.cachefile, 'rb')
+ (self.pkgnames, self.deps, self.provides,
+ self.recommends, self.pacgroups) = cPickle.load(data)
+ load_state.__doc__ = Source.load_state.__doc__
+
+ def save_state(self):
+ cache = open(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides,
+ self.recommends, self.pacgroups), cache, 2)
+ cache.close()
+ save_state.__doc__ = Source.save_state.__doc__
+
@property
def urls(self):
""" A list of URLs to the base metadata file for each
@@ -45,14 +120,12 @@ class PacSource(Source):
else:
raise Exception("PacSource : RAWUrl not supported (yet)")
- def read_files(self):
- bdeps = dict()
- bprov = dict()
-
- depfnames = ['Depends', 'Pre-Depends']
- if self.recommended:
- depfnames.append('Recommends')
-
+ def read_files(self): # pylint: disable=R0912
+ bdeps = {}
+ brecs = {}
+ bprov = {}
+ self.pkgnames = set()
+ self.pacgroups = {}
for fname in self.files:
if not self.rawurl:
barch = [x for x in fname.split('@') if x in self.arches][0]
@@ -62,8 +135,9 @@ class PacSource(Source):
barch = self.arches[0]
if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
+ bdeps[barch] = {}
+ brecs[barch] = {}
+ bprov[barch] = {}
try:
self.debug_log("Packages: try to read %s" % fname)
tar = tarfile.open(fname, "r")
@@ -71,11 +145,52 @@ class PacSource(Source):
self.logger.error("Packages: Failed to read file %s" % fname)
raise
+ packages = {}
for tarinfo in tar:
- if tarinfo.isdir():
- self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
- self.debug_log("Packages: added %s" %
- tarinfo.name.rsplit("-", 2)[0])
+ if not tarinfo.isfile():
+ continue
+ prefix = os.path.dirname(tarinfo.name)
+ if prefix not in packages:
+ packages[prefix] = {}
+ pkg = parse_db_file(tar.extractfile(tarinfo))
+ packages[prefix].update(pkg)
+
+ for pkg in packages.values():
+ pkgname = pkg['%NAME%'][0]
+ self.pkgnames.add(pkgname)
+ bdeps[barch][pkgname] = []
+ brecs[barch][pkgname] = []
+
+ if '%DEPENDS%' in pkg:
+ for dep in pkg['%DEPENDS%']:
+ dname = parse_dep(dep)[0]
+ bdeps[barch][pkgname].append(dname)
+
+ if '%OPTDEPENDS%' in pkg:
+ for dep in pkg['%OPTDEPENDS%']:
+ dname = parse_dep(dep)[0]
+ brecs[barch][pkgname].append(dname)
+
+ if '%PROVIDES%' in pkg:
+ for dep in pkg['%PROVIDES%']:
+ dname = parse_dep(dep)[0]
+ if dname not in bprov[barch]:
+ bprov[barch][dname] = set()
+ bprov[barch][dname].add(pkgname)
+
+ if '%GROUPS%' in pkg:
+ for group in pkg['%GROUPS%']:
+ if group not in self.pacgroups:
+ self.pacgroups[group] = []
+ self.pacgroups[group].append(pkgname)
+
tar.close()
- self.process_files(bdeps, bprov)
+ self.process_files(bdeps, bprov, brecs)
read_files.__doc__ = Source.read_files.__doc__
+
+ def get_group(self, metadata, group, ptype=None):
+ try:
+ return self.pacgroups[group]
+ except KeyError:
+ return []
+ get_group.__doc__ = Source.get_group.__doc__
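
As a quick check of the new database parsing, parse_dep splits a libalpm-style dependency string into name, version restriction and description; restated standalone with expected results (the example strings are illustrative):

def parse_dep(dep):
    rest_desc = dep.split(': ', 1)
    if len(rest_desc) == 1:
        rest, desc = rest_desc[0], None
    else:
        rest, desc = rest_desc
    for symb in ['<', '>', '=']:   # '=' last, since '<=' and '>=' are possible
        idx = rest.find(symb)
        if idx >= 0:
            return rest[:idx], rest[idx:], desc
    return rest, None, desc

assert parse_dep('glibc>=2.28') == ('glibc', '>=2.28', None)
assert parse_dep('sh') == ('sh', None, None)
assert parse_dep('gnupg: for signing') == ('gnupg', None, 'for signing')
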
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
index 736cdcdd4..55dd4e488 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
@@ -56,6 +56,7 @@ class PkgngSource(Source):
def read_files(self):
bdeps = dict()
+ self.pkgnames = set()
for fname in self.files:
if not self.rawurl:
abi = [x
@@ -75,9 +76,7 @@ class PkgngSource(Source):
self.logger.error("Packages: Failed to read file %s" % fname)
raise
for line in reader.readlines():
- if not isinstance(line, str):
- line = line.decode('utf-8')
- pkg = json.loads(line)
+ pkg = json.loads(unicode(line, errors='ignore'))
pkgname = pkg['name']
self.pkgnames.add(pkgname)
if 'deps' in pkg:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index c9f6ea14a..86f7698f7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -135,22 +135,22 @@ class Source(Debuggable): # pylint: disable=R0902
#: A list of the text of all 'Component' attributes of this
#: source from XML
- self.components = [item.text for item in xsource.findall('Component')]
+ self.components = []
#: A list of the arches supported by this source
- self.arches = [item.text for item in xsource.findall('Arch')]
+ self.arches = []
#: A list of the the names of packages that are blacklisted
#: from this source
- self.blacklist = [item.text for item in xsource.findall('Blacklist')]
+ self.blacklist = []
#: A list of the the names of packages that are whitelisted in
#: this source
- self.whitelist = [item.text for item in xsource.findall('Whitelist')]
+ self.whitelist = []
#: Whether or not to include deb-src lines in the generated APT
#: configuration
- self.debsrc = xsource.get('debsrc', 'false') == 'true'
+ self.debsrc = False
#: A dict of repository options that will be included in the
#: configuration generated on the server side (if such is
@@ -162,51 +162,38 @@ class Source(Debuggable): # pylint: disable=R0902
#: configuration generated for the client (if that is
#: supported by the backend)
self.client_options = dict()
- opts = xsource.findall("Options")
- for el in opts:
- repoopts = dict([(k, v)
- for k, v in el.attrib.items()
- if k != "clientonly" and k != "serveronly"])
- if el.get("clientonly", "false").lower() == "false":
- self.server_options.update(repoopts)
- if el.get("serveronly", "false").lower() == "false":
- self.client_options.update(repoopts)
#: A list of URLs to GPG keys that apply to this source
- self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
+ self.gpgkeys = []
#: Whether or not to include essential packages from this source
- self.essential = xsource.get('essential', 'true').lower() == 'true'
+ self.essential = True
#: Whether or not to include recommended packages from this source
- self.recommended = xsource.get('recommended',
- 'false').lower() == 'true'
+ self.recommended = False
#: The "rawurl" attribute from :attr:`xsource`, if applicable.
#: A trailing slash is automatically appended to this if there
#: wasn't one already present.
- self.rawurl = xsource.get('rawurl', '')
- if self.rawurl and not self.rawurl.endswith("/"):
- self.rawurl += "/"
+ self.rawurl = None
#: The "url" attribute from :attr:`xsource`, if applicable. A
#: trailing slash is automatically appended to this if there
#: wasn't one already present.
- self.url = xsource.get('url', '')
- if self.url and not self.url.endswith("/"):
- self.url += "/"
+ self.url = None
#: The "version" attribute from :attr:`xsource`
- self.version = xsource.get('version', '')
+ self.version = None
#: The "name" attribute from :attr:`xsource`
- self.name = xsource.get('name', None)
+ self.name = None
#: A list of predicates that are used to determine if this
#: source applies to a given
#: :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata`
#: object.
self.conditions = []
+
#: Formerly, :ref:`server-plugins-generators-packages` only
#: supported applying package sources to groups; that is, they
#: could not be assigned by more complicated logic like
@@ -214,22 +201,6 @@ class Source(Debuggable): # pylint: disable=R0902
#: attribute attempts to provide for some limited backwards
#: compat with older code that relies on this.
self.groups = []
- for el in xsource.iterancestors():
- if el.tag == "Group":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") not in m.groups)
- else:
- self.groups.append(el.get("name"))
- self.conditions.append(lambda m, el=el:
- el.get("name") in m.groups)
- elif el.tag == "Client":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") != m.hostname)
- else:
- self.conditions.append(lambda m, el=el:
- el.get("name") == m.hostname)
#: A set of all package names in this source. This will not
#: necessarily be populated, particularly by backends that
@@ -253,6 +224,8 @@ class Source(Debuggable): # pylint: disable=R0902
#: symbols>``. This will not necessarily be populated.
self.recommends = dict()
+ self._init_attributes(xsource)
+
#: The file (or directory) used for this source's cache data
self.cachefile = os.path.join(self.basepath,
"cache-%s" % self.cachekey)
@@ -292,6 +265,69 @@ class Source(Debuggable): # pylint: disable=R0902
setting['name'] = self.get_repo_name(setting)
self.url_map.extend(usettings)
+ def _init_attributes(self, xsource):
+ """
+ This function evaluates the Source tag and parses all of its
+ attributes. Override this function in a subclass to parse
+ backend-specific attributes. Do not use ``__init__`` for this,
+ because ``Source.__init__`` may call other functions that
+ already need these fields. This function is called before any
+ other function.
+
+ :param xsource: The XML tag that describes this source
+ :type xsource: lxml.etree._Element
+ """
+
+ self.components = [item.text for item in xsource.findall('Component')]
+ self.arches = [item.text for item in xsource.findall('Arch')]
+ self.blacklist = [item.text for item in xsource.findall('Blacklist')]
+ self.whitelist = [item.text for item in xsource.findall('Whitelist')]
+ self.debsrc = xsource.get('debsrc', 'false') == 'true'
+
+ opts = xsource.findall("Options")
+ for el in opts:
+ repoopts = dict([(k, v)
+ for k, v in el.attrib.items()
+ if k != "clientonly" and k != "serveronly"])
+ if el.get("clientonly", "false").lower() == "false":
+ self.server_options.update(repoopts)
+ if el.get("serveronly", "false").lower() == "false":
+ self.client_options.update(repoopts)
+
+ self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
+
+ self.essential = xsource.get('essential', 'true').lower() == 'true'
+ self.recommended = xsource.get('recommended',
+ 'false').lower() == 'true'
+
+ self.rawurl = xsource.get('rawurl', '')
+ if self.rawurl and not self.rawurl.endswith("/"):
+ self.rawurl += "/"
+
+ self.url = xsource.get('url', '')
+ if self.url and not self.url.endswith("/"):
+ self.url += "/"
+
+ self.version = xsource.get('version', '')
+ self.name = xsource.get('name', None)
+
+ for el in xsource.iterancestors():
+ if el.tag == "Group":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") not in m.groups)
+ else:
+ self.groups.append(el.get("name"))
+ self.conditions.append(lambda m, el=el:
+ el.get("name") in m.groups)
+ elif el.tag == "Client":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") != m.hostname)
+ else:
+ self.conditions.append(lambda m, el=el:
+ el.get("name") == m.hostname)
+
@property
def cachekey(self):
""" A unique key for this source that will be used to generate
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index dbe3f9ce5..846fb89cd 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -456,16 +456,13 @@ class YumCollection(Collection):
reponame = basereponame
added = False
+ rid = 1
while not added:
try:
config.add_section(reponame)
added = True
except ConfigParser.DuplicateSectionError:
- match = re.search(r'-(\d+)', reponame)
- if match:
- rid = int(match.group(1)) + 1
- else:
- rid = 1
+ rid += 1
reponame = "%s-%d" % (basereponame, rid)
config.set(reponame, "name", reponame)
@@ -1004,8 +1001,20 @@ class YumSource(Source):
ptype = 'yum'
def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
+ self.filemap = dict()
+ self.file_to_arch = dict()
+ self.needed_paths = set()
+ self.packages = dict()
+ self.yumgroups = dict()
self.pulp_id = None
+ self.repo = None
+
+ Source.__init__(self, basepath, xsource)
+ __init__.__doc__ = Source.__init__.__doc__
+
+ def _init_attributes(self, xsource):
+ Source._init_attributes(self, xsource)
+
if HAS_PULP and xsource.get("pulp_id"):
self.pulp_id = xsource.get("pulp_id")
@@ -1034,15 +1043,11 @@ class YumSource(Source):
self.repo['relative_path'])
self.arches = [self.repo['arch']]
- self.packages = dict()
self.deps = dict([('global', dict())])
self.provides = dict([('global', dict())])
self.filemap = dict([(x, dict())
for x in ['global'] + self.arches])
- self.needed_paths = set()
- self.file_to_arch = dict()
- self.yumgroups = dict()
- __init__.__doc__ = Source.__init__.__doc__
+ _init_attributes.__doc__ = Source._init_attributes.__doc__
@property
def use_yum(self):
@@ -1130,6 +1135,94 @@ class YumSource(Source):
self.file_to_arch[self.escape_url(fullurl)] = arch
return urls
+ # pylint: disable=R0911,R0912
+ # disabling the pylint errors above because we are interested in
+ # replicating the flow of the RPM code.
+ def _compare_rpm_versions(self, str1, str2):
+ """ Compare RPM versions.
+
+ This is an attempt to reimplement RPM's rpmvercmp method in python.
+
+ :param str1: package 1 version string
+ :param str2: package 2 version string
+ :return: 1 - str1 is newer than str2
+ 0 - str1 and str2 are the same version
+ -1 - str2 is newer than str1"""
+ if str1 == str2:
+ return 0
+
+ front_strip_re = re.compile('^[^A-Za-z0-9~]+')
+ risdigit = re.compile('(^[0-9]+)')
+ risalpha = re.compile('(^[A-Za-z])')
+ lzeroes = re.compile('^0+')
+
+ while len(str1) > 0 or len(str2) > 0:
+ str1 = front_strip_re.sub('', str1)
+ str2 = front_strip_re.sub('', str2)
+
+ if len(str1) == 0 or len(str2) == 0:
+ break
+
+ # handle the tilde separator
+ if str1[0] == '~' and str2[0] == '~':
+ str1 = str1[1:]
+ str2 = str2[1:]
+ elif str1[0] == '~':
+ return -1
+ elif str2[0] == '~':
+ return 1
+
+ # grab continuous segments from each string
+ isnum = False
+ if risdigit.match(str1):
+ segment1 = risdigit.split(str1)[1]
+ str1 = risdigit.split(str1)[2]
+ if risdigit.match(str2):
+ segment2 = risdigit.split(str2)[1]
+ str2 = risdigit.split(str2)[2]
+ else:
+ segment2 = ''
+ isnum = True
+ else:
+ segment1 = risalpha.split(str1)[1]
+ str1 = risalpha.split(str1)[2]
+ if risalpha.match(str2):
+ segment2 = risalpha.split(str2)[1]
+ str2 = risalpha.split(str2)[2]
+ else:
+ segment2 = ''
+
+ # numeric segments are always newer than alpha segments
+ if len(segment2) == 0:
+ if isnum:
+ return 1
+ return -1
+
+ if isnum:
+ # discard leading zeroes
+ segment1 = lzeroes.sub('', segment1)
+ segment2 = lzeroes.sub('', segment2)
+ # higher number has more digits
+ if len(segment1) > len(segment2):
+ return 1
+ elif len(segment2) > len(segment1):
+ return -1
+ # do a simple string comparison
+ if segment1 > segment2:
+ return 1
+ elif segment2 > segment1:
+ return -1
+
+ # if one of the strings is empty, the version of the longer
+ # string is higher
+ if len(str1) > len(str2):
+ return 1
+ elif len(str2) > len(str1):
+ return -1
+ else:
+ return 0
+ # pylint: enable=R0911,R0912
+
@track_statistics()
def read_files(self):
""" When using the builtin yum parser, read and parse locally
@@ -1198,13 +1291,33 @@ class YumSource(Source):
if arch not in self.packages:
self.packages[arch] = set()
if arch not in self.deps:
- self.deps[arch] = dict()
+ self.deps[arch] = {}
if arch not in self.provides:
- self.provides[arch] = dict()
+ self.provides[arch] = {}
+ versionmap = {}
for pkg in data.getchildren():
if not pkg.tag.endswith('package'):
continue
pkgname = pkg.find(XP + 'name').text
+ vtag = pkg.find(XP + 'version')
+ epoch = vtag.get('epoch')
+ version = vtag.get('ver')
+ release = vtag.get('rel')
+ if pkgname in self.packages[arch]:
+ # skip if version older than a previous version
+ if (self._compare_rpm_versions(
+ epoch, versionmap[pkgname]['epoch']) < 0):
+ continue
+ elif (self._compare_rpm_versions(
+ version, versionmap[pkgname]['version']) < 0):
+ continue
+ elif (self._compare_rpm_versions(
+ release, versionmap[pkgname]['release']) < 0):
+ continue
+ versionmap[pkgname] = {}
+ versionmap[pkgname]['epoch'] = epoch
+ versionmap[pkgname]['version'] = version
+ versionmap[pkgname]['release'] = release
self.packages[arch].add(pkgname)
pdata = pkg.find(XP + 'format')
@@ -1256,10 +1369,15 @@ class YumSource(Source):
arch = [a for a in self.arches if a in metadata.groups]
if not arch:
return False
- return ((package in self.packages['global'] or
- package in self.packages[arch[0]]) and
- package not in self.blacklist and
- (len(self.whitelist) == 0 or package in self.whitelist))
+ try:
+ return ((package in self.packages['global'] or
+ package in self.packages[arch[0]]) and
+ package not in self.blacklist and
+ (len(self.whitelist) == 0 or package in self.whitelist))
+ except KeyError:
+ self.logger.debug("Packages: Unable to find %s for arch %s" %
+ (package, arch[0]))
+ return False
is_package.__doc__ = Source.is_package.__doc__
def get_vpkgs(self, metadata):
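
A few expected outcomes of the comparison method added above, assuming `src` is an already constructed YumSource instance (shown only to illustrate the rpmvercmp-style ordering it reimplements):

# assuming `src` is an existing YumSource instance
assert src._compare_rpm_versions('1.0', '1.0') == 0
assert src._compare_rpm_versions('1.10', '1.9') == 1      # more digits in the numeric segment wins
assert src._compare_rpm_versions('1.0a', '1.0.1') == -1   # numeric segment beats alpha
assert src._compare_rpm_versions('1.0~rc1', '1.0~rc2') == -1
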
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
index b2e43bde7..89cc23090 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
@@ -274,29 +274,31 @@ class HelperSubcommand(Bcfg2.Options.Subcommand):
# whether or not this command accepts input on stdin
accept_input = True
- def __init__(self):
- Bcfg2.Options.Subcommand.__init__(self)
- self.verbosity = 0
+ # logging level
+ verbosity = 0
+
+ def run(self, setup):
if Bcfg2.Options.setup.debug:
self.verbosity = 5
elif Bcfg2.Options.setup.verbose:
self.verbosity = 1
- def run(self, setup):
- try:
- data = json.loads(sys.stdin.read())
- except ValueError:
- self.logger.error("Error decoding JSON input: %s" %
- sys.exc_info()[1])
- print(json.dumps(self.fallback))
- return 2
+ data = None
+ if self.accept_input:
+ try:
+ data = json.loads(sys.stdin.read())
+ except ValueError:
+ self.logger.error("Error decoding JSON input: %s" %
+ sys.exc_info()[1])
+ print(json.dumps(self.fallback))
+ return 2
try:
print(json.dumps(self._run(setup, data)))
except: # pylint: disable=W0702
self.logger.error("Unexpected error running %s: %s" %
- self.__class__.__name__.lower(),
- sys.exc_info()[1], exc_info=1)
+ (self.__class__.__name__.lower(),
+ sys.exc_info()[1]), exc_info=1)
print(json.dumps(self.fallback))
return 2
return 0
@@ -310,10 +312,13 @@ class DepSolverSubcommand(HelperSubcommand): # pylint: disable=W0223
""" Base class for helper commands that use the depsolver (i.e.,
only resolve dependencies, don't modify the cache) """
- def __init__(self):
- HelperSubcommand.__init__(self)
+ # DepSolver instance used in _run function
+ depsolver = None
+
+ def run(self, setup):
self.depsolver = DepSolver(Bcfg2.Options.setup.yum_config,
self.verbosity)
+ HelperSubcommand.run(self, setup)
class CacheManagerSubcommand(HelperSubcommand): # pylint: disable=W0223
@@ -322,10 +327,13 @@ class CacheManagerSubcommand(HelperSubcommand): # pylint: disable=W0223
fallback = False
accept_input = False
- def __init__(self):
- HelperSubcommand.__init__(self)
+ # CacheManager instance used in _run function
+ cachemgr = None
+
+ def run(self, setup):
self.cachemgr = CacheManager(Bcfg2.Options.setup.yum_config,
self.verbosity)
+ HelperSubcommand.run(self, setup)
class Clean(CacheManagerSubcommand):
@@ -376,10 +384,7 @@ class CLI(Bcfg2.Options.CommandRegistry):
""" The bcfg2-yum-helper CLI """
options = [
Bcfg2.Options.PathOption(
- "-c", "--yum-config", help="Yum config file"),
- Bcfg2.Options.PositionalArgument(
- "command", help="Yum helper command",
- choices=['clean', 'complete', 'get_groups'])]
+ "-c", "--yum-config", help="Yum config file")]
def __init__(self):
Bcfg2.Options.CommandRegistry.__init__(self)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index 3aa5c415f..95b4baa3e 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -10,7 +10,7 @@ import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Cache
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import urlopen, HTTPError, URLError, MutableMapping
+from Bcfg2.Compat import urlopen, HTTPError, URLError
from Bcfg2.Server.Plugins.Packages.Collection import Collection, \
get_collection_class
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
@@ -36,52 +36,6 @@ class PackagesBackendAction(Bcfg2.Options.ComponentAction):
fail_silently = True
-class OnDemandDict(MutableMapping):
- """ This maps a set of keys to a set of value-getting functions;
- the values are populated on-the-fly by the functions as the values
- are needed (and not before). This is used by
- :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data`;
- see the docstring for that function for details on why.
-
- Unlike a dict, you should not specify values for for the righthand
- side of this mapping, but functions that get values. E.g.:
-
- .. code-block:: python
-
- d = OnDemandDict(foo=load_foo,
- bar=lambda: "bar");
- """
-
- def __init__(self, **getters):
- self._values = dict()
- self._getters = dict(**getters)
-
- def __getitem__(self, key):
- if key not in self._values:
- self._values[key] = self._getters[key]()
- return self._values[key]
-
- def __setitem__(self, key, getter):
- self._getters[key] = getter
-
- def __delitem__(self, key):
- del self._values[key]
- del self._getters[key]
-
- def __len__(self):
- return len(self._getters)
-
- def __iter__(self):
- return iter(self._getters.keys())
-
- def __repr__(self):
- rv = dict(self._values)
- for key in self._getters.keys():
- if key not in rv:
- rv[key] = 'unknown'
- return str(rv)
-
-
class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
@@ -103,7 +57,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
help="Packages backends to load",
type=Bcfg2.Options.Types.comma_list,
action=PackagesBackendAction,
- default=['Yum', 'Apt', 'Pac', 'Pkgng']),
+ default=['Yum', 'Apt', 'Pac', 'Pkgng', 'Dummy']),
Bcfg2.Options.PathOption(
cf=("packages", "cache"), dest="packages_cache",
help="Path to the Packages cache",
@@ -578,7 +532,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
def get_additional_data(self, metadata):
""" Return additional data for the given client. This will be
- an :class:`Bcfg2.Server.Plugins.Packages.OnDemandDict`
+ an :class:`Bcfg2.Server.Plugin.OnDemandDict`
containing two keys:
* ``sources``, whose value is a list of data returned from
@@ -610,7 +564,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
get_collection() until it's absolutely necessary. """
return self.get_collection(metadata).get_additional_data()
- return OnDemandDict(
+ return Bcfg2.Server.Plugin.OnDemandDict(
sources=get_sources,
get_config=lambda: self.get_config)
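
With OnDemandDict now exposed from Bcfg2.Server.Plugin (as the change above relies on), a short sketch of its lazy lookup behaviour; the getter functions and values here are placeholders:

from Bcfg2.Server.Plugin import OnDemandDict

def load_sources():
    # stands in for an expensive call such as building the Collection data
    return ['source-1', 'source-2']

data = OnDemandDict(sources=load_sources,
                    config=lambda: '/etc/bcfg2.conf')
print(data['config'])    # the cheap getter runs on first access
print(data['sources'])   # the expensive getter runs only when needed
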
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index 76aab69b5..270bfa62d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -15,7 +15,12 @@ import Bcfg2.Server.FileMonitor
from Bcfg2.Logger import Debuggable
from Bcfg2.Server.Statistics import track_statistics
-HAS_DJANGO = False
+try:
+ from django.db import models
+ HAS_DJANGO = True
+except ImportError:
+ HAS_DJANGO = False
+
# pylint: disable=C0103
ProbesDataModel = None
ProbesGroupsModel = None
@@ -25,13 +30,10 @@ ProbesGroupsModel = None
def load_django_models():
""" Load models for Django after option parsing has completed """
# pylint: disable=W0602
- global ProbesDataModel, ProbesGroupsModel, HAS_DJANGO
+ global ProbesDataModel, ProbesGroupsModel
# pylint: enable=W0602
- try:
- from django.db import models
- HAS_DJANGO = True
- except ImportError:
- HAS_DJANGO = False
+
+ if not HAS_DJANGO:
return
class ProbesDataModel(models.Model, # pylint: disable=W0621,W0612
@@ -74,6 +76,7 @@ class ProbeStore(Debuggable):
def __init__(self, core, datadir): # pylint: disable=W0613
Debuggable.__init__(self)
+ self.core = core
self._groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups")
self._datacache = Bcfg2.Server.Cache.Cache("Probes", "probedata")
@@ -134,7 +137,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
groupdata = ProbesGroupsModel.objects.filter(hostname=hostname)
self._groupcache[hostname] = list(set(r.group for r in groupdata))
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
@Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
def set_groups(self, hostname, groups):
@@ -155,7 +158,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
ProbesGroupsModel.objects.filter(
hostname=hostname).exclude(group__in=groups).delete()
if olddata != groups:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
def _load_data(self, hostname):
Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
@@ -168,7 +171,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
time.mktime(pdata.timestamp.timetuple())
ts_set = True
self._datacache[hostname][pdata.probe] = ProbeData(pdata.data)
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
@Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
def set_data(self, hostname, data):
@@ -198,7 +201,7 @@ class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
qset.delete()
expire_metadata = True
if expire_metadata:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
class XMLProbeStore(ProbeStore):
@@ -234,7 +237,7 @@ class XMLProbeStore(ProbeStore):
self._groupcache[client.get('name')].append(
pdata.get('name'))
- Bcfg2.Server.Cache.expire("Metadata")
+ self.core.metadata_cache.expire()
def _load_groups(self, hostname):
self._load_data(hostname)
@@ -274,7 +277,7 @@ class XMLProbeStore(ProbeStore):
olddata = self._groupcache.get(hostname, [])
self._groupcache[hostname] = groups
if olddata != groups:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
def set_data(self, hostname, data):
Bcfg2.Server.Cache.expire("Probes", "probedata", hostname)
@@ -285,7 +288,7 @@ class XMLProbeStore(ProbeStore):
self._datacache[hostname][probe] = pdata
expire_metadata |= olddata != data
if expire_metadata:
- Bcfg2.Server.Cache.expire("Metadata", hostname)
+ self.core.metadata_cache.expire(hostname)
class ClientProbeDataSet(dict):
@@ -304,7 +307,8 @@ class ProbeData(str): # pylint: disable=E0012,R0924
.json, and .yaml properties to provide convenient ways to use
ProbeData objects as XML, JSON, or YAML data """
def __new__(cls, data):
- if isinstance(data, unicode):
+ # prevent double encoding utf-8 in python3
+ if isinstance(data, unicode) and not isinstance(data, str):
return str.__new__(cls, data.encode('utf-8'))
else:
return str.__new__(cls, data)
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index c4dd75e60..e6549b714 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -35,13 +35,17 @@ LOGGER = logging.getLogger(__name__)
class PropertyFile(object):
""" Base Properties file handler """
- def __init__(self, name):
+ def __init__(self, name, core):
"""
:param name: The filename of this properties file.
+ :type name: string
+ :param core: The Bcfg2.Server.Core initializing the Properties plugin
+ :type core: Bcfg2.Server.Core
.. automethod:: _write
"""
self.name = name
+ self.core = core
def write(self):
""" Write the data in this data structure back to the property
@@ -69,6 +73,12 @@ class PropertyFile(object):
file. """
raise NotImplementedError
+ def _expire_metadata_cache(self):
+ """ Expires the metadata cache, if it is required by the caching
+ mode. """
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+
def validate_data(self):
""" Verify that the data in this file is valid. """
raise NotImplementedError
@@ -81,9 +91,9 @@ class PropertyFile(object):
class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
"""Handle JSON Properties files."""
- def __init__(self, name):
+ def __init__(self, name, core):
Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
- PropertyFile.__init__(self, name)
+ PropertyFile.__init__(self, name, core)
self.json = None
def Index(self):
@@ -93,10 +103,13 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Could not load JSON data from %s: %s" %
(self.name, err))
+ self._expire_metadata_cache()
+ Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__
def _write(self):
json.dump(self.json, open(self.name, 'wb'))
return True
+ _write.__doc__ = PropertyFile._write.__doc__
def validate_data(self):
try:
@@ -105,6 +118,7 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Data for %s cannot be dumped to JSON: "
"%s" % (self.name, err))
+ validate_data.__doc__ = PropertyFile.validate_data.__doc__
def __str__(self):
return str(self.json)
@@ -116,11 +130,10 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
""" Handle YAML Properties files. """
- def __init__(self, name):
+ def __init__(self, name, core):
Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
- PropertyFile.__init__(self, name)
+ PropertyFile.__init__(self, name, core)
self.yaml = None
- __init__.__doc__ = Bcfg2.Server.Plugin.FileBacked.__init__.__doc__
def Index(self):
try:
@@ -129,6 +142,7 @@ class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Could not load YAML data from %s: %s" %
(self.name, err))
+ self._expire_metadata_cache()
Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__
def _write(self):
@@ -155,10 +169,15 @@ class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
""" Handle XML Properties files. """
- def __init__(self, name, should_monitor=False):
+ def __init__(self, name, core, should_monitor=False):
Bcfg2.Server.Plugin.StructFile.__init__(self, name,
should_monitor=should_monitor)
- PropertyFile.__init__(self, name)
+ PropertyFile.__init__(self, name, core)
+
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+ self._expire_metadata_cache()
+ Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__
def _write(self):
open(self.name, "wb").write(
@@ -258,11 +277,11 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
:class:`PropertyFile`
"""
if fname.endswith(".xml"):
- return XMLPropertyFile(fname)
+ return XMLPropertyFile(fname, self.core)
elif HAS_JSON and fname.endswith(".json"):
- return JSONPropertyFile(fname)
+ return JSONPropertyFile(fname, self.core)
elif HAS_YAML and (fname.endswith(".yaml") or fname.endswith(".yml")):
- return YAMLPropertyFile(fname)
+ return YAMLPropertyFile(fname, self.core)
else:
raise Bcfg2.Server.Plugin.PluginExecutionError(
"Properties: Unknown extension %s" % fname)
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
index 59fbe6f03..e2d8a058f 100644
--- a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -117,7 +117,7 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
self.logger.warning("PuppetENC is incompatible with aggressive "
"client metadata caching, try 'cautious' or "
"'initial' instead")
- self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+ self.core.metadata_cache.expire()
def end_statistics(self, metadata):
self.end_client_run(self, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Reporting.py b/src/lib/Bcfg2/Server/Plugins/Reporting.py
index 5c73546b4..e372006c7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Reporting.py
+++ b/src/lib/Bcfg2/Server/Plugins/Reporting.py
@@ -9,12 +9,15 @@ from Bcfg2.Reporting.Transport.base import TransportError
from Bcfg2.Server.Plugin import Statistics, PullSource, Threaded, \
PluginInitError, PluginExecutionError
-# required for reporting
try:
- import south # pylint: disable=W0611
- HAS_SOUTH = True
+ import django
+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
+ HAS_REPORTING = True
+ else:
+ import south # pylint: disable=W0611
+ HAS_REPORTING = True
except ImportError:
- HAS_SOUTH = False
+ HAS_REPORTING = False
def _rpc_call(method):
@@ -48,8 +51,8 @@ class Reporting(Statistics, Threaded, PullSource):
self.whoami = platform.node()
self.transport = None
- if not HAS_SOUTH:
- msg = "Django south is required for Reporting"
+ if not HAS_REPORTING:
+ msg = "Django 1.7+ or Django south is required for Reporting"
self.logger.error(msg)
raise PluginInitError(msg)
diff --git a/src/lib/Bcfg2/Server/Plugins/Rules.py b/src/lib/Bcfg2/Server/Plugins/Rules.py
index a3f682ed6..cf659251c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Rules.py
+++ b/src/lib/Bcfg2/Server/Plugins/Rules.py
@@ -1,10 +1,17 @@
"""This generator provides rule-based entry mappings."""
+import copy
import re
+import string
import Bcfg2.Options
import Bcfg2.Server.Plugin
+class NameTemplate(string.Template):
+ """Simple subclass of string.Template with a custom delimiter."""
+ delimiter = '%'
+
+
class Rules(Bcfg2.Server.Plugin.PrioDir):
"""This is a generator that handles service assignments."""
__author__ = 'bcfg-dev@mcs.anl.gov'
@@ -12,7 +19,10 @@ class Rules(Bcfg2.Server.Plugin.PrioDir):
options = Bcfg2.Server.Plugin.PrioDir.options + [
Bcfg2.Options.BooleanOption(
cf=("rules", "regex"), dest="rules_regex",
- help="Allow regular expressions in Rules")]
+ help="Allow regular expressions in Rules"),
+ Bcfg2.Options.BooleanOption(
+ cf=("rules", "replace_name"), dest="rules_replace_name",
+ help="Replace %{name} in attributes with name of target entry")]
def __init__(self, core):
Bcfg2.Server.Plugin.PrioDir.__init__(self, core)
@@ -46,7 +56,22 @@ class Rules(Bcfg2.Server.Plugin.PrioDir):
return True
return False
+ def _apply(self, entry, data):
+ if self._replace_name_enabled:
+ data = copy.deepcopy(data)
+ for key, val in list(data.attrib.items()):
+ data.attrib[key] = NameTemplate(val).safe_substitute(
+ name=entry.get('name'))
+
+ Bcfg2.Server.Plugin.PrioDir._apply(self, entry, data)
+
@property
def _regex_enabled(self):
""" Return True if rules regexes are enabled, False otherwise """
return Bcfg2.Options.setup.rules_regex
+
+ @property
+ def _replace_name_enabled(self):
+ """ Return True if the replace_name feature is enabled,
+ False otherwise """
+ return Bcfg2.Options.setup.rules_replace_name
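
The new replace_name option amounts to NameTemplate substitution with '%' as the delimiter; a self-contained sketch of what that substitution does (the path value is illustrative):

import string

class NameTemplate(string.Template):
    """string.Template with '%' as the delimiter, as used by Rules."""
    delimiter = '%'

tmpl = NameTemplate('/var/lib/%{name}/data')
assert tmpl.safe_substitute(name='myservice') == '/var/lib/myservice/data'
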
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index e4fb9b565..08acc4d8d 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -103,6 +103,7 @@ class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
class SSHbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.PullTarget):
"""
@@ -120,6 +121,10 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
private key for (hostname)
ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
public key for (hostname)
+ ssh_host_ed25519_key.H_(hostname) -> the v2 ssh host
+ private key for (hostname)
+ ssh_host_ed25519_key.pub.H_(hostname) -> the v2 ssh host
+ public key for (hostname)
ssh_known_hosts -> the current known hosts file. this
is regenerated each time a new key is generated.
@@ -127,10 +132,12 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
__author__ = 'bcfg-dev@mcs.anl.gov'
keypatterns = ["ssh_host_dsa_key",
"ssh_host_ecdsa_key",
+ "ssh_host_ed25519_key",
"ssh_host_rsa_key",
"ssh_host_key",
"ssh_host_dsa_key.pub",
"ssh_host_ecdsa_key.pub",
+ "ssh_host_ed25519_key.pub",
"ssh_host_rsa_key.pub",
"ssh_host_key.pub"]
@@ -141,6 +148,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core):
Bcfg2.Server.Plugin.Plugin.__init__(self, core)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
self.ipcache = {}
@@ -210,7 +218,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
try:
names[cmeta.hostname].update(
self.get_namecache_entry(ip))
- except socket.gaierror:
+ except socket.herror:
continue
names[cmeta.hostname] = sorted(names[cmeta.hostname])
@@ -284,6 +292,10 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.debug_log("New public key %s; invalidating "
"ssh_known_hosts cache" % event.filename)
self.skn = False
+
+ if self.core.metadata_cache_mode in ['cautious',
+ 'aggressive']:
+ self.core.metadata_cache.expire()
return
if event.filename == 'info.xml':
@@ -332,7 +344,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.ipcache[client] = False
msg = "Failed to find IP address for %s: %s" % (client,
result.error)
- self.logger(msg)
+ self.logger.error(msg)
raise PluginExecutionError(msg)
def get_namecache_entry(self, cip):
@@ -342,7 +354,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
if self.namecache[cip]:
return self.namecache[cip]
else:
- raise socket.gaierror
+ raise socket.herror
else:
# add an entry that has not been cached
try:
@@ -353,7 +365,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.namecache[cip] = []
self.namecache[cip].extend(rvlookup[1])
return self.namecache[cip]
- except socket.gaierror:
+ except socket.herror:
self.namecache[cip] = False
self.logger.error("Failed to find any names associated with "
"IP address %s" % cip)
@@ -415,7 +427,8 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
def GenerateHostKeyPair(self, client, filename):
"""Generate new host key pair for client."""
- match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa)_)?key)', filename)
+ match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa|ed25519)_)?key)',
+ filename)
if match:
hostkey = "%s.H_%s" % (match.group(1), client)
if match.group(2):
@@ -489,3 +502,15 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Failed to pull %s. This file does not "
"currently exist on the client" %
entry.get('name'))
+
+ def get_additional_data(self, metadata):
+ data = dict()
+ for key in self.keypatterns:
+ if key.endswith(".pub"):
+ try:
+ keyfile = "/etc/ssh/" + key
+ entry = self.entries[keyfile].best_matching(metadata)
+ data[key] = entry.data
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ pass
+ return data
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index cec2de297..ff67571fa 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -7,24 +7,18 @@ import lxml.etree
from Bcfg2.Server.Plugin import Plugin, Connector, DirectoryBacked, \
TemplateDataProvider, DefaultTemplateDataProvider
from Bcfg2.Logger import Debuggable
+from Bcfg2.Utils import safe_module_name
MODULE_RE = re.compile(r'(?P<filename>(?P<module>[^\/]+)\.py)$')
-def safe_module_name(module):
- """ Munge the name of a TemplateHelper module to avoid collisions
- with other Python modules. E.g., if someone has a helper named
- 'ldap.py', it should not be added to ``sys.modules`` as ``ldap``,
- but rather as something more obscure. """
- return '__TemplateHelper_%s' % module
-
-
class HelperModule(Debuggable):
""" Representation of a TemplateHelper module """
- def __init__(self, name):
+ def __init__(self, name, core):
Debuggable.__init__(self)
self.name = name
+ self.core = core
#: The name of the module as used by get_additional_data().
#: the name of the file with .py stripped off.
@@ -51,9 +45,14 @@ class HelperModule(Debuggable):
if event and event.code2str() not in ['exists', 'changed', 'created']:
return
+ # expire the metadata cache, because the module might have changed
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.metadata_cache.expire()
+
try:
- module = imp.load_source(safe_module_name(self._module_name),
- self.name)
+ module = imp.load_source(
+ safe_module_name('TemplateHelper', self._module_name),
+ self.name)
except: # pylint: disable=W0702
# this needs to be a blanket except because the
# imp.load_source() call can raise literally any error,
@@ -107,7 +106,6 @@ class TemplateHelper(Plugin, Connector, DirectoryBacked, TemplateDataProvider):
__author__ = 'chris.a.st.pierre@gmail.com'
ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.py[co])$')
patterns = MODULE_RE
- __child__ = HelperModule
def __init__(self, core):
Plugin.__init__(self, core)
@@ -115,6 +113,10 @@ class TemplateHelper(Plugin, Connector, DirectoryBacked, TemplateDataProvider):
DirectoryBacked.__init__(self, self.data)
TemplateDataProvider.__init__(self)
+ # The HelperModule needs access to the core, so we have to construct
+ # it manually and add the custom argument.
+ self.__child__ = lambda fname: HelperModule(fname, core)
+
def get_additional_data(self, _):
return dict([(h._module_name, h) # pylint: disable=W0212
for h in self.entries.values()])
diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py
index ac4c8eac4..67aa425d9 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/models.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/models.py
@@ -266,7 +266,7 @@ class Reason(models.Model):
current_to = models.CharField(max_length=1024, blank=True)
version = models.CharField(max_length=1024, blank=True)
current_version = models.CharField(max_length=1024, blank=True)
- current_exists = models.BooleanField() # False means its missing. Default True
+ current_exists = models.BooleanField(default=True) # False means its missing.
current_diff = models.TextField(max_length=1024*1024, blank=True)
is_binary = models.BooleanField(default=False)
is_sensitive = models.BooleanField(default=False)
diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py
index 91c370994..09b218464 100644
--- a/src/lib/Bcfg2/Server/Reports/updatefix.py
+++ b/src/lib/Bcfg2/Server/Reports/updatefix.py
@@ -4,7 +4,7 @@ import django.core.management
import sys
import logging
import traceback
-from Bcfg2.Server.models import InternalDatabaseVersion
+from Bcfg2.Server.models import internal_database_version
logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix')
@@ -138,7 +138,7 @@ def rollupdate(current_version):
exc_info=1)
# since array start at 0 but version start at 1
# we add 1 to the normal count
- ret = InternalDatabaseVersion.objects.create(version=i + 1)
+ ret = internal_database_version().create(version=i + 1)
return ret
else:
return None
@@ -149,10 +149,10 @@ def update_database():
try:
logger.debug("Running upgrade of models to the new one")
django.core.management.call_command("syncdb", interactive=False, verbosity=0)
- know_version = InternalDatabaseVersion.objects.order_by('-version')
+ know_version = internal_database_version().order_by('-version')
if not know_version:
logger.debug("No version, creating initial version")
- know_version = InternalDatabaseVersion.objects.create(version=lastversion)
+ know_version = internal_database_version().create(version=lastversion)
else:
know_version = know_version[0]
logger.debug("Presently at %s" % know_version)
diff --git a/src/lib/Bcfg2/Server/migrations/0001_initial.py b/src/lib/Bcfg2/Server/migrations/0001_initial.py
new file mode 100644
index 000000000..3b3dca455
--- /dev/null
+++ b/src/lib/Bcfg2/Server/migrations/0001_initial.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.9.9 on 2016-08-17 18:52
+from __future__ import unicode_literals
+
+import Bcfg2.Server.Plugin.helpers
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='MetadataClientModel',
+ fields=[
+ ('hostname', models.CharField(max_length=255, primary_key=True, serialize=False)),
+ ('version', models.CharField(max_length=31, null=True)),
+ ],
+ bases=(models.Model, Bcfg2.Server.Plugin.helpers.PluginDatabaseModel),
+ ),
+ migrations.CreateModel(
+ name='ProbesDataModel',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('hostname', models.CharField(max_length=255)),
+ ('probe', models.CharField(max_length=255)),
+ ('timestamp', models.DateTimeField(auto_now=True)),
+ ('data', models.TextField(null=True)),
+ ],
+ bases=(models.Model, Bcfg2.Server.Plugin.helpers.PluginDatabaseModel),
+ ),
+ migrations.CreateModel(
+ name='ProbesGroupsModel',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('hostname', models.CharField(max_length=255)),
+ ('group', models.CharField(max_length=255)),
+ ],
+ bases=(models.Model, Bcfg2.Server.Plugin.helpers.PluginDatabaseModel),
+ ),
+ ]
diff --git a/src/lib/Bcfg2/Server/migrations/__init__.py b/src/lib/Bcfg2/Server/migrations/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/migrations/__init__.py
diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py
index 8d6642a25..9c0153c74 100644
--- a/src/lib/Bcfg2/Server/models.py
+++ b/src/lib/Bcfg2/Server/models.py
@@ -8,6 +8,7 @@ import Bcfg2.Server.Plugins
LOGGER = logging.getLogger(__name__)
MODELS = []
+INTERNAL_DATABASE_VERSION = None
class _OptionContainer(object):
@@ -56,15 +57,23 @@ def load_models(plugins=None):
setattr(sys.modules[__name__], sym, obj)
MODELS.append(sym)
- class InternalDatabaseVersion(models.Model):
- """ Object that tell us to which version the database is """
- version = models.IntegerField()
- updated = models.DateTimeField(auto_now_add=True)
+def internal_database_version():
+ global INTERNAL_DATABASE_VERSION
- def __str__(self):
- return "version %d updated %s" % (self.version,
- self.updated.isoformat())
+ if INTERNAL_DATABASE_VERSION is None:
+ from django.db import models
+ class InternalDatabaseVersion(models.Model):
+ """ Object that tell us to which version the database is """
+ version = models.IntegerField()
+ updated = models.DateTimeField(auto_now_add=True)
- class Meta: # pylint: disable=C0111,W0232
- app_label = "reports"
- get_latest_by = "version"
+ def __str__(self):
+ return "version %d updated %s" % (self.version,
+ self.updated.isoformat())
+
+ class Meta: # pylint: disable=C0111,W0232
+ app_label = "reports"
+ get_latest_by = "version"
+ INTERNAL_DATABASE_VERSION = InternalDatabaseVersion
+
+ return INTERNAL_DATABASE_VERSION.objects
diff --git a/src/lib/Bcfg2/Server/south_migrations/0001_initial.py b/src/lib/Bcfg2/Server/south_migrations/0001_initial.py
new file mode 100644
index 000000000..864c311e5
--- /dev/null
+++ b/src/lib/Bcfg2/Server/south_migrations/0001_initial.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+from south.utils import datetime_utils as datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'MetadataClientModel'
+ db.create_table(u'Server_metadataclientmodel', (
+ ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255, primary_key=True)),
+ ('version', self.gf('django.db.models.fields.CharField')(max_length=31, null=True)),
+ ))
+ db.send_create_signal('Server', ['MetadataClientModel'])
+
+ # Adding model 'ProbesDataModel'
+ db.create_table(u'Server_probesdatamodel', (
+ (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ('probe', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
+ ('data', self.gf('django.db.models.fields.TextField')(null=True)),
+ ))
+ db.send_create_signal('Server', ['ProbesDataModel'])
+
+ # Adding model 'ProbesGroupsModel'
+ db.create_table(u'Server_probesgroupsmodel', (
+ (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ('group', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ))
+ db.send_create_signal('Server', ['ProbesGroupsModel'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'MetadataClientModel'
+ db.delete_table(u'Server_metadataclientmodel')
+
+ # Deleting model 'ProbesDataModel'
+ db.delete_table(u'Server_probesdatamodel')
+
+ # Deleting model 'ProbesGroupsModel'
+ db.delete_table(u'Server_probesgroupsmodel')
+
+
+ models = {
+ 'Server.metadataclientmodel': {
+ 'Meta': {'object_name': 'MetadataClientModel'},
+ 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
+ 'version': ('django.db.models.fields.CharField', [], {'max_length': '31', 'null': 'True'})
+ },
+ 'Server.probesdatamodel': {
+ 'Meta': {'object_name': 'ProbesDataModel'},
+ 'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
+ 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'probe': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
+ },
+ 'Server.probesgroupsmodel': {
+ 'Meta': {'object_name': 'ProbesGroupsModel'},
+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
+ }
+ }
+
+ complete_apps = ['Server']
\ No newline at end of file
diff --git a/src/lib/Bcfg2/Server/south_migrations/__init__.py b/src/lib/Bcfg2/Server/south_migrations/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/south_migrations/__init__.py